diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 000000000..8035e26a7 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,8 @@ +# from https://help.github.com/articles/dealing-with-line-endings/ +# +# always use LF, even if the files are edited on windows, they need to be compiled/used on unix +* text eol=lf + +# Denote all files that are truly binary and should not be modified. +*.png binary +*.jpg binary diff --git a/DAMASK_env.bat b/DAMASK_env.bat index 0c1afb0d6..202ddb00e 100644 --- a/DAMASK_env.bat +++ b/DAMASK_env.bat @@ -5,10 +5,10 @@ set LOCATION=%~dp0 set DAMASK_ROOT=%LOCATION%\DAMASK set DAMASK_NUM_THREADS=2 chcp 1252 -Title Düsseldorf Advanced Materials Simulation Kit - DAMASK, MPIE Düsseldorf +Title Düsseldorf Advanced Materials Simulation Kit - DAMASK, MPIE Düsseldorf echo. -echo Düsseldorf Advanced Materials Simulation Kit - DAMASK -echo Max-Planck-Institut für Eisenforschung, Düsseldorf +echo Düsseldorf Advanced Materials Simulation Kit - DAMASK +echo Max-Planck-Institut für Eisenforschung, Düsseldorf echo http://damask.mpie.de echo. echo Preparing environment ... diff --git a/DAMASK_env.sh b/DAMASK_env.sh index bfb5c12df..535708ed4 100644 --- a/DAMASK_env.sh +++ b/DAMASK_env.sh @@ -2,7 +2,7 @@ # usage: source DAMASK_env.sh if [ "$OSTYPE" == "linux-gnu" ] || [ "$OSTYPE" == 'linux' ]; then - DAMASK_ROOT=$(readlink -f "`dirname $BASH_SOURCE`") + DAMASK_ROOT=$(python -c "import os,sys; print(os.path.realpath(os.path.expanduser(sys.argv[1])))" "`dirname $BASH_SOURCE`") else [[ "${BASH_SOURCE::1}" == "/" ]] && BASE="" || BASE="`pwd`/" STAT=$(stat "`dirname $BASE$BASH_SOURCE`") @@ -18,11 +18,11 @@ fi SOLVER=`which DAMASK_spectral 2>/dev/null` if [ "x$SOLVER" == "x" ]; then - export SOLVER='Not found!' + SOLVER='Not found!' fi PROCESSING=`which postResults 2>/dev/null` if [ "x$PROCESSING" == "x" ]; then - export PROCESSING='Not found!' + PROCESSING='Not found!' fi # according to http://software.intel.com/en-us/forums/topic/501500 @@ -53,7 +53,11 @@ if [ ! -z "$PS1" ]; then [[ "x$SOLVER" != "x" ]] && echo "Spectral Solver $SOLVER" [[ "x$PROCESSING" != "x" ]] && echo "Post Processing $PROCESSING" echo "Multithreading DAMASK_NUM_THREADS=$DAMASK_NUM_THREADS" - [[ "x$PETSC_DIR" != "x" ]] && echo "PETSc location $PETSC_DIR" + if [ "x$PETSC_DIR" != "x" ]; then + echo "PETSc location $PETSC_DIR" + [[ `python -c "import os,sys; print(os.path.realpath(os.path.expanduser(sys.argv[1])))" "$PETSC_DIR"` == $PETSC_DIR ]] \ + || echo " ~~> "`python -c "import os,sys; print(os.path.realpath(os.path.expanduser(sys.argv[1])))" "$PETSC_DIR"` + fi [[ "x$PETSC_ARCH" != "x" ]] && echo "PETSc architecture $PETSC_ARCH" echo "MSC.Marc/Mentat $MSC_ROOT" echo diff --git a/DAMASK_env.zsh b/DAMASK_env.zsh index 521bfe8e1..b4b2d6953 100644 --- a/DAMASK_env.zsh +++ b/DAMASK_env.zsh @@ -51,7 +51,10 @@ if [ ! 
-z "$PS1" ]; then [[ "x$SOLVER" != "x" ]] && echo "Spectral Solver $SOLVER" [[ "x$PROCESSING" != "x" ]] && echo "Post Processing $PROCESSING" echo "Multithreading DAMASK_NUM_THREADS=$DAMASK_NUM_THREADS" - [[ "x$PETSC_DIR" != "x" ]] && echo "PETSc location $PETSC_DIR" + if [ "x$PETSC_DIR" != "x" ]; then + echo "PETSc location $PETSC_DIR" + [[ `readlink -f $PETSC_DIR` == $PETSC_DIR ]] || echo " ~~> "`readlink -f $PETSC_DIR` + fi [[ "x$PETSC_ARCH" != "x" ]] && echo "PETSc architecture $PETSC_ARCH" echo "MSC.Marc/Mentat $MSC_ROOT" echo diff --git a/LICENSE b/LICENSE index 2402b1685..5a76343a0 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright 2011-15 Max-Planck-Institut für Eisenforschung GmbH +Copyright 2011-16 Max-Planck-Institut für Eisenforschung GmbH This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/README b/README index 7fd364644..6705c13fa 100644 --- a/README +++ b/README @@ -2,9 +2,9 @@ visit damask.mpie.de for installation and usage instructions CONTACT INFORMATION -Max-Planck-Institut für Eisenforschung GmbH +Max-Planck-Institut für Eisenforschung GmbH Max-Planck-Str. 1 -40237 Düsseldorf +40237 Düsseldorf Germany Email: DAMASK@mpie.de diff --git a/VERSION b/VERSION index 6e8120049..c271e0939 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -revision3813-984-gf13bddd +v2.0.0-97-g8b27de7 diff --git a/code/.gitattributes b/code/.gitattributes new file mode 100644 index 000000000..8035e26a7 --- /dev/null +++ b/code/.gitattributes @@ -0,0 +1,8 @@ +# from https://help.github.com/articles/dealing-with-line-endings/ +# +# always use LF, even if the files are edited on windows, they need to be compiled/used on unix +* text eol=lf + +# Denote all files that are truly binary and should not be modified. +*.png binary +*.jpg binary diff --git a/code/.gitignore b/code/.gitignore new file mode 100644 index 000000000..bc33403b4 --- /dev/null +++ b/code/.gitignore @@ -0,0 +1,3 @@ +DAMASK_marc*.f90 +quit__genmod.f90 +*.marc diff --git a/config/Phase_DisloKMC_Tungsten.config b/config/Phase_DisloKMC_Tungsten.config deleted file mode 100644 index 799c80e61..000000000 --- a/config/Phase_DisloKMC_Tungsten.config +++ /dev/null @@ -1,37 +0,0 @@ -### $Id$ ### - -[Tungsten] -elasticity hooke -plasticity dislokmc - -### Material parameters ### -lattice_structure bcc -C11 523.0e9 # From Marinica et al. Journal of Physics: Condensed Matter(2013) -C12 202.0e9 -C44 161.0e9 - -grainsize 2.0e-5 # Average grain size [m] 2.0e-5 -SolidSolutionStrength 0.0 # Strength due to elements in solid solution - -### Dislocation glide parameters ### -#per family -Nslip 12 0 -slipburgers 2.72e-10 # Burgers vector of slip system [m] -rhoedge0 1.0e12 # Initial edge dislocation density [m/m**3] -rhoedgedip0 1.0 # Initial edged dipole dislocation density [m/m**3] -Qedge 2.725e-19 # Activation energy for dislocation glide [J] -v0 3560.3 # Initial glide velocity [m/s](kmC) -p_slip 0.16 # p-exponent in glide velocity -q_slip 1.00 # q-exponent in glide velocity -u_slip 2.47 # u-exponent of stress pre-factor (kmC) -s_slip 0.97 # self hardening in glide velocity (kmC) -tau_peierls 2.03e9 # peierls stress [Pa] - -#hardening -dipoleformationfactor 0 # to have hardening due to dipole formation off -CLambdaSlip 10.0 # Adj. parameter controlling dislocation mean free path -D0 4.0e-5 # Vacancy diffusion prefactor [m**2/s] -Qsd 4.5e-19 # Activation energy for climb [J] -Catomicvolume 1.0 # Adj. 
parameter controlling the atomic volume [in b] -Cedgedipmindistance 1.0 # Adj. parameter controlling the minimum dipole distance [in b] -interaction_slipslip 0.2 0.11 0.19 0.15 0.11 0.17 diff --git a/config/numerics.config b/config/numerics.config index 62adb552f..24241b8c4 100644 --- a/config/numerics.config +++ b/config/numerics.config @@ -54,12 +54,12 @@ discrepancyPower_RGC 5.0 fixed_seed 0 # put any number larger than zero, integer, if you want to have a pseudo random distribution ## spectral parameters ## -err_div_tolAbs 1.0e-3 # relative tolerance for fulfillment of stress equilibrium -err_div_tolRel 5.0e-4 # absolute tolerance for fulfillment of stress equilibrium -err_curl_tolAbs 1.0e-12 # relative tolerance for fulfillment of strain compatibility -err_curl_tolRel 5.0e-4 # absolute tolerance for fulfillment of strain compatibility -err_stress_tolrel 0.01 # relative tolerance for fulfillment of stress BC -err_stress_tolabs 1.0e3 # absolute tolerance for fulfillment of stress BC +err_div_tolAbs 1.0e-3 # absolute tolerance for fulfillment of stress equilibrium +err_div_tolRel 5.0e-4 # relative tolerance for fulfillment of stress equilibrium +err_curl_tolAbs 1.0e-12 # absolute tolerance for fulfillment of strain compatibility +err_curl_tolRel 5.0e-4 # relative tolerance for fulfillment of strain compatibility +err_stress_tolAbs 1.0e3 # absolute tolerance for fulfillment of stress BC +err_stress_tolRel 0.01 # relative tolerance for fulfillment of stress BC fftw_timelimit -1.0 # timelimit of plan creation for FFTW, see manual on www.fftw.org, Default -1.0: disable timelimit rotation_tol 1.0e-12 # tolerance of rotation specified in loadcase, Default 1.0e-12: first guess fftw_plan_mode FFTW_PATIENT # reads the planing-rigor flag, see manual on www.fftw.org, Default FFTW_PATIENT: use patient planner flag diff --git a/lib/damask/.gitignore b/lib/damask/.gitignore new file mode 100644 index 000000000..00feaa11f --- /dev/null +++ b/lib/damask/.gitignore @@ -0,0 +1,2 @@ +core.so +corientation.so diff --git a/lib/damask/__init__.py b/lib/damask/__init__.py index 6b980b750..1b6ec409d 100644 --- a/lib/damask/__init__.py +++ b/lib/damask/__init__.py @@ -1,27 +1,27 @@ # -*- coding: UTF-8 no BOM -*- -# $Id$ +"""Main aggregator""" import sys, os with open(os.path.join(os.path.dirname(__file__),'../../VERSION')) as f: version = f.readline()[:-1] -from .environment import Environment # only one class -from .asciitable import ASCIItable # only one class -from .config import Material # will be extended to debug and numerics -from .colormaps import Colormap, Color -from .orientation import Quaternion, Rodrigues, Symmetry, Orientation +from .environment import Environment # noqa +from .asciitable import ASCIItable # noqa +from .config import Material # noqa +from .colormaps import Colormap, Color # noqa +from .orientation import Quaternion, Rodrigues, Symmetry, Orientation # noqa # try: # from .corientation import Quaternion, Rodrigues, Symmetry, Orientation # print "Import Cython version of Orientation module" # except: # from .orientation import Quaternion, Rodrigues, Symmetry, Orientation #from .block import Block # only one class -from .result import Result # only one class -from .geometry import Geometry # one class with subclasses -from .solver import Solver # one class with subclasses -from .test import Test -from .util import extendableOption +from .result import Result # noqa +from .geometry import Geometry # noqa +from .solver import Solver # noqa +from .test import Test # noqa +from .util import 
extendableOption # noqa try: from . import core diff --git a/lib/damask/asciitable.py b/lib/damask/asciitable.py index 448c790ef..fddd21f42 100644 --- a/lib/damask/asciitable.py +++ b/lib/damask/asciitable.py @@ -4,12 +4,9 @@ import os,sys import numpy as np -import util class ASCIItable(): - ''' - There should be a doc string here :) - ''' + """Read and write to ASCII tables""" __slots__ = ['__IO__', 'info', @@ -28,7 +25,7 @@ class ASCIItable(): readonly = False, # no reading from file ): self.__IO__ = {'output': [], - 'buffered': buffered, + 'buffered': buffered, 'labeled': labeled, # header contains labels 'labels': [], # labels according to file info 'readBuffer': [], # buffer to hold non-advancing reads @@ -38,18 +35,18 @@ class ASCIItable(): self.__IO__['inPlace'] = not outname and name and not readonly if self.__IO__['inPlace']: outname = name + self.tmpext # transparently create tmp file try: - self.__IO__['in'] = (open( name,'r') if os.access( name, os.R_OK) else None) if name else sys.stdin + self.__IO__['in'] = (open( name,'r') if os.access( name, os.R_OK) else None) if name else sys.stdin except TypeError: self.__IO__['in'] = name try: - self.__IO__['out'] = (open(outname,'w') if (not os.path.isfile(outname) \ - or os.access( outname, os.W_OK) \ - ) \ - and (not self.__IO__['inPlace'] \ - or not os.path.isfile(name) \ - or os.access( name, os.W_OK) \ - ) else None) if outname else sys.stdout + self.__IO__['out'] = (open(outname,'w') if (not os.path.isfile(outname) or + os.access( outname, os.W_OK) + ) and + (not self.__IO__['inPlace'] or + not os.path.isfile(name) or + os.access( name, os.W_OK) + ) else None) if outname else sys.stdout except TypeError: self.__IO__['out'] = outname @@ -58,8 +55,8 @@ class ASCIItable(): self.data = [] self.line = '' - if self.__IO__['in'] == None \ - or self.__IO__['out'] == None: raise IOError # complain if any required file access not possible + if self.__IO__['in'] is None \ + or self.__IO__['out'] is None: raise IOError # complain if any required file access not possible # ------------------------------------------------------------------ def _transliterateToFloat(self, @@ -86,9 +83,7 @@ class ASCIItable(): # ------------------------------------------------------------------ def output_write(self, what): - ''' - aggregate a single row (string) or list of (possibly containing further lists of) rows into output - ''' + """aggregate a single row (string) or list of (possibly containing further lists of) rows into output""" if not isinstance(what, (str, unicode)): try: for item in what: self.output_write(item) @@ -104,7 +99,7 @@ class ASCIItable(): clear = True): try: self.__IO__['output'] == [] or self.__IO__['out'].write('\n'.join(self.__IO__['output']) + '\n') - except IOError as e: + except IOError: return False if clear: self.output_clear() return True @@ -127,11 +122,12 @@ class ASCIItable(): # ------------------------------------------------------------------ def head_read(self): - ''' - get column labels by either reading - the first row or, if keyword "head[*]" is present, - the last line of the header - ''' + """ + get column labels by either reading + + the first row or, if keyword "head[*]" is present, + the last line of the header + """ import re try: @@ -180,10 +176,7 @@ class ASCIItable(): # ------------------------------------------------------------------ def head_write(self, header = True): - ''' - write current header information (info + labels) - ''' - + """write current header information (info + labels)""" head = 
['{}\theader'.format(len(self.info)+self.__IO__['labeled'])] if header else [] head.append(self.info) if self.__IO__['labeled']: head.append('\t'.join(self.labels)) @@ -192,9 +185,7 @@ class ASCIItable(): # ------------------------------------------------------------------ def head_getGeom(self): - ''' - interpret geom header - ''' + """interpret geom header""" identifiers = { 'grid': ['a','b','c'], 'size': ['x','y','z'], @@ -234,9 +225,7 @@ class ASCIItable(): # ------------------------------------------------------------------ def head_putGeom(self,info): - ''' - translate geometry description to header - ''' + """translate geometry description to header""" self.info_append([ "grid\ta {}\tb {}\tc {}".format(*info['grid']), "size\tx {}\ty {}\tz {}".format(*info['size']), @@ -249,9 +238,7 @@ class ASCIItable(): def labels_append(self, what, reset = False): - ''' - add item or list to existing set of labels (and switch on labeling) - ''' + """add item or list to existing set of labels (and switch on labeling)""" if not isinstance(what, (str, unicode)): try: for item in what: self.labels_append(item) @@ -265,28 +252,27 @@ class ASCIItable(): # ------------------------------------------------------------------ def labels_clear(self): - ''' - delete existing labels and switch to no labeling - ''' + """delete existing labels and switch to no labeling""" self.labels = [] self.__IO__['labeled'] = False # ------------------------------------------------------------------ def label_index(self, labels): - ''' - tell index of column label(s). - return numpy array if asked for list of labels. - transparently deals with label positions implicitly given as numbers or their headings given as strings. - ''' + """ + tell index of column label(s). + + return numpy array if asked for list of labels. + transparently deals with label positions implicitly given as numbers or their headings given as strings. + """ from collections import Iterable if isinstance(labels, Iterable) and not isinstance(labels, str): # check whether list of labels is requested idx = [] for label in labels: - if label != None: + if label is not None: try: - idx.append(int(label)) # column given as integer number? + idx.append(int(label)-1) # column given as integer number? except ValueError: try: idx.append(self.labels.index(label)) # locate string in label list @@ -297,7 +283,7 @@ class ASCIItable(): idx.append(-1) # not found... else: try: - idx = int(labels) + idx = int(labels)-1 # offset for python array indexing except ValueError: try: idx = self.labels.index(labels) @@ -305,28 +291,28 @@ class ASCIItable(): try: idx = self.labels.index('1_'+labels) # locate '1_'+string in label list except ValueError: - idx = None if labels == None else -1 + idx = None if labels is None else -1 - return np.array(idx) if isinstance(idx,list) else idx + return np.array(idx) if isinstance(idx,Iterable) else idx # ------------------------------------------------------------------ def label_dimension(self, labels): - ''' - tell dimension (length) of column label(s). - return numpy array if asked for list of labels. - transparently deals with label positions implicitly given as numbers or their headings given as strings. - ''' + """ + tell dimension (length) of column label(s). + return numpy array if asked for list of labels. + transparently deals with label positions implicitly given as numbers or their headings given as strings. 
+ """ from collections import Iterable if isinstance(labels, Iterable) and not isinstance(labels, str): # check whether list of labels is requested dim = [] for label in labels: - if label != None: + if label is not None: myDim = -1 try: # column given as number? - idx = int(label) + idx = int(label)-1 myDim = 1 # if found has at least dimension 1 if self.labels[idx].startswith('1_'): # column has multidim indicator? while idx+myDim < len(self.labels) and self.labels[idx+myDim].startswith("%i_"%(myDim+1)): @@ -345,7 +331,7 @@ class ASCIItable(): dim = -1 # assume invalid label idx = -1 try: # column given as number? - idx = int(labels) + idx = int(labels)-1 dim = 1 # if found has at least dimension 1 if self.labels[idx].startswith('1_'): # column has multidim indicator? while idx+dim < len(self.labels) and self.labels[idx+dim].startswith("%i_"%(dim+1)): @@ -359,17 +345,17 @@ class ASCIItable(): while idx+dim < len(self.labels) and self.labels[idx+dim].startswith("%i_"%(dim+1)): dim += 1 # keep adding while going through object - return np.array(dim) if isinstance(dim,list) else dim + return np.array(dim) if isinstance(dim,Iterable) else dim # ------------------------------------------------------------------ def label_indexrange(self, labels): - ''' - tell index range for given label(s). - return numpy array if asked for list of labels. - transparently deals with label positions implicitly given as numbers or their headings given as strings. - ''' + """ + tell index range for given label(s). + return numpy array if asked for list of labels. + transparently deals with label positions implicitly given as numbers or their headings given as strings. + """ from collections import Iterable start = self.label_index(labels) @@ -377,13 +363,11 @@ class ASCIItable(): return map(lambda a,b: xrange(a,a+b), zip(start,dim)) if isinstance(labels, Iterable) and not isinstance(labels, str) \ else xrange(start,start+dim) - + # ------------------------------------------------------------------ def info_append(self, what): - ''' - add item or list to existing set of infos - ''' + """add item or list to existing set of infos""" if not isinstance(what, (str, unicode)): try: for item in what: self.info_append(item) @@ -394,9 +378,7 @@ class ASCIItable(): # ------------------------------------------------------------------ def info_clear(self): - ''' - delete any info block - ''' + """delete any info block""" self.info = [] # ------------------------------------------------------------------ @@ -409,9 +391,7 @@ class ASCIItable(): # ------------------------------------------------------------------ def data_skipLines(self, count): - ''' - wind forward by count number of lines - ''' + """wind forward by count number of lines""" for i in xrange(count): alive = self.data_read() @@ -421,9 +401,7 @@ class ASCIItable(): def data_read(self, advance = True, respectLabels = True): - ''' - read next line (possibly buffered) and parse it into data array - ''' + """read next line (possibly buffered) and parse it into data array""" self.line = self.__IO__['readBuffer'].pop(0) if len(self.__IO__['readBuffer']) > 0 \ else self.__IO__['in'].readline().strip() # take buffered content or get next data row from file @@ -434,7 +412,7 @@ class ASCIItable(): if self.__IO__['labeled'] and respectLabels: # if table has labels items = self.line.split()[:len(self.__IO__['labels'])] # use up to label count (from original file info) - self.data = items if len(items) == len(self.__IO__['labels']) else [] # take entries if correct number, 
i.e. not too few compared to label count + self.data = items if len(items) == len(self.__IO__['labels']) else [] # take entries if label count matches else: self.data = self.line.split() # otherwise take all @@ -443,9 +421,7 @@ class ASCIItable(): # ------------------------------------------------------------------ def data_readArray(self, labels = []): - ''' - read whole data of all (given) labels as numpy array - ''' + """read whole data of all (given) labels as numpy array""" from collections import Iterable try: @@ -453,7 +429,7 @@ class ASCIItable(): except: pass # assume/hope we are at data start already... - if labels == None or labels == []: + if labels is None or labels == []: use = None # use all columns (and keep labels intact) labels_missing = [] else: @@ -467,9 +443,10 @@ class ASCIItable(): columns = [] for i,(c,d) in enumerate(zip(indices[present],dimensions[present])): # for all valid labels ... + # ... transparently add all components unless column referenced by number or with explicit dimension columns += range(c,c + \ (d if str(c) != str(labels[present[i]]) else \ - 1)) # ... transparently add all components unless column referenced by number or with explicit dimension + 1)) use = np.array(columns) self.labels = list(np.array(self.labels)[use]) # update labels with valid subset @@ -481,9 +458,7 @@ class ASCIItable(): # ------------------------------------------------------------------ def data_write(self, delimiter = '\t'): - ''' - write current data array and report alive output back - ''' + """write current data array and report alive output back""" if len(self.data) == 0: return True if isinstance(self.data[0],list): @@ -495,9 +470,7 @@ class ASCIItable(): def data_writeArray(self, fmt = None, delimiter = '\t'): - ''' - write whole numpy array data - ''' + """write whole numpy array data""" for row in self.data: try: output = [fmt % value for value in row] if fmt else map(repr,row) @@ -520,9 +493,7 @@ class ASCIItable(): # ------------------------------------------------------------------ def data_set(self, what, where): - ''' - update data entry in column "where". grows data array if needed. - ''' + """update data entry in column "where". 
Grows data array if needed.""" idx = -1 try: idx = self.label_index(where) @@ -546,25 +517,27 @@ # ------------------------------------------------------------------ def microstructure_read(self, - grid): - ''' - read microstructure data (from .geom format) - ''' - - N = grid.prod() # expected number of microstructure indices in data - microstructure = np.zeros(N,'i') # initialize as flat array + grid, + type = 'i', + strict = False): + """read microstructure data (from .geom format)""" + def datatype(item): + return int(item) if type.lower() == 'i' else float(item) + + N = grid.prod() # expected number of microstructure indices in data + microstructure = np.zeros(N,type) # initialize as flat array i = 0 while i < N and self.data_read(): items = self.data if len(items) > 2: - if items[1].lower() == 'of': items = [int(items[2])]*int(items[0]) - elif items[1].lower() == 'to': items = range(int(items[0]),1+int(items[2])) - else: items = map(int,items) - else: items = map(int,items) + if items[1].lower() == 'of': items = np.ones(int(items[0]))*datatype(items[2]) + elif items[1].lower() == 'to': items = np.arange(datatype(items[0]),1+datatype(items[2])) + else: items = map(datatype,items) + else: items = map(datatype,items) - s = min(len(items), N-i) # prevent overflow of microstructure array + s = min(len(items), N-i) # prevent overflow of microstructure array microstructure[i:i+s] = items[:s] - i += s + i += len(items) - return microstructure + return (microstructure, i == N and not self.data_read()) if strict else microstructure # check for proper point count and end of file diff --git a/lib/damask/colormaps.py b/lib/damask/colormaps.py index f0045f4a7..314581471 100644 --- a/lib/damask/colormaps.py +++ b/lib/damask/colormaps.py @@ -5,11 +5,12 @@ import math,numpy as np ### --- COLOR CLASS -------------------------------------------------- class Color(): - ''' - Conversion of colors between different color-spaces. Colors should be given in the form - Color('model',[vector]).To convert and copy color from one space to other, use the methods - convertTo('model') and expressAs('model')spectively - ''' + """Conversion of colors between different color-spaces. + + Colors should be given in the form + Color('model',[vector]). To convert and copy a color from one space to another, use the methods + convertTo('model') and expressAs('model'), respectively + """ __slots__ = [ 'model', @@ -17,7 +18,7 @@ class Color(): ] - # ------------------------------------------------------------------ +# ------------------------------------------------------------------ def __init__(self, model = 'RGB', color = np.zeros(3,'d')): @@ -32,30 +33,32 @@ class Color(): model = model.upper() if model not in self.__transforms__.keys(): model = 'RGB' - if model == 'RGB' and max(color) > 1.0: # are we RGB255 ? + if model == 'RGB' and max(color) > 1.0: # are we RGB255 ? for i in range(3): - color[i] /= 255.0 # rescale to RGB + color[i] /= 255.0 # rescale to RGB - if model == 'HSL': # are we HSL ? - if abs(color[0]) > 1.0: color[0] /= 360.0 # with angular hue? - while color[0] >= 1.0: color[0] -= 1.0 # rewind to proper range - while color[0] < 0.0: color[0] += 1.0 # rewind to proper range + if model == 'HSL': # are we HSL ? + if abs(color[0]) > 1.0: color[0] /= 360.0 # with angular hue?
+ while color[0] >= 1.0: color[0] -= 1.0 # rewind to proper range + while color[0] < 0.0: color[0] += 1.0 # rewind to proper range self.model = model self.color = np.array(color,'d') - # ------------------------------------------------------------------ +# ------------------------------------------------------------------ def __repr__(self): + """Color model and values""" return 'Model: %s Color: %s'%(self.model,str(self.color)) - # ------------------------------------------------------------------ +# ------------------------------------------------------------------ def __str__(self): + """Color model and values""" return self.__repr__() - # ------------------------------------------------------------------ +# ------------------------------------------------------------------ def convertTo(self,toModel = 'RGB'): toModel = toModel.upper() if toModel not in self.__transforms__.keys(): return @@ -73,17 +76,19 @@ class Color(): return self - # ------------------------------------------------------------------ +# ------------------------------------------------------------------ def expressAs(self,asModel = 'RGB'): return self.__class__(self.model,self.color).convertTo(asModel) - # ------------------------------------------------------------------ - # convert H(ue) S(aturation) L(uminance) to R(red) G(reen) B(lue) - # with S,L,H,R,G,B running from 0 to 1 - # from http://en.wikipedia.org/wiki/HSL_and_HSV - def _HSL2RGB(self): + def _HSL2RGB(self): + """ + convert H(ue) S(aturation) L(uminance) to R(red) G(reen) B(lue) + + with S,L,H,R,G,B running from 0 to 1 + from http://en.wikipedia.org/wiki/HSL_and_HSV + """ if self.model != 'HSL': return sextant = self.color[0]*6.0 @@ -102,13 +107,14 @@ class Color(): self.model = converted.model self.color = converted.color - - # ------------------------------------------------------------------ - # convert R(ed) G(reen) B(lue) to H(ue) S(aturation) L(uminance) - # with S,L,H,R,G,B running from 0 to 1 - # from http://130.113.54.154/~monger/hsl-rgb.html + def _RGB2HSL(self): - + """ + convert R(ed) G(reen) B(lue) to H(ue) S(aturation) L(uminance) + + with S,L,H,R,G,B running from 0 to 1 + from http://130.113.54.154/~monger/hsl-rgb.html + """ if self.model != 'RGB': return HSL = np.zeros(3,'d') @@ -129,7 +135,7 @@ class Color(): HSL[0] = 2.0 + (self.color[2] - self.color[0])/(maxcolor - mincolor) elif (maxcolor == self.color[2]): HSL[0] = 4.0 + (self.color[0] - self.color[1])/(maxcolor - mincolor) - HSL[0] = HSL[0]*60.0 # is it necessary to scale to 360 hue values? might be dangerous for small values <1..! 
+ HSL[0] = HSL[0]*60.0 # scaling to 360 might be dangerous for small values if (HSL[0] < 0.0): HSL[0] = HSL[0] + 360.0 for i in xrange(2): @@ -141,12 +147,14 @@ class Color(): self.color = converted.color - # ------------------------------------------------------------------ - # convert R(ed) G(reen) B(lue) to CIE XYZ - # with all values in the range of 0 to 1 - # from http://www.cs.rit.edu/~ncs/color/t_convert.html - def _RGB2XYZ(self): + def _RGB2XYZ(self): + """ + convert R(ed) G(reen) B(lue) to CIE XYZ + + with all values in the range of 0 to 1 + from http://www.cs.rit.edu/~ncs/color/t_convert.html + """ if self.model != 'RGB': return XYZ = np.zeros(3,'d') @@ -168,12 +176,14 @@ class Color(): self.color = converted.color - # ------------------------------------------------------------------ - # convert CIE XYZ to R(ed) G(reen) B(lue) - # with all values in the range of 0 to 1 - # from http://www.cs.rit.edu/~ncs/color/t_convert.html - def _XYZ2RGB(self): + def _XYZ2RGB(self): + """ + convert CIE XYZ to R(ed) G(reen) B(lue) + + with all values in the range of 0 to 1 + from http://www.cs.rit.edu/~ncs/color/t_convert.html + """ if self.model != 'XYZ': return convert = np.array([[ 3.240479,-1.537150,-0.498535], @@ -189,7 +199,7 @@ class Color(): RGB[i] = min(RGB[i],1.0) RGB[i] = max(RGB[i],0.0) - maxVal = max(RGB) # clipping colors according to the display gamut + maxVal = max(RGB) # clipping colors according to the display gamut if (maxVal > 1.0): RGB /= maxVal converted = Color('RGB', RGB) @@ -197,15 +207,17 @@ class Color(): self.color = converted.color - # ------------------------------------------------------------------ - # convert CIE Lab to CIE XYZ - # with XYZ in the range of 0 to 1 - # from http://www.easyrgb.com/index.php?X=MATH&H=07#text7 - def _CIELAB2XYZ(self): + def _CIELAB2XYZ(self): + """ + convert CIE Lab to CIE XYZ + + with XYZ in the range of 0 to 1 + from http://www.easyrgb.com/index.php?X=MATH&H=07#text7 + """ if self.model != 'CIELAB': return - ref_white = np.array([.95047, 1.00000, 1.08883],'d') # Observer = 2, Illuminant = D65 + ref_white = np.array([.95047, 1.00000, 1.08883],'d') # Observer = 2, Illuminant = D65 XYZ = np.zeros(3,'d') XYZ[1] = (self.color[0] + 16.0 ) / 116.0 @@ -220,16 +232,16 @@ class Color(): self.model = converted.model self.color = converted.color - - # ------------------------------------------------------------------ - # convert CIE XYZ to CIE Lab - # with XYZ in the range of 0 to 1 - # from http://en.wikipedia.org/wiki/Lab_color_space, http://www.cs.rit.edu/~ncs/color/t_convert.html def _XYZ2CIELAB(self): + """ + convert CIE XYZ to CIE Lab + with XYZ in the range of 0 to 1 + from http://en.wikipedia.org/wiki/Lab_color_space, http://www.cs.rit.edu/~ncs/color/t_convert.html + """ if self.model != 'XYZ': return - ref_white = np.array([.95047, 1.00000, 1.08883],'d') # Observer = 2, Illuminant = D65 + ref_white = np.array([.95047, 1.00000, 1.08883],'d') # Observer = 2, Illuminant = D65 XYZ = self.color/ref_white for i in xrange(len(XYZ)): @@ -242,12 +254,13 @@ class Color(): self.model = converted.model self.color = converted.color - - # ------------------------------------------------------------------ - # convert CIE Lab to Msh colorspace - # from http://www.cs.unm.edu/~kmorel/documents/ColorMaps/DivergingColorMapWorkshop.xls + def _CIELAB2MSH(self): - + """ + convert CIE Lab to Msh colorspace + + from http://www.cs.unm.edu/~kmorel/documents/ColorMaps/DivergingColorMapWorkshop.xls + """ if self.model != 'CIELAB': return Msh = np.zeros(3,'d') @@ 
-261,13 +274,14 @@ class Color(): self.model = converted.model self.color = converted.color - - # ------------------------------------------------------------------ - # convert Msh colorspace to CIE Lab - # s,h in radians - # from http://www.cs.unm.edu/~kmorel/documents/ColorMaps/DivergingColorMapWorkshop.xls - def _MSH2CIELAB(self): + def _MSH2CIELAB(self): + """ + convert Msh colorspace to CIE Lab + + s,h in radians + from http://www.cs.unm.edu/~kmorel/documents/ColorMaps/DivergingColorMapWorkshop.xls + """ if self.model != 'MSH': return Lab = np.zeros(3,'d') @@ -280,13 +294,8 @@ class Color(): self.color = converted.color - -### --- COLORMAP CLASS ----------------------------------------------- - class Colormap(): - ''' - perceptually uniform diverging or sequential colormaps. - ''' + """perceptually uniform diverging or sequential colormaps.""" __slots__ = [ 'left', @@ -294,20 +303,40 @@ class Colormap(): 'interpolate', ] __predefined__ = { - 'gray': {'left': Color('HSL',[0,1,1]), 'right': Color('HSL',[0,0,0.15]), 'interpolate': 'perceptualuniform'}, - 'grey': {'left': Color('HSL',[0,1,1]), 'right': Color('HSL',[0,0,0.15]), 'interpolate': 'perceptualuniform'}, - 'red': {'left': Color('HSL',[0,1,0.14]), 'right': Color('HSL',[0,0.35,0.91]), 'interpolate': 'perceptualuniform'}, - 'green': {'left': Color('HSL',[0.33333,1,0.14]), 'right': Color('HSL',[0.33333,0.35,0.91]), 'interpolate': 'perceptualuniform'}, - 'blue': {'left': Color('HSL',[0.66,1,0.14]), 'right': Color('HSL',[0.66,0.35,0.91]), 'interpolate': 'perceptualuniform'}, - 'seaweed': {'left': Color('HSL',[0.78,1.0,0.1]), 'right': Color('HSL',[0.40000,0.1,0.9]), 'interpolate': 'perceptualuniform'}, - 'bluebrown': {'left': Color('HSL',[0.65,0.53,0.49]), 'right': Color('HSL',[0.11,0.75,0.38]), 'interpolate': 'perceptualuniform'}, - 'redgreen': {'left': Color('HSL',[0.97,0.96,0.36]), 'right': Color('HSL',[0.33333,1.0,0.14]), 'interpolate': 'perceptualuniform'}, - 'bluered': {'left': Color('HSL',[0.65,0.53,0.49]), 'right': Color('HSL',[0.97,0.96,0.36]), 'interpolate': 'perceptualuniform'}, - 'blueredrainbow':{'left': Color('HSL',[2.0/3.0,1,0.5]), 'right': Color('HSL',[0,1,0.5]), 'interpolate': 'linear' }, + 'gray': {'left': Color('HSL',[0,1,1]), + 'right': Color('HSL',[0,0,0.15]), + 'interpolate': 'perceptualuniform'}, + 'grey': {'left': Color('HSL',[0,1,1]), + 'right': Color('HSL',[0,0,0.15]), + 'interpolate': 'perceptualuniform'}, + 'red': {'left': Color('HSL',[0,1,0.14]), + 'right': Color('HSL',[0,0.35,0.91]), + 'interpolate': 'perceptualuniform'}, + 'green': {'left': Color('HSL',[0.33333,1,0.14]), + 'right': Color('HSL',[0.33333,0.35,0.91]), + 'interpolate': 'perceptualuniform'}, + 'blue': {'left': Color('HSL',[0.66,1,0.14]), + 'right': Color('HSL',[0.66,0.35,0.91]), + 'interpolate': 'perceptualuniform'}, + 'seaweed': {'left': Color('HSL',[0.78,1.0,0.1]), + 'right': Color('HSL',[0.40000,0.1,0.9]), + 'interpolate': 'perceptualuniform'}, + 'bluebrown': {'left': Color('HSL',[0.65,0.53,0.49]), + 'right': Color('HSL',[0.11,0.75,0.38]), + 'interpolate': 'perceptualuniform'}, + 'redgreen': {'left': Color('HSL',[0.97,0.96,0.36]), + 'right': Color('HSL',[0.33333,1.0,0.14]), + 'interpolate': 'perceptualuniform'}, + 'bluered': {'left': Color('HSL',[0.65,0.53,0.49]), + 'right': Color('HSL',[0.97,0.96,0.36]), + 'interpolate': 'perceptualuniform'}, + 'blueredrainbow':{'left': Color('HSL',[2.0/3.0,1,0.5]), + 'right': Color('HSL',[0,1,0.5]), + 'interpolate': 'linear' }, } - # ------------------------------------------------------------------ +# 
------------------------------------------------------------------ def __init__(self, left = Color('RGB',[1,1,1]), right = Color('RGB',[0,0,0]), @@ -330,26 +359,27 @@ class Colormap(): self.interpolate = interpolate - # ------------------------------------------------------------------ +# ------------------------------------------------------------------ def __repr__(self): + """left and right value of colormap""" return 'Left: %s Right: %s'%(self.left,self.right) - # ------------------------------------------------------------------ +# ------------------------------------------------------------------ def invert(self): (self.left, self.right) = (self.right, self.left) return self - # ------------------------------------------------------------------ +# ------------------------------------------------------------------ def color(self,fraction = 0.5): def interpolate_Msh(lo, hi, frac): def rad_diff(a,b): return abs(a[2]-b[2]) - - def adjust_hue(Msh_sat, Msh_unsat): # if saturation of one of the two colors is too less than the other, hue of the less +# if one color is much less saturated than the other, adjust hue of the less saturated one + def adjust_hue(Msh_sat, Msh_unsat): if Msh_sat[0] >= Msh_unsat[0]: return Msh_sat[2] else: @@ -375,10 +405,11 @@ class Colormap(): return Color('MSH',Msh) def interpolate_linear(lo, hi, frac): - ''' - linearly interpolate color at given fraction between lower and higher color in model of lower color - ''' - + """ + linearly interpolate color at given fraction + between lower and higher color, in the model of the lower color + """ interpolation = (1.0 - frac) * np.array(lo.color[:]) \ + frac * np.array(hi.expressAs(lo.model).color[:]) @@ -393,23 +424,23 @@ class Colormap(): else: raise NameError('unknown color interpolation method') - # ------------------------------------------------------------------ +# ------------------------------------------------------------------ def export(self,name = 'uniformPerceptualColorMap',\ format = 'paraview',\ steps = 2,\ crop = [-1.0,1.0], model = 'RGB'): - ''' + """ [RGB] colormap for use in paraview or gmsh, or as raw string, or array. + arguments: name, format, steps, crop. format is one of (paraview, gmsh, raw, list). crop selects a (sub)range in [-1.0,1.0]. generates sequential map if one limiting color is either white or black, diverging map otherwise.
- ''' - - format = format.lower() # consistent comparison basis - frac = 0.5*(np.array(crop) + 1.0) # rescale crop range to fractions + """ + format = format.lower() # consistent comparison basis + frac = 0.5*(np.array(crop) + 1.0) # rescale crop range to fractions colors = [self.color(float(i)/(steps-1)*(frac[1]-frac[0])+frac[0]).expressAs(model).color for i in xrange(steps)] if format == 'paraview': @@ -439,4 +470,4 @@ class Colormap(): raise NameError('unknown color export format') return '\n'.join(colormap) + '\n' if type(colormap[0]) is str else colormap - \ No newline at end of file + diff --git a/lib/damask/config/__init__.py b/lib/damask/config/__init__.py index 3c75ebd19..f3635d2a9 100644 --- a/lib/damask/config/__init__.py +++ b/lib/damask/config/__init__.py @@ -1,5 +1,5 @@ # -*- coding: UTF-8 no BOM -*- -# $Id$ +"""Aggregator for configuration file handling""" -from .material import Material +from .material import Material # noqa diff --git a/lib/damask/config/material.py b/lib/damask/config/material.py index 09b716e04..bca005e0e 100644 --- a/lib/damask/config/material.py +++ b/lib/damask/config/material.py @@ -100,20 +100,19 @@ class Texture(Section): class Material(): - - ''' - Reads, manipulates and writes material.config files - ''' + """Reads, manipulates and writes material.config files""" + __slots__ = ['data'] def __init__(self,verbose=True): + """generates ordered list of parts""" self.parts = [ 'homogenization', 'microstructure', 'crystallite', 'phase', 'texture', - ] # ordered (!) list of parts + ] self.data = {\ 'homogenization': {'__order__': []}, 'microstructure': {'__order__': []}, @@ -124,6 +123,7 @@ class Material(): self.verbose = verbose def __repr__(self): + """returns current configuration to be used as material.config""" me = [] for part in self.parts: if self.verbose: print('doing '+part) @@ -144,7 +144,6 @@ class Material(): re_sec = re.compile(r'^\[(.+)\]$') # pattern for section name_section = '' - idx_section = 0 active = False for line in content: @@ -197,8 +196,7 @@ class Material(): return saveFile def add_section(self, part=None, section=None, initialData=None, merge = False): - '''adding/updating''' - + """adding/updating""" part = part.lower() section = section.lower() if part not in self.parts: raise Exception('invalid part %s'%part) @@ -227,10 +225,10 @@ class Material(): def add_microstructure(self, section='', components={}, # dict of phase,texture, and fraction lists ): - ''' Experimental! Needs expansion to multi-constituent microstructures...''' - + """Experimental! 
Needs expansion to multi-constituent microstructures...""" microstructure = Microstructure() - components=dict((k.lower(), v) for k,v in components.iteritems()) # make keys lower case (http://stackoverflow.com/questions/764235/dictionary-to-lowercase-in-python) + # make keys lower case (http://stackoverflow.com/questions/764235/dictionary-to-lowercase-in-python) + components=dict((k.lower(), v) for k,v in components.iteritems()) for key in ['phase','texture','fraction','crystallite']: if type(components[key]) is not list: @@ -245,7 +243,8 @@ class Material(): except AttributeError: pass - for (phase,texture,fraction,crystallite) in zip(components['phase'],components['texture'],components['fraction'],components['crystallite']): + for (phase,texture,fraction,crystallite) in zip(components['phase'],components['texture'], + components['fraction'],components['crystallite']): microstructure.add_multiKey('constituent','phase %i\ttexture %i\tfraction %g\ncrystallite %i'%( self.data['phase']['__order__'].index(phase)+1, self.data['texture']['__order__'].index(texture)+1, @@ -259,8 +258,8 @@ class Material(): section=None, key=None, value=None): - if type(value) is not type([]): - if type(value) is not type('s'): + if not isinstance(value,list): + if not isinstance(value,str): value = '%s'%value value = [value] newlen = len(value) @@ -271,17 +270,3 @@ class Material(): if newlen is not oldlen: print('Length of value was changed from %i to %i!'%(oldlen,newlen)) - - -def ex1(): - mat=Material() - p=Phase({'constitution':'lump'}) - t=Texture() - t.add_component('gauss',{'eulers':[1,2,3]}) - mat.add_section('phase','phase1',p) - mat.add_section('texture','tex1',t) - mat.add_microstructure('mustruct1',{'phase':['phase1']*2,'texture':['tex1']*2,'fraction':[0.2]*2}) - print(mat) - mat.write(file='poop') - mat.write(file='poop',overwrite=True) - diff --git a/lib/damask/environment.py b/lib/damask/environment.py index 30fa7b31d..cd9b247e8 100644 --- a/lib/damask/environment.py +++ b/lib/damask/environment.py @@ -2,7 +2,7 @@ # $Id$ -import os,sys,string,re,subprocess,shlex +import os,subprocess,shlex class Environment(): __slots__ = [ \ diff --git a/lib/damask/geometry/__init__.py b/lib/damask/geometry/__init__.py index 3d35ad4fe..51199965b 100644 --- a/lib/damask/geometry/__init__.py +++ b/lib/damask/geometry/__init__.py @@ -1,7 +1,7 @@ # -*- coding: UTF-8 no BOM -*- -# $Id$ +"""Aggregator for geometry handling""" -from .geometry import Geometry # only one class -from .spectral import Spectral # only one class -from .marc import Marc # only one class +from .geometry import Geometry # noqa +from .spectral import Spectral # noqa +from .marc import Marc # noqa diff --git a/lib/damask/geometry/geometry.py b/lib/damask/geometry/geometry.py index 83a7c58a6..25a676b83 100644 --- a/lib/damask/geometry/geometry.py +++ b/lib/damask/geometry/geometry.py @@ -5,10 +5,11 @@ import damask.geometry class Geometry(): - ''' - General class for geometry parsing. - Sub-classed by the individual solvers. - ''' + """ + General class for geometry parsing. + + Sub-classed by the individual solvers. 
+ """ def __init__(self,solver=''): solverClass = { diff --git a/lib/damask/orientation.py b/lib/damask/orientation.py index c3f54dffa..886cd5a36 100644 --- a/lib/damask/orientation.py +++ b/lib/damask/orientation.py @@ -9,7 +9,6 @@ import numpy as np # ****************************************************************************************** class Rodrigues: -# ****************************************************************************************** def __init__(self, vector = np.zeros(3)): self.vector = vector @@ -28,20 +27,22 @@ class Rodrigues: # ****************************************************************************************** class Quaternion: -# ****************************************************************************************** - # All methods and naming conventions based off - # http://www.euclideanspace.com/maths/algebra/realNormedAlgebra/quaternions + """ + Orientation represented as unit quaternion + + All methods and naming conventions based on http://www.euclideanspace.com/maths/algebra/realNormedAlgebra/quaternions - # w is the real part, (x, y, z) are the imaginary parts - - # Representation of rotation is in ACTIVE form! - # (derived directly or through angleAxis, Euler angles, or active matrix) - # vector "a" (defined in coordinate system "A") is actively rotated to new coordinates "b" - # b = Q * a - # b = np.dot(Q.asMatrix(),a) + w is the real part, (x, y, z) are the imaginary parts + Representation of rotation is in ACTIVE form! + (derived directly or through angleAxis, Euler angles, or active matrix) + vector "a" (defined in coordinate system "A") is actively rotated to new coordinates "b" + b = Q * a + b = np.dot(Q.asMatrix(),a) + """ def __init__(self, quatArray = [1.0,0.0,0.0,0.0]): + """initializes to identity if not given""" self.w, \ self.x, \ self.y, \ @@ -49,19 +50,23 @@ class Quaternion: self.homomorph() def __iter__(self): + """components""" return iter([self.w,self.x,self.y,self.z]) def __copy__(self): + """create copy""" Q = Quaternion([self.w,self.x,self.y,self.z]) return Q copy = __copy__ def __repr__(self): + """readbable string""" return 'Quaternion(real=%+.6f, imag=<%+.6f, %+.6f, %+.6f>)' % \ (self.w, self.x, self.y, self.z) def __pow__(self, exponent): + """power""" omega = math.acos(self.w) vRescale = math.sin(exponent*omega)/math.sin(omega) Q = Quaternion() @@ -72,6 +77,7 @@ class Quaternion: return Q def __ipow__(self, exponent): + """in place power""" omega = math.acos(self.w) vRescale = math.sin(exponent*omega)/math.sin(omega) self.w = np.cos(exponent*omega) @@ -81,6 +87,7 @@ class Quaternion: return self def __mul__(self, other): + """multiplication""" try: # quaternion Aw = self.w Ax = self.x @@ -128,6 +135,7 @@ class Quaternion: return self.copy() def __imul__(self, other): + """in place multiplication""" try: # Quaternion Ax = self.x Ay = self.y @@ -145,6 +153,7 @@ class Quaternion: return self def __div__(self, other): + """division""" if isinstance(other, (int,float,long)): w = self.w / other x = self.x / other @@ -155,6 +164,7 @@ class Quaternion: return NotImplemented def __idiv__(self, other): + """in place division""" if isinstance(other, (int,float,long)): self.w /= other self.x /= other @@ -163,6 +173,7 @@ class Quaternion: return self def __add__(self, other): + """addition""" if isinstance(other, Quaternion): w = self.w + other.w x = self.x + other.x @@ -173,6 +184,7 @@ class Quaternion: return NotImplemented def __iadd__(self, other): + """in place division""" if isinstance(other, Quaternion): self.w += other.w 
self.x += other.x @@ -181,6 +193,7 @@ class Quaternion: return self def __sub__(self, other): + """subtraction""" if isinstance(other, Quaternion): Q = self.copy() Q.w -= other.w @@ -192,6 +205,7 @@ class Quaternion: return self.copy() def __isub__(self, other): + """in place subtraction""" if isinstance(other, Quaternion): self.w -= other.w self.x -= other.x @@ -200,6 +214,7 @@ class Quaternion: return self def __neg__(self): + """additive inverse""" self.w = -self.w self.x = -self.x self.y = -self.y @@ -207,6 +222,7 @@ class Quaternion: return self def __abs__(self): + """norm""" return math.sqrt(self.w ** 2 + \ self.x ** 2 + \ self.y ** 2 + \ @@ -215,6 +231,7 @@ class Quaternion: magnitude = __abs__ def __eq__(self,other): + """equal at e-8 precision""" return (abs(self.w-other.w) < 1e-8 and \ abs(self.x-other.x) < 1e-8 and \ abs(self.y-other.y) < 1e-8 and \ @@ -226,9 +243,11 @@ class Quaternion: abs(-self.z-other.z) < 1e-8) def __ne__(self,other): + """not equal at e-8 precision""" return not self.__eq__(self,other) def __cmp__(self,other): + """linear ordering""" return cmp(self.Rodrigues(),other.Rodrigues()) def magnitude_squared(self): @@ -290,9 +309,10 @@ class Quaternion: return np.outer([i for i in self],[i for i in self]) def asMatrix(self): - return np.array([[1.0-2.0*(self.y*self.y+self.z*self.z), 2.0*(self.x*self.y-self.z*self.w), 2.0*(self.x*self.z+self.y*self.w)], - [ 2.0*(self.x*self.y+self.z*self.w), 1.0-2.0*(self.x*self.x+self.z*self.z), 2.0*(self.y*self.z-self.x*self.w)], - [ 2.0*(self.x*self.z-self.y*self.w), 2.0*(self.x*self.w+self.y*self.z), 1.0-2.0*(self.x*self.x+self.y*self.y)]]) + return np.array( + [[1.0-2.0*(self.y*self.y+self.z*self.z), 2.0*(self.x*self.y-self.z*self.w), 2.0*(self.x*self.z+self.y*self.w)], + [ 2.0*(self.x*self.y+self.z*self.w), 1.0-2.0*(self.x*self.x+self.z*self.z), 2.0*(self.y*self.z-self.x*self.w)], + [ 2.0*(self.x*self.z-self.y*self.w), 2.0*(self.x*self.w+self.y*self.z), 1.0-2.0*(self.x*self.x+self.y*self.y)]]) def asAngleAxis(self, degrees = False): @@ -315,15 +335,17 @@ class Quaternion: return np.inf*np.ones(3) if self.w == 0.0 else np.array([self.x, self.y, self.z])/self.w def asEulers(self, - type = 'bunge', + type = "bunge", degrees = False, standardRange = False): - ''' + u""" + Orientation as Bunge-Euler angles + conversion of ACTIVE rotation to Euler angles taken from: Melcher, A.; Unser, A.; Reichhardt, M.; Nestler, B.; Pötschke, M.; Selzer, M. 
Conversion of EBSD data by a quaternion based algorithm to be used for grain structure simulations Technische Mechanik 30 (2010) pp 401--413 - ''' + """ angles = [0.0,0.0,0.0] if type.lower() == 'bunge' or type.lower() == 'zxz': @@ -369,7 +391,7 @@ class Quaternion: @classmethod def fromRandom(cls,randomSeed = None): - if randomSeed == None: + if randomSeed is None: randomSeed = int(os.urandom(4).encode('hex'), 16) np.random.seed(randomSeed) r = np.random.random(3) @@ -420,7 +442,6 @@ class Quaternion: y = - c1 * s2 * s3 + s1 * s2 * c3 z = c1 * c2 * s3 + s1 * c2 * c3 else: -# print 'unknown Euler convention' w = c1 * c2 * c3 - s1 * s2 * s3 x = s1 * s2 * c3 + c1 * c2 * s3 y = s1 * c2 * c3 + c1 * s2 * s3 @@ -428,7 +449,8 @@ class Quaternion: return cls([w,x,y,z]) -## Modified Method to calculate Quaternion from Orientation Matrix, Source: http://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuaternion/ +# Modified Method to calculate Quaternion from Orientation Matrix, +# Source: http://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuaternion/ @classmethod def fromMatrix(cls, m): @@ -482,8 +504,12 @@ class Quaternion: @classmethod def new_interpolate(cls, q1, q2, t): -# see http://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/20070017872_2007014421.pdf for (another?) way to interpolate quaternions - + """ + interpolation + + see http://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/20070017872_2007014421.pdf + for (another?) way to interpolate quaternions + """ assert isinstance(q1, Quaternion) and isinstance(q2, Quaternion) Q = cls() @@ -522,11 +548,11 @@ class Quaternion: # ****************************************************************************************** class Symmetry: -# ****************************************************************************************** lattices = [None,'orthorhombic','tetragonal','hexagonal','cubic',] def __init__(self, symmetry = None): + """lattice with given symmetry, defaults to None""" if isinstance(symmetry, basestring) and symmetry.lower() in Symmetry.lattices: self.lattice = symmetry.lower() else: @@ -534,29 +560,31 @@ class Symmetry: def __copy__(self): + """copy""" return self.__class__(self.lattice) copy = __copy__ def __repr__(self): + """readable string""" return '%s' % (self.lattice) def __eq__(self, other): + """equal""" return self.lattice == other.lattice - def __neq__(self, other): + """not equal""" return not self.__eq__(other) def __cmp__(self,other): + """linear ordering""" return cmp(Symmetry.lattices.index(self.lattice),Symmetry.lattices.index(other.lattice)) def symmetryQuats(self,who = []): - ''' - List of symmetry operations as quaternions. - ''' + """List of symmetry operations as quaternions.""" if self.lattice == 'cubic': symQuats = [ [ 1.0, 0.0, 0.0, 0.0 ], @@ -629,18 +657,15 @@ class Symmetry: def equivalentQuaternions(self, quaternion, who = []): - ''' - List of symmetrically equivalent quaternions based on own symmetry. - ''' + """List of symmetrically equivalent quaternions based on own symmetry.""" return [quaternion*q for q in self.symmetryQuats(who)] def inFZ(self,R): - ''' - Check whether given Rodrigues vector falls into fundamental zone of own symmetry.
- ''' + """Check whether given Rodrigues vector falls into fundamental zone of own symmetry.""" if isinstance(R, Quaternion): R = R.asRodrigues() # translate accidentially passed quaternion - R = abs(R) # fundamental zone in Rodrigues space is point symmetric around origin +# fundamental zone in Rodrigues space is point symmetric around origin + R = abs(R) if self.lattice == 'cubic': return math.sqrt(2.0)-1.0 >= R[0] \ and math.sqrt(2.0)-1.0 >= R[1] \ @@ -662,12 +687,13 @@ class Symmetry: def inDisorientationSST(self,R): - ''' + """ Check whether given Rodrigues vector (of misorientation) falls into standard stereographic triangle of own symmetry. + Determination of disorientations follow the work of A. Heinz and P. Neumann: Representation of Orientation and Disorientation Data for Cubic, Hexagonal, Tetragonal and Orthorhombic Crystals Acta Cryst. (1991). A47, 780-789 - ''' + """ if isinstance(R, Quaternion): R = R.asRodrigues() # translate accidentially passed quaternion epsilon = 0.0 @@ -691,11 +717,12 @@ class Symmetry: vector, proper = False, color = False): - ''' + """ Check whether given vector falls into standard stereographic triangle of own symmetry. + proper considers only vectors with z >= 0, hence uses two neighboring SSTs. Return inverse pole figure color if requested. - ''' + """ # basis = {'cubic' : np.linalg.inv(np.array([[0.,0.,1.], # direction of red # [1.,0.,1.]/np.sqrt(2.), # direction of green # [1.,1.,1.]/np.sqrt(3.)]).transpose()), # direction of blue @@ -752,15 +779,15 @@ class Symmetry: inSST = np.all(theComponents >= 0.0) else: v = np.array(vector,dtype = float) - if proper: # check both improper ... + if proper: # check both improper ... theComponents = np.dot(basis['improper'],v) inSST = np.all(theComponents >= 0.0) if not inSST: # ... 
and proper SST theComponents = np.dot(basis['proper'],v) inSST = np.all(theComponents >= 0.0) else: - v[2] = abs(v[2]) # z component projects identical for positive and negative values - theComponents = np.dot(basis['improper'],v) + v[2] = abs(v[2]) # z component projects identical for positive and negative values + theComponents = np.dot(basis['improper'],v) inSST = np.all(theComponents >= 0.0) if color: # have to return color array @@ -781,7 +808,6 @@ class Symmetry: # ****************************************************************************************** class Orientation: -# ****************************************************************************************** __slots__ = ['quaternion','symmetry'] @@ -791,7 +817,7 @@ class Orientation: angleAxis = None, matrix = None, Eulers = None, - random = False, # put any integer to have a fixed seed or True for real random + random = False, # integer to have a fixed seed or True for real random symmetry = None, ): if random: # produce random orientation @@ -815,12 +841,14 @@ class Orientation: self.symmetry = Symmetry(symmetry) def __copy__(self): + """copy""" return self.__class__(quaternion=self.quaternion,symmetry=self.symmetry.lattice) copy = __copy__ def __repr__(self): + """value as all implemented representations""" return 'Symmetry: %s\n' % (self.symmetry) + \ 'Quaternion: %s\n' % (self.quaternion) + \ 'Matrix:\n%s\n' % ( '\n'.join(['\t'.join(map(str,self.asMatrix()[i,:])) for i in range(3)]) ) + \ @@ -863,10 +891,7 @@ class Orientation: self.equivalentQuaternions(who)) def reduced(self): - ''' - Transform orientation to fall into fundamental zone according to symmetry - ''' - + """Transform orientation to fall into fundamental zone according to symmetry""" for me in self.symmetry.equivalentQuaternions(self.quaternion): if self.symmetry.inFZ(me.asRodrigues()): break @@ -876,13 +901,13 @@ class Orientation: def disorientation(self, other, SST = True): - ''' + """ Disorientation between myself and given other orientation. + Rotation axis falls into SST if SST == True. (Currently requires same symmetry for both orientations. Look into A. Heinz and P. Neumann 1991 for cases with differing sym.
- ''' - + """ if self.symmetry != other.symmetry: raise TypeError('disorientation between different symmetry classes not supported yet.') misQ = self.quaternion.conjugated()*other.quaternion @@ -900,32 +925,27 @@ class Orientation: if breaker: break if breaker: break +# disorientation, own sym, other sym, self-->other: True, self<--other: False return (Orientation(quaternion = theQ,symmetry = self.symmetry.lattice), - i,j,k == 1) # disorientation, own sym, other sym, self-->other: True, self<--other: False + i,j,k == 1) def inversePole(self, axis, proper = False, SST = True): - ''' - axis rotated according to orientation (using crystal symmetry to ensure location falls into SST) - ''' - + """axis rotated according to orientation (using crystal symmetry to ensure location falls into SST)""" if SST: # pole requested to be within SST for i,q in enumerate(self.symmetry.equivalentQuaternions(self.quaternion)): # test all symmetric equivalent quaternions pole = q.conjugated()*axis # align crystal direction to axis - if self.symmetry.inSST(pole,proper): break # found SST version + if self.symmetry.inSST(pole,proper): break # found SST version else: pole = self.quaternion.conjugated()*axis # align crystal direction to axis return (pole,i if SST else 0) def IPFcolor(self,axis): - ''' - TSL color of inverse pole figure for given axis - ''' - + """TSL color of inverse pole figure for given axis""" color = np.zeros(3,'d') for q in self.symmetry.equivalentQuaternions(self.quaternion): @@ -939,7 +959,9 @@ class Orientation: def average(cls, orientations, multiplicity = []): - """RETURN THE AVERAGE ORIENTATION + """ + average orientation + ref: F. Landis Markley, Yang Cheng, John Lucas Crassidis, and Yaakov Oshman. Averaging Quaternions, Journal of Guidance, Control, and Dynamics, Vol. 30, No. 4 (2007), pp. 1193-1197. @@ -949,7 +971,6 @@ class Orientation: b = Orientation(Eulers=np.radians([20, 0, 0]), symmetry='hexagonal') avg = Orientation.average([a,b]) """ - if not all(isinstance(item, Orientation) for item in orientations): raise TypeError("Only instances of Orientation can be averaged.") @@ -960,8 +981,7 @@ class Orientation: reference = orientations[0] # take first as reference for i,(o,n) in enumerate(zip(orientations,multiplicity)): closest = o.equivalentOrientations(reference.disorientation(o,SST = False)[2])[0] # select sym orientation with lowest misorientation - M = closest.quaternion.asM() * n if i == 0 else M + closest.quaternion.asM() * n # add (multiples) of this orientation to average - + M = closest.quaternion.asM() * n if i == 0 else M + closest.quaternion.asM() * n # noqa add (multiples) of this orientation to average noqa eig, vec = np.linalg.eig(M/N) return Orientation(quaternion = Quaternion(quatArray = np.real(vec.T[eig.argmax()])), diff --git a/lib/damask/result.py b/lib/damask/result.py index e65fa2afe..36e4ba81b 100644 --- a/lib/damask/result.py +++ b/lib/damask/result.py @@ -11,10 +11,11 @@ except (ImportError) as e: sys.stderr.write('\nREMARK: h5py module not available \n\n') class Result(): - ''' - General class for result parsing. - Needs h5py to be installed - ''' + """ + General class for result parsing. 
+ + Needs h5py to be installed + """ def __init__(self,resultsFile): self.data=h5py.File(resultsFile,"r") diff --git a/lib/damask/result/marc2vtk.py b/lib/damask/result/marc2vtk.py index 84c2a6313..a04c710d7 100644 --- a/lib/damask/result/marc2vtk.py +++ b/lib/damask/result/marc2vtk.py @@ -1,6 +1,5 @@ # -*- coding: UTF-8 no BOM -*- -# $Id$ # This tool converts a msc.marc result file into the vtk format that # can be viewed by Paraview software (Kitware), or MayaVi (needs xml-vtk, or ... # @@ -8,13 +7,8 @@ # Some example vtk files: http://people.sc.fsu.edu/~jburkardt/data/vtk/vtk.html # www.paraview.org -import os,sys,math,time,re -# python external -try: - import numpy as N - import numpy -except: - print('Could not import numpy.') +import os,sys,re +import numpy as np import py_post # MSC closed source module to access marc result files @@ -27,7 +21,7 @@ class MARC_POST(): self.fpath=os.path.join(self.projdir,self.postname) print('Trying to open ',self.fpath,' ...') self.p=py_post.post_open(self.fpath) - if self.p==None: + if self.p is None: print('Could not open %s.'%self.postname); #return 'err'#; sys.exit(1) raise Exception('Could not open t16') print('Postfile %s%s is open ...'%(self.projdir,self.postname)) @@ -105,7 +99,6 @@ class MARC_POST(): def writeNodes2VTK(self, fobj): self.points=[] self.VTKcnt=200 # number of values per line in vtk file - ndCnt=1 fobj.write('POINTS %i'%self.p.nodes()+' float\n') self.nodes_dict={} # store the node IDs in case of holes in the numbering for iNd in self.nodes: @@ -126,8 +119,6 @@ class MARC_POST(): el=self.p.element(iEl) cell_nodes=[] # for pyvtk ndlist=el.items - #for k in [0, 1, 3, 2, 4, 5, 7, 6]: # FOR CELL TPYE VTK_VOXEL - #for k in [0, 4, 3, 1, 5, 7, 6, 2]: for k in [0, 1, 2, 3, 4, 5, 6, 7]: # FOR CELL TYPE VTK_HEXAHEDRON node=ndlist[k]-1 cell_nodes.append(self.nodes_dict[node]) @@ -147,7 +138,6 @@ class MARC_POST(): fobj.write('\n');cnt=0 fobj.write('\n') print('Elements written to VTK: %i'%self.p.elements()) - #print('Nr of nodes: ',self.nodes) def writeElScalars2NodesVTK(self,fobj): fobj.write('\nPOINT_DATA %i\n'%self.p.nodes()) @@ -157,7 +147,6 @@ class MARC_POST(): fobj.write('LOOKUP_TABLE default\n') idxScal=self.nscal_list.index('Displacement Z') for iNd in self.nodes: - #fobj.write('%f %f '%(self.p.node_scalar(iNd,idxScal), N.random.rand())) fobj.write('%f '%(self.p.node_scalar(iNd,idxScal))) for iEl in range(0,self.nel): el=self.p.element(iEl) @@ -173,8 +162,6 @@ class MARC_POST(): def writeNodeScalars2VTK(self,fobj): fobj.write('\nPOINT_DATA %i\n'%self.p.nodes()) - nNdDat=self.nscals - nComponents=1+nNdDat self.pointDataScalars=[] for idxNdScal in range(-3,self.nscals): #now include node x,y,z if idxNdScal>=0: @@ -209,8 +196,6 @@ class MARC_POST(): idx_sig_vMises=self.getLabelNr('Equivalent Von Mises Stress') idx_sig33=self.getLabelNr('Comp 33 of Cauchy Stress') fobj.write('\nCELL_DATA %i\n'%self.p.elements()) - nElDat=self.elscals - nComponents=1+nElDat for idxElScal in range(0,self.elscals): datalabel=self.elscal_list[idxElScal] datalabel=re.sub("\s",'_',datalabel) @@ -250,19 +235,16 @@ class MARC_POST(): result.append(avgScal) return result - def writeUniaxiality2VTK(self,fobj): - #fobj.write('\nCELL_DATA %i\n'%self.p.elements()) + def writeUniaxiality2VTK(self,fobj): datalabel='uniaxiality_sig_vMises_durch_sig33' fobj.write('SCALARS %s float %i\n'%(datalabel,1)) fobj.write('LOOKUP_TABLE default\n') cnt=0 for iEl in range(0,self.nel): cnt=cnt+1 - #if abs(self.sig33[iEl])<1e-5: if abs(self.sig_vMises[iEl])<1e-5: datum=0. 
else: - #datum=self.sig_vMises[iEl]/self.sig33[iEl] datum=self.sig33[iEl]/self.sig_vMises[iEl] fobj.write('%E '%(datum)) if cnt>self.VTKcnt: @@ -283,8 +265,8 @@ class MARC_POST(): self.mean_stress.append(self.meanStress(sig)) def triaxiality_per_element(self): - # classical triaxiality - # 1/3 : uniax tension + # classical triaxiality + # 1/3 : uniax tension self.triaxiality=[] for iEl in range(0,self.nel): t=self.mean_stress[iEl]/self.sig_vMises[iEl] @@ -303,10 +285,6 @@ class MARC_POST(): fobj.write('\n') def calc_lode_parameter(self): - # [-1 ... +1] see e.g. Wippler & Boehlke triaxiality measures doi:10.1002/pamm.201010061 - # +1 : uniax tensile? - # 0 : shear - # -1 : uniax compr ? self.lode=[] try: self.stress @@ -328,10 +306,11 @@ class MARC_POST(): def princStress(self, stress): """ Function to compute 3D principal stresses and sort them. + from: http://geodynamics.org/svn/cig/short/3D/PyLith/trunk/playpen/postproc/vtkcff.py """ - stressMat=N.array(stress) - (princStress, princAxes) = numpy.linalg.eigh(stressMat) + stressMat=np.array(stress) + (princStress, princAxes) = np.linalg.eigh(stressMat) idx = princStress.argsort() princStressOrdered = princStress[idx] princAxesOrdered = princAxes[:,idx] @@ -339,36 +318,28 @@ class MARC_POST(): def avg_elten(self, idxElTen, mat=0, elID=None): - tensum=N.zeros((3,3)); - T=N.zeros((3,3)); + tensum=np.zeros((3,3)); + T=np.zeros((3,3)); pts=0; - avg=N.zeros((3,3)); - #print 'Element Scalars' - #print self.p.element_scalar_label(elscal2) - if elID==None: + avg=np.zeros((3,3)); + + if elID is None: averaged_elements=range(0,self.nel) else: averaged_elements=[elID] - #for i in range (0,self.nel): for i in averaged_elements: if mat==0 or int(self.p.element_scalar(i,4)[0].value)==mat: - eldata=self.p.element(i) T=self.p.element_tensor(i,idxElTen) for k in range (0,8): tensum[0][0] = tensum[0][0] + T[k].t11 tensum[0][1] = tensum[0][1] + T[k].t12 tensum[0][2] = tensum[0][2] + T[k].t13 - #tensum1[1][0] = tensum1[1][0] + T1[k].t21 tensum[1][1] = tensum[1][1] + T[k].t22 tensum[1][2] = tensum[1][2] + T[k].t23 - #tensum1[2][0] = tensum1[2][0] + T1[k].t31 - #tensum1[2][1] = tensum1[2][1] + T1[k].t32 tensum[2][2] = tensum[2][2] + T[k].t33 pts=pts+1 avg=tensum/pts - #print avg avg=self.fillComponents(avg) - #print avg del [T] return (avg,tensum,pts) @@ -384,7 +355,7 @@ class MARC_POST(): t=tensor33 s=(t[0,0]-t[1,1])**2+(t[1,1]-t[2,2])**2+(t[0,0]-t[2,2])**2+\ 6*(t[0,1]**2+t[1,2]**2+t[2,0]**2) - vM=N.sqrt(s/2.) + vM=np.sqrt(s/2.) return vM def meanStress(self,tensor33): @@ -397,8 +368,7 @@ class MARC_POST(): t=tensor33 I1=t[0,0]+t[1,1]+t[2,2] I2=t[0,0]*t[1,1]+t[1,1]*t[2,2]+t[0,0]*t[2,2]-\ - t[0,1]**2-t[1,2]**2-t[0,2]**2 - # I3 = det(t) + t[0,1]**2-t[1,2]**2-t[0,2]**2 I3=t[0,0]*t[1,1]*t[2,2]+\ 2*t[0,1]*t[1,2]*t[2,0]-\ t[2,2]*t[0,1]**2-t[0,0]*t[1,2]**2-t[1,1]*t[0,2]**2 @@ -406,17 +376,18 @@ class MARC_POST(): class VTK_WRITER(): - ''' - The resulting vtk-file can be imported in Paraview 3.12 - Then use Filters: Cell Data to Point Data + Contour - to plot semi-transparent iso-surfaces. - ''' + """ + The resulting vtk-file can be imported in Paraview 3.12 + + Then use Filters: Cell Data to Point Data + Contour + to plot semi-transparent iso-surfaces. 
+ """ + import re def __init__(self): self.p=MARC_POST() # self.p def openFile(self, filename='test.vtp'): - #if not self.f:#==None: self.f=open(filename,'w+') self.fname=filename @@ -427,7 +398,7 @@ class VTK_WRITER(): dformat='ASCII', # BINARY | [ASCII] dtype='UNSTRUCTURED_GRID' # UNSTRUCTURED GRID ): - if vtkFile==None: + if vtkFile is None: vtkFile=self.f # First Line contains Data format version self.versionVTK=version @@ -440,7 +411,6 @@ class VTK_WRITER(): def marc2vtkBatch(self): for iori in range(1,63): - #self.p=msc_post.MSC_POST() self.p.postname='indent_fric0.3_R2.70_cA146.0_h0.320_ori%03i_OST_h19d.t16'%(iori) if os.path.exists(self.p.postname): self.marc2vtk(mode='fast', batchMode=1) @@ -496,14 +466,14 @@ class VTK_WRITER(): def scaleBar(self, length=1.0, posXYZ=[0., 0., 0.]): self.fsb=open('micronbar_l%.1f.vtp'%length,'w+') self.writeFirstLines(self.fsb, comment='micronbar') - pts=N.array([]) + pts=np.array([]) width=length*1. height=length*1. - wVec=N.array([0., width, 0.]) - lVec=N.array([length,0.,0.]) - hVec=N.array([0.,0.,height]) + wVec=np.array([0., width, 0.]) + lVec=np.array([length,0.,0.]) + hVec=np.array([0.,0.,height]) posXYZ=posXYZ-0.5*wVec-0.5*lVec#-0.5*hVec # CENTERING Y/N - posXYZ=N.array(posXYZ) + posXYZ=np.array(posXYZ) pts=[posXYZ, posXYZ+lVec, posXYZ+wVec, posXYZ+wVec+lVec] @@ -514,34 +484,22 @@ class VTK_WRITER(): self.fsb.write('%f %f %f\n'%(pts[npts][0], pts[npts][1], pts[npts][2])) if 1: #Triad nCells=3 - #nCells=1 #One Line ptsPerCell=2 # Lines (Type=3) - #ptsPerCell=4 # Quads (Type=9) - #ptsPerCell=8 # Hexahedron (Type=12) cellSize=(ptsPerCell+1)*nCells self.fsb.write('CELLS %i %i\n'%(nCells,cellSize)) self.fsb.write('2 0 1\n') #X-Line self.fsb.write('2 0 2\n') #Y-Line self.fsb.write('2 0 4\n') #Z-Line - #self.fsb.write('4 0 1 3 2\n') #Quad - #self.fsb.write('%i 0 1 3 2 4 5 7 6\n'%ptsPerCell) #Hexahedron self.fsb.write('CELL_TYPES %i\n'%(nCells)) self.fsb.write('3\n3\n3\n')#Line - #self.fsb.write('12\n')#Hexahedron else: # Cube, change posXYZ nCells=1 ptsPerCell=2 # Lines (Type=3) - #ptsPerCell=4 # Quads (Type=9) - #ptsPerCell=8 # Hexahedron (Type=12) cellSize=(ptsPerCell+1)*nCells self.fsb.write('CELLS %i %i\n'%(nCells,cellSize)) self.fsb.write('2 0 1\n') #Line - #self.fsb.write('4 0 1 3 2\n') #Quad - #self.fsb.write('%i 0 1 3 2 4 5 7 6\n'%ptsPerCell) #Hexahedron self.fsb.write('CELL_TYPES %i\n'%(nCells)) self.fsb.write('3\n')#Line - #self.fsb.write('12\n')#Hexahedron - self.fsb.write('\n') self.fsb.close() @@ -549,8 +507,7 @@ class VTK_WRITER(): def example_unstructured(self): self.openFile(filename='example_unstructured_grid.vtk') - #self.writeFirstLines() - self.f.write(''' + self.f.write(""" # vtk DataFile Version 2.0 example_unstruct_grid ASCII @@ -590,61 +547,40 @@ LOOKUP_TABLE default 1.02 1.50 0.00 -3 5 6 23423423423423423423.23423423''') +3 5 6 23423423423423423423.23423423""") self.f.close() def writeNodes2VTK(self, fobj): self.VTKcnt=200 # how many numbers per line in vtk file - #self.VTKcnt=6 - ndCnt=1 - #self.nodes=range(0,10) fobj.write('POINTS %i'%self.p.nodes()+' float\n') for iNd in self.nodes: nd=self.p.node(iNd) disp=self.p.node_displacement(iNd) - #contact=self.p.node_scalar(iNd,contactNr) - #ndCnt=ndCnt+1 fobj.write('%f %f %f \n'% - #(nd.x, nd.y, nd.z)) (nd.x+disp[0], nd.y+disp[1], nd.z+disp[2])) - - #if ndCnt>6: - # fobj.write('\n') - # ndCnt=1 fobj.write('\n') print('Nodes written to VTK: %i'%self.p.nodes()) - #print('Nr of nodes: ',self.nodes) def writeElements2VTK(self, fobj): fobj.write('\nCELLS %i 
%i'%(self.p.elements(),self.p.elements()*9)+'\n') for iEl in range(0,self.nel): el=self.p.element(iEl) - #disp=self.p.node_displacement(iNd) - #contact=self.p.node_scalar(iNd,contactNr) - #ndCnt=ndCnt+1 fobj.write('8 ') ndlist=el.items - #for k in [0, 1, 3, 2, 4, 5, 7, 6]: # FOR CELL TPYE VTK_VOXEL - #for k in [0, 4, 3, 1, 5, 7, 6, 2]: for k in [0, 1, 2, 3, 4, 5, 6, 7]: # FOR CELL TYPE VTK_HEXAHEDRON fobj.write('%6i '%(ndlist[k]-1)) fobj.write('\n') - #if ndCnt>6: - # fobj.write('\n') - # ndCnt=1 fobj.write('\nCELL_TYPES %i'%self.p.elements()+'\n') cnt=0 for iEl in range(0,self.nel): cnt=cnt+1 - #fobj.write('11\n') #VTK_VOXEL fobj.write('12 ') #VTK_HEXAHEDRON if cnt>self.VTKcnt: fobj.write('\n');cnt=0 fobj.write('\n') print('Elements written to VTK: %i'%self.p.elements()) - #print('Nr of nodes: ',self.nodes) def writeElScalars2NodesVTK(self,fobj): fobj.write('\nPOINT_DATA %i\n'%self.p.nodes()) @@ -668,10 +604,7 @@ LOOKUP_TABLE default fobj.write('\n') def writeNodeScalars2VTK(self,fobj): - #print('writeElementData2VTK') fobj.write('\nPOINT_DATA %i\n'%self.p.nodes()) - nNdDat=self.nscals - nComponents=1+nNdDat for idxNdScal in range(-3,self.nscals): # include node x,y,z if idxNdScal>=0: datalabel=self.nscal_list[idxNdScal] @@ -700,10 +633,7 @@ LOOKUP_TABLE default fobj.write('\n') def writeElementData2VTK(self,fobj): - #print('writeElementData2VTK') fobj.write('\nCELL_DATA %i\n'%self.p.elements()) - nElDat=self.elscals - nComponents=1+nElDat for idxElScal in range(0,self.elscals): datalabel=self.elscal_list[idxElScal] datalabel=re.sub("\s",'_',datalabel) @@ -730,7 +660,7 @@ LOOKUP_TABLE default def example1(self): self.openFile() self.writeFirstLines() - self.f.write('''DATASET POLYDATA + self.f.write("""DATASET POLYDATA POINTS 8 float 0.0 0.0 0.0 1.0 0.0 0.0 @@ -789,18 +719,20 @@ LOOKUP_TABLE my_table 8 0.0 0.0 1.0 1.0 1.0 0.0 1.0 1.0 0.0 1.0 1.0 1.0 -1.0 1.0 1.0 1.0''') +1.0 1.0 1.0 1.0""") self.f.close() import pyvtk class marc_to_vtk(): - ''' - Anybody wants to implement it with pyvtk? - The advantage would be that pyvtk can also wirte the - <xml>-VTK format and binary. - These can be plotted with mayavi. - ''' + """ + Does anybody want to implement this with pyvtk? + + The advantage would be that pyvtk can also write the + <xml>-VTK format and binary. + These can be plotted with mayavi.
+ """ + def __init__(self): self.p=[]#MARC_POST() # self.p @@ -810,5 +742,4 @@ class marc_to_vtk(): hexahedron=self.p.cells), 'm2v output') vtk.tofile('m2v_file') - #vtk.tofile('example3b','binary') - #VtkData('example3') \ No newline at end of file + diff --git a/lib/damask/solver/__init__.py b/lib/damask/solver/__init__.py index aebeae33e..cd8f0b193 100644 --- a/lib/damask/solver/__init__.py +++ b/lib/damask/solver/__init__.py @@ -1,8 +1,7 @@ # -*- coding: UTF-8 no BOM -*- +"""Tools to control the various BVP solvers""" -# $Id$ - -from .solver import Solver # only one class -from .spectral import Spectral # only one class -from .marc import Marc # only one class -from .abaqus import Abaqus # only one class +from .solver import Solver # noqa +from .spectral import Spectral # noqa +from .marc import Marc # noqa +from .abaqus import Abaqus # noqa diff --git a/lib/damask/solver/abaqus.py b/lib/damask/solver/abaqus.py index f016e2794..7a8321a9c 100644 --- a/lib/damask/solver/abaqus.py +++ b/lib/damask/solver/abaqus.py @@ -7,7 +7,8 @@ from .solver import Solver class Abaqus(Solver): - def __init__(self,version='',solver=''): # example of version string: 6.12-2, solver: either std or exp + + def __init__(self,version='',solver=''): # example version string: 6.12-2, solver: std or exp self.solver='Abaqus' if version =='': import subprocess diff --git a/lib/damask/solver/marc.py b/lib/damask/solver/marc.py index 3b47ee773..660f2f6ef 100644 --- a/lib/damask/solver/marc.py +++ b/lib/damask/solver/marc.py @@ -7,9 +7,7 @@ from .solver import Solver class Marc(Solver): -#-------------------------- def __init__(self): -#-------------------------- self.solver = 'Marc' self.releases = { \ '2015': ['linux64',''], @@ -24,7 +22,6 @@ class Marc(Solver): #-------------------------- def version(self,rootRelation = ''): -#-------------------------- import os,damask.environment MSCpath = damask.environment.Environment(rootRelation).options['MSC_ROOT'] @@ -40,7 +37,6 @@ class Marc(Solver): #-------------------------- def libraryPath(self,rootRelation = '',releases = []): -#-------------------------- import os,damask.environment MSCpath = damask.environment.Environment(rootRelation).options['MSC_ROOT'] @@ -59,7 +55,6 @@ class Marc(Solver): #-------------------------- def toolsPath(self,rootRelation = '',release = ''): -#-------------------------- import os,damask.environment MSCpath = damask.environment.Environment(rootRelation).options['MSC_ROOT'] @@ -72,7 +67,6 @@ class Marc(Solver): #-------------------------- def submit_job(self, -#-------------------------- rootRelation = '', release = '', model = 'model', @@ -84,7 +78,7 @@ class Marc(Solver): ): import os,damask.environment - import subprocess,shlex,shutil + import subprocess,shlex if len(release) == 0: release = self.version(rootRelation) @@ -94,7 +88,7 @@ class Marc(Solver): damaskEnv = damask.environment.Environment(rootRelation) - user = os.path.join(damaskEnv.relPath('code/'),'DAMASK_marc') # might be updated if special version is found (usually symlink) + user = os.path.join(damaskEnv.relPath('code/'),'DAMASK_marc') # might be updated if special version (symlink) is found if compile: if os.path.isfile(os.path.join(damaskEnv.relPath('code/'),'DAMASK_marc%s.f90'%release)): user = os.path.join(damaskEnv.relPath('code/'),'DAMASK_marc%s'%release) @@ -123,7 +117,6 @@ class Marc(Solver): #-------------------------- def exit_number_from_outFile(self,outFile=None): -#-------------------------- import string exitnumber = -1 fid_out = open(outFile,'r') diff --git 
a/lib/damask/solver/solver.py b/lib/damask/solver/solver.py index bef87ed4a..53c055f05 100644 --- a/lib/damask/solver/solver.py +++ b/lib/damask/solver/solver.py @@ -5,10 +5,11 @@ import damask.solver class Solver(): - ''' - General class for solver specific functionality. - Sub-classed by the individual solvers. - ''' + """ + General class for solver specific functionality. + + Sub-classed by the individual solvers. + """ def __init__(self,solver=''): solverClass = { diff --git a/lib/damask/test/__init__.py b/lib/damask/test/__init__.py index 9ecfb0c57..ba8d14a00 100644 --- a/lib/damask/test/__init__.py +++ b/lib/damask/test/__init__.py @@ -1,5 +1,5 @@ # -*- coding: UTF-8 no BOM -*- -# $Id$ +"""Test functionality""" -from .test import Test +from .test import Test # noqa diff --git a/lib/damask/test/test.py b/lib/damask/test/test.py index 235d74bcd..826b190c1 100644 --- a/lib/damask/test/test.py +++ b/lib/damask/test/test.py @@ -2,17 +2,19 @@ # $Id$ -import os, sys, shlex, inspect -import subprocess,shutil,string -import logging, logging.config +import os,sys,shutil +import logging,logging.config import damask +import numpy as np +from collections import Iterable from optparse import OptionParser class Test(): - ''' - General class for testing. - Is sub-classed by the individual tests. - ''' + """ + General class for testing. + + Is sub-classed by the individual tests. + """ variants = [] @@ -20,11 +22,11 @@ class Test(): logger = logging.getLogger() logger.setLevel(0) - fh = logging.FileHandler('test.log') # create file handler which logs even debug messages + fh = logging.FileHandler('test.log') # create file handler which logs even debug messages fh.setLevel(logging.DEBUG) full = logging.Formatter('%(asctime)s - %(levelname)s: \n%(message)s') fh.setFormatter(full) - ch = logging.StreamHandler(stream=sys.stdout) # create console handler with a higher log level + ch = logging.StreamHandler(stream=sys.stdout) # create console handler with a higher log level ch.setLevel(logging.INFO) # create formatter and add it to the handlers plain = logging.Formatter('%(message)s') @@ -52,18 +54,16 @@ class Test(): accept=False) def execute(self): - ''' - Run all variants and report first failure. - ''' + """Run all variants and report first failure.""" if self.options.debug: for variant in xrange(len(self.variants)): try: self.postprocess(variant) if not self.compare(variant): - return variant+1 # return culprit + return variant+1 # return culprit except Exception as e : - logging.critical('\nWARNING:\n %s\n'%e) - return variant+1 # return culprit + logging.critical('\nWARNING:\n {}\n'.format(e)) + return variant+1 # return culprit return 0 else: if not self.testPossible(): return -1 @@ -74,139 +74,109 @@ class Test(): self.prepare(variant) self.run(variant) self.postprocess(variant) - if self.updateRequested: # update requested + if self.updateRequested: # update requested self.update(variant) - elif not (self.options.accept or self.compare(variant)): # no update, do comparison - return variant+1 # return culprit + elif not (self.options.accept or self.compare(variant)): # no update, do comparison + return variant+1 # return culprit except Exception as e : - logging.critical('\nWARNING:\n %s\n'%e) - return variant+1 # return culprit + logging.critical('\nWARNING:\n {}\n'.format(e)) + return variant+1 # return culprit return 0 def testPossible(self): - ''' - Check if test is possible or not (e.g. no license available). - ''' + """Check if test is possible or not (e.g. 
no license available).""" return True def clean(self): - ''' - Delete directory tree containing current results. - ''' + """Delete directory tree containing current results.""" status = True try: shutil.rmtree(self.dirCurrent()) except: - logging.warning('removal of directory "%s" not possible...'%(self.dirCurrent())) + logging.warning('removal of directory "{}" not possible...'.format(self.dirCurrent())) status = status and False try: os.mkdir(self.dirCurrent()) except: - logging.critical('creation of directory "%s" failed...'%(self.dirCurrent())) + logging.critical('creation of directory "{}" failed...'.format(self.dirCurrent())) status = status and False return status def prepareAll(self): - ''' - Do all necessary preparations for the whole test - ''' + """Do all necessary preparations for the whole test""" return True def prepare(self,variant): - ''' - Do all necessary preparations for the run of each test variant - ''' + """Do all necessary preparations for the run of each test variant""" return True def run(self,variant): - ''' - Execute the requested test variant. - ''' + """Execute the requested test variant.""" return True def postprocess(self,variant): - ''' - Perform post-processing of generated results for this test variant. - ''' + """Perform post-processing of generated results for this test variant.""" return True def compare(self,variant): - ''' - Compare reference to current results. - ''' + """Compare reference to current results.""" return True def update(self,variant): - ''' - Update reference with current results. - ''' + """Update reference with current results.""" logging.debug('Update not necessary') return True def dirReference(self): - ''' - Directory containing reference results of the test. - ''' + """Directory containing reference results of the test.""" return os.path.normpath(os.path.join(self.dirBase,'reference/')) def dirCurrent(self): - ''' - Directory containing current results of the test. - ''' + """Directory containing current results of the test.""" return os.path.normpath(os.path.join(self.dirBase,'current/')) def dirProof(self): - ''' - Directory containing human readable proof of correctness for the test. - ''' + """Directory containing human readable proof of correctness for the test.""" return os.path.normpath(os.path.join(self.dirBase,'proof/')) def fileInRoot(self,dir,file): - ''' - Path to a file in the root directory of DAMASK. - ''' + """Path to a file in the root directory of DAMASK.""" return os.path.join(damask.Environment().rootDir(),dir,file) def fileInReference(self,file): - ''' - Path to a file in the refrence directory for the test. - ''' + """Path to a file in the reference directory for the test.""" return os.path.join(self.dirReference(),file) def fileInCurrent(self,file): - ''' - Path to a file in the current results directory for the test. - ''' + """Path to a file in the current results directory for the test.""" return os.path.join(self.dirCurrent(),file) def fileInProof(self,file): - ''' - Path to a file in the proof directory for the test. - ''' + """Path to a file in the proof directory for the test.""" return os.path.join(self.dirProof(),file) - + def copy(self, mapA, mapB, A = [], B = []): - ''' + """ copy list of files from (mapped) source to target. + mapA/B is one of self.fileInX.
- ''' - + """ if not B or len(B) == 0: B = A for source,target in zip(map(mapA,A),map(mapB,B)): @@ -223,19 +193,19 @@ class Test(): try: shutil.copy2(self.fileInReference(file),self.fileInCurrent(targetfiles[i])) except: - logging.critical('Reference2Current: Unable to copy file %s'%file) + logging.critical('Reference2Current: Unable to copy file "{}"'.format(file)) def copy_Base2Current(self,sourceDir,sourcefiles=[],targetfiles=[]): - source=os.path.normpath(os.path.join(self.dirBase,'../../../'+sourceDir)) + source=os.path.normpath(os.path.join(self.dirBase,'../../..',sourceDir)) if len(targetfiles) == 0: targetfiles = sourcefiles for i,file in enumerate(sourcefiles): try: shutil.copy2(os.path.join(source,file),self.fileInCurrent(targetfiles[i])) except: logging.error(os.path.join(source,file)) - logging.critical('Base2Current: Unable to copy file %s'%file) + logging.critical('Base2Current: Unable to copy file "{}"'.format(file)) def copy_Current2Reference(self,sourcefiles=[],targetfiles=[]): @@ -245,7 +215,7 @@ class Test(): try: shutil.copy2(self.fileInCurrent(file),self.fileInReference(targetfiles[i])) except: - logging.critical('Current2Reference: Unable to copy file %s'%file) + logging.critical('Current2Reference: Unable to copy file "{}"'.format(file)) def copy_Proof2Current(self,sourcefiles=[],targetfiles=[]): @@ -255,7 +225,7 @@ class Test(): try: shutil.copy2(self.fileInProof(file),self.fileInCurrent(targetfiles[i])) except: - logging.critical('Proof2Current: Unable to copy file %s'%file) + logging.critical('Proof2Current: Unable to copy file "{}"'.format(file)) def copy_Current2Current(self,sourcefiles=[],targetfiles=[]): @@ -264,7 +234,7 @@ class Test(): try: shutil.copy2(self.fileInReference(file),self.fileInCurrent(targetfiles[i])) except: - logging.critical('Current2Current: Unable to copy file %s'%file) + logging.critical('Current2Current: Unable to copy file "{}"'.format(file)) def execute_inCurrentDir(self,cmd,streamIn=None): @@ -282,7 +252,7 @@ class Test(): def compare_Array(self,File1,File2): import numpy as np - logging.info('comparing\n '+File1+'\n '+File2) + logging.info('\n '.join(['comparing',File1,File2])) table1 = damask.ASCIItable(name=File1,readonly=True) table1.head_read() len1=len(table1.info)+2 @@ -300,8 +270,9 @@ class Test(): max_loc=np.argmax(abs(refArrayNonZero[curArray.nonzero()]/curArray[curArray.nonzero()]-1.)) refArrayNonZero = refArrayNonZero[curArray.nonzero()] curArray = curArray[curArray.nonzero()] - print(' ********\n * maximum relative error %e for %e and %e\n ********' - %(max_err, refArrayNonZero[max_loc],curArray[max_loc])) + print(' ********\n * maximum relative error {} between {} and {}\n ********'.format(max_err, + refArrayNonZero[max_loc], + curArray[max_loc])) return max_err else: raise Exception('mismatch in array size to compare') @@ -325,10 +296,11 @@ class Test(): absoluteTolerance=False,perLine=False,skipLines=[]): import numpy as np - logging.info('comparing ASCII Tables\n %s \n %s'%(file0,file1)) + logging.info('\n '.join(['comparing ASCII Tables',file0,file1])) if normHeadings == '': normHeadings = headings0 - if len(headings0) == len(headings1) == len(normHeadings): #check if comparison is possible and determine lenght of columns +# check if comparison is possible and determine lenght of columns + if len(headings0) == len(headings1) == len(normHeadings): dataLength = len(headings0) length = [1 for i in xrange(dataLength)] shape = [[] for i in xrange(dataLength)] @@ -344,7 +316,7 @@ class Test(): for i in xrange(dataLength): 
if headings0[i]['shape'] != headings1[i]['shape']: - raise Exception('shape mismatch when comparing %s with %s '%(headings0[i]['label'],headings1[i]['label'])) + raise Exception('shape mismatch between {} and {} '.format(headings0[i]['label'],headings1[i]['label'])) shape[i] = headings0[i]['shape'] for j in xrange(np.shape(shape[i])[0]): length[i] *= shape[i][j] @@ -352,7 +324,9 @@ class Test(): for j in xrange(np.shape(normShape[i])[0]): normLength[i] *= normShape[i][j] else: - raise Exception('trying to compare %i with %i normed by %i data sets'%(len(headings0),len(headings1),len(normHeadings))) + raise Exception('trying to compare {} with {} normed by {} data sets'.format(len(headings0), + len(headings1), + len(normHeadings))) table0 = damask.ASCIItable(name=file0,readonly=True) table0.head_read() @@ -360,37 +334,34 @@ class Test(): table1.head_read() for i in xrange(dataLength): - key0 = {True :'1_%s', - False:'%s' }[length[i]>1]%headings0[i]['label'] - key1 = {True :'1_%s', - False:'%s' }[length[i]>1]%headings1[i]['label'] - normKey = {True :'1_%s', - False:'%s' }[normLength[i]>1]%normHeadings[i]['label'] + key0 = ('1_' if length[i]>1 else '') + headings0[i]['label'] + key1 = ('1_' if length[i]>1 else '') + headings1[i]['label'] + normKey = ('1_' if normLength[i]>1 else '') + normHeadings[i]['label'] if key0 not in table0.labels: - raise Exception('column %s not found in 1. table...\n'%key0) + raise Exception('column {} not found in 1. table...\n'.format(key0)) elif key1 not in table1.labels: - raise Exception('column %s not found in 2. table...\n'%key1) + raise Exception('column {} not found in 2. table...\n'.format(key1)) elif normKey not in table0.labels: - raise Exception('column %s not found in 1. table...\n'%normKey) + raise Exception('column {} not found in 1. table...\n'.format(normKey)) else: - column[0][i] = table0.labels.index(key0) # remember columns of requested data - column[1][i] = table1.labels.index(key1) # remember columns of requested data in second column - normColumn[i] = table0.labels.index(normKey) # remember columns of requested data in second column + column[0][i] = table0.labels.index(key0) + column[1][i] = table1.labels.index(key1) + normColumn[i] = table0.labels.index(normKey) line0 = 0 - while table0.data_read(): # read next data line of ASCII table + while table0.data_read(): # read next data line of ASCII table if line0 not in skipLines: for i in xrange(dataLength): myData = np.array(map(float,table0.data[column[0][i]:\ - column[0][i]+length[i]]),'d') + column[0][i]+length[i]]),'d') normData = np.array(map(float,table0.data[normColumn[i]:\ - normColumn[i]+normLength[i]]),'d') + normColumn[i]+normLength[i]]),'d') data[i] = np.append(data[i],np.reshape(myData,shape[i])) if normType == 'pInf': norm[i] = np.append(norm[i],np.max(np.abs(normData))) else: norm[i] = np.append(norm[i],np.linalg.norm(np.reshape(normData,normShape[i]),normType)) - line0 +=1 + line0 += 1 for i in xrange(dataLength): if not perLine: norm[i] = [np.max(norm[i]) for j in xrange(line0-len(skipLines))] @@ -399,12 +370,12 @@ class Test(): norm[i] = [1.0 for j in xrange(line0-len(skipLines))] absTol[i] = True if perLine: - logging.warning('At least one norm of %s in 1. table is 0.0, using absolute tolerance'%headings0[i]['label']) + logging.warning('At least one norm of {} in 1. table is 0.0, using absolute tolerance'.format(headings0[i]['label'])) else: - logging.warning('Maximum norm of %s in 1. 
table is 0.0, using absolute tolerance'%headings0[i]['label']) + logging.warning('Maximum norm of {} in 1. table is 0.0, using absolute tolerance'.format(headings0[i]['label'])) line1 = 0 - while table1.data_read(): # read next data line of ASCII table + while table1.data_read(): # read next data line of ASCII table if line1 not in skipLines: for i in xrange(dataLength): myData = np.array(map(float,table1.data[column[1][i]:\ @@ -413,45 +384,45 @@ class Test(): norm[i][line1-len(skipLines)]) line1 +=1 - if (line0 != line1): raise Exception('found %s lines in 1. table and %s in 2. table'%(line0,line1)) + if (line0 != line1): raise Exception('found {} lines in 1. table but {} in 2. table'.format(line0,line1)) logging.info(' ********') for i in xrange(dataLength): if absTol[i]: - logging.info(' * maximum absolute error %e for %s and %s'%(maxError[i],headings0[i]['label'],headings1[i]['label'])) + logging.info(' * maximum absolute error {} between {} and {}'.format(maxError[i], + headings0[i]['label'], + headings1[i]['label'])) else: - logging.info(' * maximum relative error %e for %s and %s'%(maxError[i],headings0[i]['label'],headings1[i]['label'])) + logging.info(' * maximum relative error {} between {} and {}'.format(maxError[i], + headings0[i]['label'], + headings1[i]['label'])) logging.info(' ********') return maxError def compare_TablesStatistically(self, - files = [None,None], # list of file names - columns = [None], # list of list of column labels (per file) + files = [None,None], # list of file names + columns = [None], # list of list of column labels (per file) meanTol = 1.0e-4, stdTol = 1.0e-6, preFilter = 1.0e-9): - - ''' - calculate statistics of tables - threshold can be used to ignore small values (a negative number disables this feature) - ''' + """ + calculate statistics of tables - import numpy as np - from collections import Iterable - - if not (isinstance(files, Iterable) and not isinstance(files, str)): # check whether list of files is requested + threshold can be used to ignore small values (a negative number disables this feature) + """ + if not (isinstance(files, Iterable) and not isinstance(files, str)): # check whether list of files is requested files = [str(files)] tables = [damask.ASCIItable(name = filename,readonly = True) for filename in files] for table in tables: table.head_read() - columns += [columns[0]]*(len(files)-len(columns)) # extend to same length as files - columns = columns[:len(files)] # truncate to same length as files + columns += [columns[0]]*(len(files)-len(columns)) # extend to same length as files + columns = columns[:len(files)] # truncate to same length as files for i,column in enumerate(columns): - if column is None: columns[i] = tables[i].labels # if no column is given, read all + if column is None: columns[i] = tables[i].labels # if no column is given, read all logging.info('comparing ASCIItables statistically') for i in xrange(len(columns)): @@ -461,7 +432,7 @@ class Test(): ) logging.info(files[i]+':'+','.join(columns[i])) - if len(files) < 2: return True # single table is always close to itself... + if len(files) < 2: return True # single table is always close to itself... 
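# In outline, the statistical comparison below normalizes the element-wise
# difference of each pair of data columns by a reference magnitude and then
# thresholds the largest column mean and standard deviation. A minimal NumPy
# sketch of that criterion (array names data0/data1/normBy are illustrative):
#
#   import numpy as np
#   delta       = data0 - data1
#   normedDelta = np.where(normBy > preFilter, delta/normBy, 0.0)   # ignore tiny reference values
#   ok = np.amax(np.abs(np.mean(normedDelta,0))) < meanTol and \
#        np.amax(np.std(normedDelta,0)) < stdTol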
data = [] for table,labels in zip(tables,columns): @@ -476,42 +447,38 @@ class Test(): normedDelta = np.where(normBy>preFilter,delta/normBy,0.0) mean = np.amax(np.abs(np.mean(normedDelta,0))) std = np.amax(np.std(normedDelta,0)) - logging.info('mean: %f'%mean) - logging.info('std: %f'%std) + logging.info('mean: {:f}'.format(mean)) + logging.info('std: {:f}'.format(std)) return (mean<meanTol) & (std<stdTol) - maximum = np.where(maximum>0.0, maximum, 1) # do not devide by zero for empty columns + maximum = np.where(maximum >0.0, maximum, 1) # avoid div by zero for empty columns for i in xrange(len(data)): data[i] /= maximum mask = np.zeros_like(table.data,dtype='bool') for table in data: - mask |= np.where(np.abs(table)<preFilter,True,False) if culprit == 0: - logging.critical('%s passed.'%({False: 'The test', True: 'All %i tests'%(len(self.variants))}[len(self.variants) > 1])) + logging.critical(('The test' if len(self.variants) == 1 else 'All {} tests'.format(len(self.variants))) + ' passed') logging.critical('\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n') return 0 if culprit == -1: logging.warning('Warning: Could not start test') return 0 else: - logging.critical(' ********\n * Test %i failed...\n ********'%(culprit)) + logging.critical(' ********\n * Test {} failed...\n ********'.format(culprit)) logging.critical('\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n') return culprit diff --git a/lib/damask/util.py b/lib/damask/util.py index b3e35812a..244aaa25c 100644 --- a/lib/damask/util.py +++ b/lib/damask/util.py @@ -6,11 +6,13 @@ import numpy as np from optparse import Option class bcolors: - ''' - ASCII Colors (Blender code) - https://svn.blender.org/svnroot/bf-blender/trunk/blender/build_files/scons/tools/bcolors.py - http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python - ''' + """ + ASCII Colors (Blender code) + + https://svn.blender.org/svnroot/bf-blender/trunk/blender/build_files/scons/tools/bcolors.py + http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python + """ + HEADER = '\033[95m' OKBLUE = '\033[94m' OKGREEN = '\033[92m' @@ -32,32 +34,48 @@ class bcolors: # ----------------------------- -def srepr(arg, - glue = '\n'): -# ----------------------------- - if (not hasattr(arg, "strip") and - hasattr(arg, "__getitem__") or - hasattr(arg, "__iter__")): - return glue.join(srepr(x) for x in arg) - return arg if isinstance(arg,basestring) else repr(arg) +def srepr(arg,glue = '\n'): + """joins arguments as individual lines""" + if (not hasattr(arg, "strip") and + hasattr(arg, "__getitem__") or + hasattr(arg, "__iter__")): + return glue.join(srepr(x) for x in arg) + return arg if isinstance(arg,basestring) else repr(arg) # ----------------------------- -def croak(what, - newline = True): -# ----------------------------- +def croak(what, newline = True): + """writes formatted to stderr""" sys.stderr.write(srepr(what,glue = '\n') + ('\n' if newline else '')) sys.stderr.flush() # ----------------------------- def report(who,what): -# ----------------------------- + """reports script and file name""" croak( (emph(who) if who else '') + (': '+what if what else '') ) # ----------------------------- def emph(what): -# ----------------------------- + """emphasizes string on screen""" return bcolors.BOLD+srepr(what)+bcolors.ENDC +# ----------------------------- +def execute(cmd, + streamIn = None, + wd = './'): + """executes a command in given directory and returns stdout and stderr for optional stdin""" + initialPath = os.getcwd() + os.chdir(wd) + process = subprocess.Popen(shlex.split(cmd), + stdout = subprocess.PIPE, + stderr = subprocess.PIPE, + stdin = subprocess.PIPE) + out,error =
[i.replace("\x08","") for i in (process.communicate() if streamIn is None + else process.communicate(streamIn.read()))] + os.chdir(initialPath) + if process.returncode != 0: raise RuntimeError('{} failed with returncode {}'.format(cmd,process.returncode)) + return out,error + + # ----------------------------- # Matlab like trigonometric functions that take and return angles in degrees. # ----------------------------- @@ -68,7 +86,6 @@ for f in ['cos', 'sin', 'tan']: # ----------------------------- def gridLocation(idx,res): -# ----------------------------- return ( idx % res[0], \ ( idx // res[0]) % res[1], \ ( idx // res[0] // res[1]) % res[2] ) @@ -76,18 +93,19 @@ def gridLocation(idx,res): # ----------------------------- def gridIndex(location,res): -# ----------------------------- - return ( location[0] % res[0] + \ + return ( location[0] % res[0] + \ ( location[1] % res[1]) * res[0] + \ ( location[2] % res[2]) * res[1] * res[0] ) # ----------------------------- class extendableOption(Option): -# ----------------------------- -# used for definition of new option parser action 'extend', which enables to take multiple option arguments -# taken from online tutorial http://docs.python.org/library/optparse.html - + """ + used for definition of new option parser action 'extend', which enables to take multiple option arguments + + taken from online tutorial http://docs.python.org/library/optparse.html + """ + ACTIONS = Option.ACTIONS + ("extend",) STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",) TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",) @@ -102,28 +120,36 @@ class extendableOption(Option): # ----------------------------- class backgroundMessage(threading.Thread): -# ----------------------------- - choices = {'bounce': ['_','o','O','°','¯','¯','°','O','o',], + """reporting with animation to indicate progress""" + + choices = {'bounce': ['_', 'o', 'O', u'\u00B0', + u'\u203e',u'\u203e',u'\u00B0','O','o','_'], + 'spin': [u'\u25dc',u'\u25dd',u'\u25de',u'\u25df'], 'circle': [u'\u25f4',u'\u25f5',u'\u25f6',u'\u25f7'], 'hexagon': [u'\u2b22',u'\u2b23'], 'square': [u'\u2596',u'\u2598',u'\u259d',u'\u2597'], 'triangle': [u'\u140a',u'\u140a',u'\u1403',u'\u1405',u'\u1405',u'\u1403'], - 'amoeba': [u'\u2596',u'\u258f',u'\u2598',u'\u2594',u'\u259d',u'\u2595',u'\u2597',u'\u2582'], - 'beat': [u'\u2581',u'\u2582',u'\u2583',u'\u2585',u'\u2586',u'\u2587',u'\u2587',u'\u2586',u'\u2585',u'\u2583',u'\u2582',], - 'prison': [u'\u168b',u'\u168c',u'\u168d',u'\u168f',u'\u168e',u'\u168d',u'\u168c',u'\u168b',], - 'breath': [u'\u1690',u'\u1691',u'\u1692',u'\u1693',u'\u1694',u'\u1693',u'\u1692',u'\u1691',u'\u1690',], + 'amoeba': [u'\u2596',u'\u258f',u'\u2598',u'\u2594',u'\u259d',u'\u2595', + u'\u2597',u'\u2582'], + 'beat': [u'\u2581',u'\u2582',u'\u2583',u'\u2585',u'\u2586',u'\u2587', + u'\u2587',u'\u2586',u'\u2585',u'\u2583',u'\u2582',], + 'prison': [u'\u168b',u'\u168c',u'\u168d',u'\u168f',u'\u168e',u'\u168d', + u'\u168c',u'\u168b',], + 'breath': [u'\u1690',u'\u1691',u'\u1692',u'\u1693',u'\u1694',u'\u1693', + u'\u1692',u'\u1691',u'\u1690',], 'pulse': [u'·',u'•',u'\u25cf',u'\u25cf',u'•',], - 'ant': [u'\u2801',u'\u2802',u'\u2810',u'\u2820',u'\u2804',u'\u2840',u'\u2880',u'\u2820',u'\u2804',u'\u2802',u'\u2810',u'\u2808'], - 'juggle': [u'\ua708',u'\ua709',u'\ua70a',u'\ua70b',u'\ua70c',u'\ua711',u'\ua710',u'\ua70f',u'\ua70d',], + 'ant': [u'\u2801',u'\u2802',u'\u2810',u'\u2820',u'\u2804',u'\u2840', + u'\u2880',u'\u2820',u'\u2804',u'\u2802',u'\u2810',u'\u2808'], + 'juggle': 
[u'\ua708',u'\ua709',u'\ua70a',u'\ua70b',u'\ua70c',u'\ua711', + u'\ua710',u'\ua70f',u'\ua70d',], # 'wobbler': [u'\u2581',u'\u25e3',u'\u258f',u'\u25e4',u'\u2594',u'\u25e5',u'\u2595',u'\u25e2',], 'grout': [u'\u2581',u'\u258f',u'\u2594',u'\u2595',], 'partner': [u'\u26ac',u'\u26ad',u'\u26ae',u'\u26af',u'\u26ae',u'\u26ad',], 'classic': ['-', '\\', '|', '/',], } - def __init__(self, - symbol = None, - wait = 0.1): + def __init__(self,symbol = None,wait = 0.1): + """sets animation symbol""" super(backgroundMessage, self).__init__() self._stop = threading.Event() self.message = '' @@ -134,20 +160,20 @@ class backgroundMessage(threading.Thread): self.waittime = wait def __quit__(self): + """cleans output""" length = len(self.symbols[self.counter] + self.gap + self.message) sys.stderr.write(chr(8)*length + ' '*length + chr(8)*length) sys.stderr.write('') sys.stderr.flush() def stop(self): - self._stop.set() + self._stop.set() def stopped(self): - return self._stop.is_set() + return self._stop.is_set() def run(self): -# while not threading.enumerate()[0]._Thread__stopped: - while not self.stopped(): + while not threading.enumerate()[0]._Thread__stopped: time.sleep(self.waittime) self.update_message() self.__quit__() @@ -159,7 +185,7 @@ class backgroundMessage(threading.Thread): def print_message(self): length = len(self.symbols[self.counter] + self.gap + self.message) sys.stderr.write(chr(8)*length + ' '*length + chr(8)*length + \ - self.symbols[self.counter] + self.gap + self.new_message) # delete former and print new message + self.symbols[self.counter].encode('utf-8') + self.gap + self.new_message) # delete former and print new message sys.stderr.flush() self.message = self.new_message @@ -170,24 +196,38 @@ class backgroundMessage(threading.Thread): def animation(self,which = None): return ''.join(self.choices[which]) if which in self.choices else '' -''' -Non-linear least square fitting (Levenberg-Marquardt method) with -bounded parameters. -the codes of transformation between int <-> ext refers to the work of -Jonathan J. Helmus: https://github.com/jjhelmus/leastsqbound-scipy -other codes refers to the source code of minpack.py: -..\Lib\site-packages\scipy\optimize\minpack.py -''' -from numpy import (array, arcsin, asarray, cos, dot, eye, empty_like, - isscalar,finfo, take, triu, transpose, sqrt, sin) -def _check_func(checker, argname, thefunc, x0, args, numinputs, +def leastsqBound(func, x0, args=(), bounds=None, Dfun=None, full_output=0, + col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8, + gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None): + from scipy.optimize import _minpack + """ + Non-linear least square fitting (Levenberg-Marquardt method) with + bounded parameters. + the code for the transformation between int <-> ext refers to the work of + Jonathan J. Helmus: https://github.com/jjhelmus/leastsqbound-scipy + other code refers to the source code of minpack.py: + ..\Lib\site-packages\scipy\optimize\minpack.py + + An internal parameter list is used to enforce constraints on the fitting + parameters. The transformation is based on that of the MINUIT package. + please see: F. James and M. Winkler. MINUIT User's Guide, 2004. + + bounds : list + (min, max) pairs for each parameter, use None for 'min' or 'max' + when there is no bound in that direction. + For example: if there are two parameters to be fitted, then + bounds is [(min1,max1), (min2,max2)] + + This function is based on 'leastsq' of minpack.py, the annotation of + other parameters can be found in 'leastsq'.
+ ..\Lib\site-packages\scipy\optimize\minpack.py + """ + + def _check_func(checker, argname, thefunc, x0, args, numinputs, output_shape=None): - from numpy import atleast_1d, shape, issubdtype, dtype, inexact - ''' - The same as that of minpack.py, - ''' - res = atleast_1d(thefunc(*((x0[:numinputs],) + args))) + """The same as that of minpack.py""" + res = np.atleast_1d(thefunc(*((x0[:numinputs],) + args))) if (output_shape is not None) and (shape(res) != output_shape): if (output_shape[0] != 1): if len(output_shape) > 1: @@ -201,203 +241,171 @@ def _check_func(checker, argname, thefunc, x0, args, numinputs, else: msg += "." raise TypeError(msg) - if issubdtype(res.dtype, inexact): + if np.issubdtype(res.dtype, np.inexact): dt = res.dtype else: dt = dtype(float) return shape(res), dt - -def _int2extGrad(p_int, bounds): - """ - Calculate the gradients of transforming the internal (unconstrained) - to external (constained) parameter. - """ - grad = empty_like(p_int) + + def _int2extGrad(p_int, bounds): + """Calculate the gradients of transforming the internal (unconstrained) to external (constrained) parameter.""" + grad = np.empty_like(p_int) for i, (x, bound) in enumerate(zip(p_int, bounds)): lower, upper = bound if lower is None and upper is None: # No constraints grad[i] = 1.0 elif upper is None: # only lower bound - grad[i] = x/sqrt(x*x + 1.0) + grad[i] = x/np.sqrt(x*x + 1.0) elif lower is None: # only upper bound - grad[i] = -x/sqrt(x*x + 1.0) + grad[i] = -x/np.sqrt(x*x + 1.0) else: # lower and upper bounds - grad[i] = (upper - lower)*cos(x)/2.0 + grad[i] = (upper - lower)*np.cos(x)/2.0 return grad - -def _int2extFunc(bounds): - ''' - transform internal parameters into external parameters. - ''' + + def _int2extFunc(bounds): + """transform internal parameters into external parameters.""" local = [_int2extLocal(b) for b in bounds] def _transform_i2e(p_int): - p_ext = empty_like(p_int) + p_ext = np.empty_like(p_int) p_ext[:] = [i(j) for i, j in zip(local, p_int)] return p_ext return _transform_i2e - -def _ext2intFunc(bounds): - ''' - transform external parameters into internal parameters. - ''' + + def _ext2intFunc(bounds): + """transform external parameters into internal parameters.""" local = [_ext2intLocal(b) for b in bounds] def _transform_e2i(p_ext): - p_int = empty_like(p_ext) + p_int = np.empty_like(p_ext) p_int[:] = [i(j) for i, j in zip(local, p_ext)] return p_int return _transform_e2i - -def _int2extLocal(bound): - ''' - transform a single internal parameter to an external parameter. - ''' + + def _int2extLocal(bound): + """transform a single internal parameter to an external parameter.""" lower, upper = bound if lower is None and upper is None: # no constraints return lambda x: x elif upper is None: # only lower bound - return lambda x: lower - 1.0 + sqrt(x*x + 1.0) + return lambda x: lower - 1.0 + np.sqrt(x*x + 1.0) elif lower is None: # only upper bound - return lambda x: upper + 1.0 - sqrt(x*x + 1.0) + return lambda x: upper + 1.0 - np.sqrt(x*x + 1.0) else: - return lambda x: lower + ((upper - lower)/2.0)*(sin(x) + 1.0) - -def _ext2intLocal(bound): - ''' - transform a single external parameter to an internal parameter. 
- ''' + return lambda x: lower + ((upper - lower)/2.0)*(np.sin(x) + 1.0) + + def _ext2intLocal(bound): + """transform a single external parameter to an internal parameter.""" lower, upper = bound if lower is None and upper is None: # no constraints return lambda x: x elif upper is None: # only lower bound - return lambda x: sqrt((x - lower + 1.0)**2 - 1.0) + return lambda x: np.sqrt((x - lower + 1.0)**2 - 1.0) elif lower is None: # only upper bound - return lambda x: sqrt((x - upper - 1.0)**2 - 1.0) + return lambda x: np.sqrt((x - upper - 1.0)**2 - 1.0) else: - return lambda x: arcsin((2.0*(x - lower)/(upper - lower)) - 1.0) + return lambda x: np.arcsin((2.0*(x - lower)/(upper - lower)) - 1.0) + + i2e = _int2extFunc(bounds) + e2i = _ext2intFunc(bounds) + + x0 = np.asarray(x0).flatten() + n = len(x0) -def leastsqBound(func, x0, args=(), bounds=None, Dfun=None, full_output=0, - col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8, - gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None): - from scipy.optimize import _minpack - ''' - An internal parameter list is used to enforce contraints on the fitting - parameters. The transfomation is based on that of MINUIT package. - please see: F. James and M. Winkler. MINUIT User's Guide, 2004. + if len(bounds) != n: + raise ValueError('the length of bounds is inconsistent with the number of parameters ') + + if not isinstance(args, tuple): + args = (args,) + + shape, dtype = _check_func('leastsq', 'func', func, x0, args, n) + m = shape[0] - bounds : list - (min, max) pairs for each parameter, use None for 'min' or 'max' - when there is no bound in that direction. - For example: if there are two parameters needed to be fitting, then - bounds is [(min1,max1), (min2,max2)] - - This function is based on 'leastsq' of minpack.py, the annotation of - other parameters can be found in 'leastsq'. 
- ..\Lib\site-packages\scipy\optimize\minpack.py - ''' - i2e = _int2extFunc(bounds) - e2i = _ext2intFunc(bounds) - - x0 = asarray(x0).flatten() - n = len(x0) + if n > m: + raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m)) + if epsfcn is None: + epsfcn = np.finfo(dtype).eps - if len(bounds) != n: - raise ValueError('the length of bounds is inconsistent with the number of parameters ') - - if not isinstance(args, tuple): - args = (args,) - - shape, dtype = _check_func('leastsq', 'func', func, x0, args, n) - m = shape[0] + def funcWarp(x, *args): + return func(i2e(x), *args) - if n > m: - raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m)) - if epsfcn is None: - epsfcn = finfo(dtype).eps + xi0 = e2i(x0) + + if Dfun is None: + if maxfev == 0: + maxfev = 200*(n + 1) + retval = _minpack._lmdif(funcWarp, xi0, args, full_output, ftol, xtol, + gtol, maxfev, epsfcn, factor, diag) + else: + if col_deriv: + _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m)) + else: + _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n)) + if maxfev == 0: + maxfev = 100*(n + 1) - # wrapped func - def funcWarp(x, *args): - return func(i2e(x), *args) + def DfunWarp(x, *args): + return Dfun(i2e(x), *args) - xi0 = e2i(x0) - - if Dfun is None: - if maxfev == 0: - maxfev = 200*(n + 1) - retval = _minpack._lmdif(funcWarp, xi0, args, full_output, ftol, xtol, - gtol, maxfev, epsfcn, factor, diag) - else: - if col_deriv: - _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m)) - else: - _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n)) - if maxfev == 0: - maxfev = 100*(n + 1) + retval = _minpack._lmder(funcWarp, DfunWarp, xi0, args, full_output, col_deriv, + ftol, xtol, gtol, maxfev, factor, diag) - # wrapped Dfun - def DfunWarp(x, *args): - return Dfun(i2e(x), *args) + errors = {0: ["Improper input parameters.", TypeError], + 1: ["Both actual and predicted relative reductions " + "in the sum of squares\n are at most %f" % ftol, None], + 2: ["The relative error between two consecutive " + "iterates is at most %f" % xtol, None], + 3: ["Both actual and predicted relative reductions in " + "the sum of squares\n are at most %f and the " + "relative error between two consecutive " + "iterates is at \n most %f" % (ftol, xtol), None], + 4: ["The cosine of the angle between func(x) and any " + "column of the\n Jacobian is at most %f in " + "absolute value" % gtol, None], + 5: ["Number of calls to function has reached " + "maxfev = %d." % maxfev, ValueError], + 6: ["ftol=%f is too small, no further reduction " + "in the sum of squares\n is possible.""" % ftol, + ValueError], + 7: ["xtol=%f is too small, no further improvement in " + "the approximate\n solution is possible." % xtol, + ValueError], + 8: ["gtol=%f is too small, func(x) is orthogonal to the " + "columns of\n the Jacobian to machine " + "precision." 
% gtol, ValueError], + 'unknown': ["Unknown error.", TypeError]} - retval = _minpack._lmder(funcWarp, DfunWarp, xi0, args, full_output, col_deriv, - ftol, xtol, gtol, maxfev, factor, diag) + info = retval[-1] # The FORTRAN return value + + if info not in [1, 2, 3, 4] and not full_output: + if info in [5, 6, 7, 8]: + np.warnings.warn(errors[info][0], RuntimeWarning) + else: + try: + raise errors[info][1](errors[info][0]) + except KeyError: + raise errors['unknown'][1](errors['unknown'][0]) - errors = {0: ["Improper input parameters.", TypeError], - 1: ["Both actual and predicted relative reductions " - "in the sum of squares\n are at most %f" % ftol, None], - 2: ["The relative error between two consecutive " - "iterates is at most %f" % xtol, None], - 3: ["Both actual and predicted relative reductions in " - "the sum of squares\n are at most %f and the " - "relative error between two consecutive " - "iterates is at \n most %f" % (ftol, xtol), None], - 4: ["The cosine of the angle between func(x) and any " - "column of the\n Jacobian is at most %f in " - "absolute value" % gtol, None], - 5: ["Number of calls to function has reached " - "maxfev = %d." % maxfev, ValueError], - 6: ["ftol=%f is too small, no further reduction " - "in the sum of squares\n is possible.""" % ftol, - ValueError], - 7: ["xtol=%f is too small, no further improvement in " - "the approximate\n solution is possible." % xtol, - ValueError], - 8: ["gtol=%f is too small, func(x) is orthogonal to the " - "columns of\n the Jacobian to machine " - "precision." % gtol, ValueError], - 'unknown': ["Unknown error.", TypeError]} + mesg = errors[info][0] + x = i2e(retval[0]) - info = retval[-1] # The FORTRAN return value - - if info not in [1, 2, 3, 4] and not full_output: - if info in [5, 6, 7, 8]: - warnings.warn(errors[info][0], RuntimeWarning) - else: - try: - raise errors[info][1](errors[info][0]) - except KeyError: - raise errors['unknown'][1](errors['unknown'][0]) - - mesg = errors[info][0] - x = i2e(retval[0]) - - if full_output: - grad = _int2extGrad(retval[0], bounds) - retval[1]['fjac'] = (retval[1]['fjac'].T / take(grad, - retval[1]['ipvt'] - 1)).T - cov_x = None - if info in [1, 2, 3, 4]: - from numpy.dual import inv - from numpy.linalg import LinAlgError - perm = take(eye(n), retval[1]['ipvt'] - 1, 0) - r = triu(transpose(retval[1]['fjac'])[:n, :]) - R = dot(r, perm) - try: - cov_x = inv(dot(transpose(R), R)) - except LinAlgError as inverror: - print inverror - pass - return (x, cov_x) + retval[1:-1] + (mesg, info) - else: - return (x, info) + if full_output: + grad = _int2extGrad(retval[0], bounds) + retval[1]['fjac'] = (retval[1]['fjac'].T / np.take(grad, + retval[1]['ipvt'] - 1)).T + cov_x = None + if info in [1, 2, 3, 4]: + from numpy.dual import inv + from numpy.linalg import LinAlgError + perm = np.take(np.eye(n), retval[1]['ipvt'] - 1, 0) + r = np.triu(np.transpose(retval[1]['fjac'])[:n, :]) + R = np.dot(r, perm) + try: + cov_x = inv(np.dot(np.transpose(R), R)) + except LinAlgError as inverror: + print inverror + pass + return (x, cov_x) + retval[1:-1] + (mesg, info) + else: + return (x, info) def _general_function(params, ydata, xdata, function): return function(xdata, *params) - ydata @@ -405,7 +413,7 @@ def _weighted_general_function(params, ydata, xdata, function, weights): return (function(xdata, *params) - ydata)*weights def curve_fit_bound(f, xdata, ydata, p0=None, sigma=None, bounds=None, **kw): - ''' Similar as 'curve_fit' in minpack.py''' + """Similar as 'curve_fit' in minpack.py""" if p0 is None: # 
determine number of parameters by inspecting the function import inspect @@ -418,15 +426,15 @@ def curve_fit_bound(f, xdata, ydata, p0=None, sigma=None, bounds=None, **kw): else: p0 = [1.0] * (len(args)-1) - if isscalar(p0): - p0 = array([p0]) + if np.isscalar(p0): + p0 = np.array([p0]) args = (ydata, xdata, f) if sigma is None: func = _general_function else: func = _weighted_general_function - args += (1.0/asarray(sigma),) + args += (1.0/np.asarray(sigma),) return_full = kw.pop('full_output', False) res = leastsqBound(func, p0, args=args, bounds = bounds, full_output=True, **kw) @@ -440,26 +448,6 @@ def curve_fit_bound(f, xdata, ydata, p0=None, sigma=None, bounds=None, **kw): s_sq = (func(popt, *args)**2).sum()/(len(ydata)-len(p0)) pcov = pcov * s_sq else: - pcov = inf - - if return_full: - return popt, pcov, infodict, errmsg, ier - else: - return popt, pcov - - -def execute(cmd,streamIn=None,wd='./'): - ''' - executes a command in given directory and returns stdout and stderr for optional stdin - ''' - initialPath=os.getcwd() - os.chdir(wd) - process = subprocess.Popen(shlex.split(cmd),stdout=subprocess.PIPE,stderr = subprocess.PIPE,stdin=subprocess.PIPE) - if streamIn != None: - out,error = process.communicate(streamIn.read()) - else: - out,error = process.communicate() - os.chdir(initialPath) - if process.returncode !=0: raise RuntimeError(cmd+' failed with returncode '+str(process.returncode)) - return out,error + pcov = np.inf + return (popt, pcov, infodict, errmsg, ier) if return_full else (popt, pcov) diff --git a/misc/DAMASK QR-Code.png b/misc/DAMASK QR-Code.png index 28a82c5cf..8fb9bace9 100644 Binary files a/misc/DAMASK QR-Code.png and b/misc/DAMASK QR-Code.png differ diff --git a/processing/misc/ang_toTable.py b/processing/misc/ang_toTable.py index 8820c514a..48b38f0b0 100755 --- a/processing/misc/ang_toTable.py +++ b/processing/misc/ang_toTable.py @@ -1,8 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string,vtk -import numpy as np +import os from optparse import OptionParser import damask @@ -48,7 +47,7 @@ for name in filenames: table.labels_append(['1_Euler','2_Euler','3_Euler', '1_pos','2_pos', 'IQ','CI','PhaseID','Intensity','Fit', - ], # labels according to OIM Analysis 7.2 Manual, p 403 (of 517) + ], # OIM Analysis 7.2 Manual, p 403 (of 517) reset = True) # ------------------------------------------ assemble header --------------------------------------- diff --git a/processing/misc/calculateAnisotropy.py b/processing/misc/calculateAnisotropy.py index d93431f50..c390926bb 100644 --- a/processing/misc/calculateAnisotropy.py +++ b/processing/misc/calculateAnisotropy.py @@ -1,9 +1,9 @@ #!/usr/bin/python # -*- coding: UTF-8 no BOM -*- -import threading,time,os,subprocess,shlex,string +import threading,os,string import numpy as np -from optparse import OptionParser, OptionGroup +from optparse import OptionParser from shutil import copy2 from re import split import damask @@ -14,26 +14,10 @@ scriptID = ' '.join([scriptName,damask.version]) def list_split(option, opt, value, parser): setattr(parser.values, option.dest, value.split(',')) -def execute(cmd,streamIn=None,wd='./'): - ''' - executes a command in given directory and returns stdout and stderr for optional stdin - ''' - initialPath=os.getcwd() - os.chdir(wd) - process = subprocess.Popen(shlex.split(cmd),stdout=subprocess.PIPE,stderr = subprocess.PIPE,stdin=subprocess.PIPE) - if streamIn != None: - out,error = process.communicate(streamIn.read()) - else: - out,error = process.communicate() - 
os.chdir(initialPath) - return out,error - #--------------------------------------------------------------------------------------------------- class myThread (threading.Thread): -#--------------------------------------------------------------------------------------------------- - ''' - Runner class - ''' + """Runner""" + def __init__(self, threadID): threading.Thread.__init__(self) self.threadID = threadID @@ -48,8 +32,6 @@ class myThread (threading.Thread): s.release() def doSim(delay,thread): -# s.acquire() and s.release() are couple -# global dirCurrent s.acquire() delta_angle = offsetPhi() @@ -63,22 +45,22 @@ def doSim(delay,thread): os.mkdir(dire,0755) for file in [options.geometry+'.geom',options.load+'.load','numerics.config']: copy2(dirCurrent+'/'+file, dire) - newMatConfig = newMaterialConfig(dirCurrent,delta_angle) + newMaterialConfig(dirCurrent,delta_angle) os.chdir(dire) if not os.path.isfile('%s_%s.spectralOut'%(options.geometry,options.load)): print('starting uniaxial tension in direction of angle %s from %s'%(file_angle,thread)) s.release() - execute('DAMASK_spectral -g %s -l %s'%(options.geometry,options.load)) + damask.util.execute('DAMASK_spectral -g %s -l %s'%(options.geometry,options.load)) else: s.release() s.acquire() if not os.path.isfile('./%s/%s_%s.txt'%('Rvalues',options.geometry,options.load)): print('starting post processing for angle %s from %s'%(file_angle,thread)) s.release() - execute('postResults --cr f,p -d %s %s_%s.spectralOut'%('Rvalues',options.geometry,options.load)) - execute('addCauchy ./%s/%s_%s.txt'%('Rvalues',options.geometry,options.load)) - execute('addStrainTensors -l -v ./%s/%s_%s.txt'%('Rvalues',options.geometry,options.load)) + damask.util.execute('postResults --cr f,p -d %s %s_%s.spectralOut'%('Rvalues',options.geometry,options.load)) + damask.util.execute('addCauchy ./%s/%s_%s.txt'%('Rvalues',options.geometry,options.load)) + damask.util.execute('addStrainTensors -l -v ./%s/%s_%s.txt'%('Rvalues',options.geometry,options.load)) print('post processing for angle %s from %s is finished'%(file_angle,thread)) else: @@ -122,7 +104,6 @@ def newMaterialConfig(dire,angle): line2 = line f.write(line2) f.close() - return True # -------------------------------------------------------------------- # MAIN @@ -135,16 +116,21 @@ strength anisotropic coefficients (normalized yield stress) """, version=string.replace(scriptID,'\n','\\n') ) -parser.add_option('-l','--load' , dest='load', type='string', - help='name of the load file [%default]', metavar='string') -parser.add_option('-g','--geometry', dest='geometry', type='string', - help='name of the geometry file [%default]', metavar='string') -parser.add_option('-s', '--strain', dest='strain', type='string', action='callback', callback=list_split, - help='the threshold strains, using comma to seperate multiple strains [%default]', metavar='string') -parser.add_option('-t','--threads', dest='threads', type='int', - help='number of parallel executions [%default]', metavar='int') -parser.add_option('-n','--number', dest='number', type='int', - help='Number of uni-axial tensile tests [%default]', metavar='int') +parser.add_option('-l','--load' , + dest='load', type='string', + help='name of the load file [%default]', metavar='string') +parser.add_option('-g','--geometry', + dest='geometry', type='string', + help='name of the geometry file [%default]', metavar='string') +parser.add_option('-s', '--strain', + dest='strain', type='string', action='callback', callback=list_split, + help='threshold strains, using 
comma to separate multiple strains [%default]', metavar='string') +parser.add_option('-t','--threads', + dest='threads', type='int', + help='number of parallel executions [%default]', metavar='int') +parser.add_option('-n','--number', + dest='number', type='int', + help='Number of uni-axial tensile tests [%default]', metavar='int') parser.set_defaults(geometry = '20grains16x16x16') parser.set_defaults(load = 'tensionX') @@ -248,4 +234,4 @@ for i in xrange(2): aniso_list_ang_strain.append('none') writeformat = writeformat+'%-12s' f.write('%-10s'%file_angle + writeformat%(tuple(aniso_list_ang_strain))+'\n') -f.close() \ No newline at end of file +f.close() diff --git a/processing/misc/gwyddion_filter.py b/processing/misc/gwyddion_filter.py index 100c61976..7748b101f 100755 --- a/processing/misc/gwyddion_filter.py +++ b/processing/misc/gwyddion_filter.py @@ -1,33 +1,15 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string,re,numpy,scipy.ndimage,scipy.signal,vtk +import os,string,scipy.ndimage +import numpy as np import damask -from optparse import OptionParser, OptionGroup, Option, SUPPRESS_HELP +from optparse import OptionParser scriptName = os.path.splitext(os.path.basename(__file__))[0] scriptID = ' '.join([scriptName,damask.version]) -#-------------------------------------------------------------------------------------------------- -class extendedOption(Option): -#-------------------------------------------------------------------------------------------------- -# used for definition of new option parser action 'extend', which enables to take multiple option arguments -# taken from online tutorial http://docs.python.org/library/optparse.html - - ACTIONS = Option.ACTIONS + ("extend",) - STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",) - TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",) - ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",) - - def take_action(self, action, dest, opt, value, values, parser): - if action == "extend": - lvalue = value.split(",") - values.ensure_value(dest, []).extend(lvalue) - else: - Option.take_action(self, action, dest, opt, value, values, parser) - - -parser = OptionParser(option_class=extendedOption, usage='%prog options [file[s]]', description = """ +parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """ Apply filter(s) to Gwyddion data.
""" + string.replace(scriptID,'\n','\\n') ) @@ -59,7 +41,7 @@ for file in filenames: if pieces[1] == 'Height:': height = float(pieces[2]) header.append(line.lstrip('#').strip()) - elevation = numpy.loadtxt(file)#*1e6 + elevation = np.loadtxt(file)#*1e6 if options.opening > 0: elevation = scipy.ndimage.morphology.grey_opening(elevation,options.opening) @@ -80,5 +62,5 @@ for file in filenames: elevation = scipy.ndimage.filters.median_filter(elevation,options.median) filters += '_median%i'%options.median - numpy.savetxt(os.path.splitext(file)[0]+filters+os.path.splitext(file)[1],elevation,header='\n'.join(header)) + np.savetxt(os.path.splitext(file)[0]+filters+os.path.splitext(file)[1],elevation,header='\n'.join(header)) diff --git a/processing/misc/vtk_fromGwyddion.py b/processing/misc/vtk_fromGwyddion.py index 398999c75..6f13a2e0f 100755 --- a/processing/misc/vtk_fromGwyddion.py +++ b/processing/misc/vtk_fromGwyddion.py @@ -1,9 +1,10 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string,re,numpy,scipy.ndimage,scipy.signal,vtk +import os,string,vtk +import numpy as np import damask -from optparse import OptionParser, OptionGroup, Option, SUPPRESS_HELP +from optparse import OptionParser scriptName = os.path.splitext(os.path.basename(__file__))[0] scriptID = ' '.join([scriptName,damask.version]) @@ -26,26 +27,7 @@ scalingFactor = { \ }, } -#-------------------------------------------------------------------------------------------------- -class extendedOption(Option): -#-------------------------------------------------------------------------------------------------- -# used for definition of new option parser action 'extend', which enables to take multiple option arguments -# taken from online tutorial http://docs.python.org/library/optparse.html - - ACTIONS = Option.ACTIONS + ("extend",) - STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",) - TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",) - ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",) - - def take_action(self, action, dest, opt, value, values, parser): - if action == "extend": - lvalue = value.split(",") - values.ensure_value(dest, []).extend(lvalue) - else: - Option.take_action(self, action, dest, opt, value, values, parser) - - -parser = OptionParser(option_class=extendedOption, usage='%prog options [file[s]]', description = """ +parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """ Produce VTK rectilinear grid from Gwyddion dataset exported as text. 
""" + string.replace(scriptID,'\n','\\n') ) @@ -78,16 +60,16 @@ for file in filenames: if options.scaling == 0.0: options.scaling = scalingFactor[lateralunit][elevationunit] - elevation = numpy.loadtxt(file)*options.scaling + elevation = np.loadtxt(file)*options.scaling grid = vtk.vtkRectilinearGrid() grid.SetDimensions(elevation.shape[1],elevation.shape[0],1) xCoords = vtk.vtkDoubleArray() - for x in numpy.arange(0.0,width,width/elevation.shape[1],'d'): + for x in np.arange(0.0,width,width/elevation.shape[1],'d'): xCoords.InsertNextValue(x) yCoords = vtk.vtkDoubleArray() - for y in numpy.arange(0.0,height,height/elevation.shape[0],'d'): + for y in np.arange(0.0,height,height/elevation.shape[0],'d'): yCoords.InsertNextValue(y) zCoords = vtk.vtkDoubleArray() zCoords.InsertNextValue(0.0) @@ -99,8 +81,8 @@ for file in filenames: vector = vtk.vtkFloatArray() vector.SetName("elevation"); vector.SetNumberOfComponents(3); - vector.SetNumberOfTuples(numpy.prod(elevation.shape)); - for i,z in enumerate(numpy.ravel(elevation)): + vector.SetNumberOfTuples(np.prod(elevation.shape)); + for i,z in enumerate(np.ravel(elevation)): vector.SetTuple3(i,0,0,z) grid.GetPointData().AddArray(vector) diff --git a/processing/misc/yieldSurface.py b/processing/misc/yieldSurface.py index b6ca40265..44777bf52 100755 --- a/processing/misc/yieldSurface.py +++ b/processing/misc/yieldSurface.py @@ -1,7 +1,7 @@ #!/usr/bin/python # -*- coding: UTF-8 no BOM -*- -import threading,time,os,subprocess,string,sys +import threading,time,os import numpy as np from optparse import OptionParser import damask @@ -56,10 +56,11 @@ def runFit(exponent, eqStress, dimension, criterion): damask.util.croak(fitResidual) def principalStresses(sigmas): - ''' - computes principal stresses (i.e. eigenvalues) for a set of Cauchy stresses. - sorted in descending order. - ''' + """ + computes principal stresses (i.e. eigenvalues) for a set of Cauchy stresses. + + sorted in descending order. 
+ """ lambdas=np.zeros(0,'d') for i in xrange(np.shape(sigmas)[1]): eigenvalues = np.linalg.eigvalsh(sym6toT33(sigmas[:,i])) @@ -82,27 +83,25 @@ def principalStress(p): t1 + t2*np.cos(phi+np.pi*4.0/3.0)]) def principalStrs_Der(p, (s1, s2, s3, s4, s5, s6), dim, Karafillis=False): - ''' - Derivative of principal stress with respect to stress - ''' + """Derivative of principal stress with respect to stress""" third = 1.0/3.0 third2 = 2.0*third I = invariant(p) I1s3I2= np.sqrt(I[0]**2 - 3.0*I[1]) - numer = 2.0*I1**3 - 9.0*I[0]*I[1] + 27.0*I[2] + numer = 2.0*I[0]**3 - 9.0*I[0]*I[1] + 27.0*I[2] denom = 2.0*I1s3I2**3 cs = numer/denom phi = np.arccos(cs)/3.0 dphidcs = -third/np.sqrt(1.0 - cs**2) dcsddenom = 0.5*numer*(-1.5)*I1s3I2**(-5.0) - dcsdI1 = (6.0*I1**2 - 9.0*I2)*denom + dcsddenom*(2.0*I1) - dcsdI2 = ( - 9.0*I1)*denom + dcsddenom*(-3.0) + dcsdI1 = (6.0*I[0]**2 - 9.0*I[1])*denom + dcsddenom*(2.0*I[0]) + dcsdI2 = ( - 9.0*I[0])*denom + dcsddenom*(-3.0) dcsdI3 = 27.0*denom dphidI1, dphidI2, dphidI3 = dphidcs*dcsdI1, dphidcs*dcsdI2, dphidcs*dcsdI3 - dI1s3I2dI1 = I1/I1s3I2 + dI1s3I2dI1 = I[0]/I1s3I2 dI1s3I2dI2 = -1.5/I1s3I2 tcoeff = third2*I1s3I2 @@ -150,13 +149,13 @@ def math_ln(x): return np.log(x + 1.0e-32) def sym6toT33(sym6): - ''' Shape the symmetric stress tensor(6) into (3,3) ''' + """Shape the symmetric stress tensor(6) into (3,3)""" return np.array([[sym6[0],sym6[3],sym6[5]], [sym6[3],sym6[1],sym6[4]], [sym6[5],sym6[4],sym6[2]]]) def t33toSym6(t33): - ''' Shape the stress tensor(3,3) into symmetric (6) ''' + """Shape the stress tensor(3,3) into symmetric (6)""" return np.array([ t33[0,0], t33[1,1], t33[2,2], @@ -165,9 +164,6 @@ def t33toSym6(t33): (t33[2,0] + t33[0,2])/2.0,]) # * * 2 class Criteria(object): - ''' - needs doc string - ''' def __init__(self, criterion, uniaxialStress,exponent, dimension): self.stress0 = uniaxialStress if exponent < 0.0: # Fitting exponent m @@ -183,9 +179,8 @@ class Criteria(object): return self.func(self.stress0, paras, sigmas,self.mFix,self.criteria,self.dim,Jac=True) class Vegter(object): - ''' - Vegter yield criterion - ''' + """Vegter yield criterion""" + def __init__(self, refPts, refNormals,nspace=11): self.refPts, self.refNormals = self._getRefPointsNormals(refPts, refNormals) self.hingePts = self._getHingePoints() @@ -211,11 +206,12 @@ class Vegter(object): return refPts,refNormals def _getHingePoints(self): - ''' - calculate the hinge point B according to the reference points A,C and the normals n,m - refPoints = np.array([[p1_x, p1_y], [p2_x, p2_y]]); - refNormals = np.array([[n1_x, n1_y], [n2_x, n2_y]]) - ''' + """ + calculate the hinge point B according to the reference points A,C and the normals n,m + + refPoints = np.array([[p1_x, p1_y], [p2_x, p2_y]]); + refNormals = np.array([[n1_x, n1_y], [n2_x, n2_y]]) + """ def hingPoint(points, normals): A1 = points[0][0]; A2 = points[0][1] C1 = points[1][0]; C2 = points[1][1] @@ -235,9 +231,7 @@ class Vegter(object): return np.array([bezier(self.refPts[i:i+2],self.hingePts[i]) for i in xrange(len(self.refPts)-1)]) def VetgerCriterion(stress,lankford, rhoBi0, theta=0.0): - ''' - 0-pure shear; 1-uniaxial; 2-plane strain; 3-equi-biaxial - ''' + """0-pure shear; 1-uniaxial; 2-plane strain; 3-equi-biaxial""" def getFourierParas(r): # get the value after Fourier transformation nset = len(r) @@ -262,12 +256,6 @@ def VetgerCriterion(stress,lankford, rhoBi0, theta=0.0): for j in xrange(3): refPts[j,i] = np.dot(getFourierParas(strsSet[:,j,i]), fouriercoeffs) - rhoUn = 
np.dot(getFourierParas(-lankford/(lankford+1)), fouriercoeffs) - rhoBi = (rhoBi0+1 + (rhoBi0-1)*np.cos(2.0*theta))/(rhoBi0+1 - (rhoBi0-1)*np.cos(2.0*theta)) - nVec = lambda rho : np.array([1.0,rho]/np.sqrt(1.0+rho**2)) - refNormals = np.array([nVec(-1.0),nVec(rhoUn),nVec(0.0),nVec(rhoBi)]) - - vegter = Vegter(refPts, refNormals) def Tresca(eqStress=None, #not needed/supported paras=None, @@ -276,10 +264,11 @@ def Tresca(eqStress=None, #not needed/supported criteria=None, #not needed/supported dim=3, Jac=False): - ''' - Tresca yield criterion - the fitted parameters is: paras(sigma0) - ''' + """ + Tresca yield criterion + + the fitted parameter is paras(sigma0) + """ if not Jac: lambdas = principalStresses(sigmas) r = np.amax(np.array([abs(lambdas[2,:]-lambdas[1,:]),\ @@ -296,13 +285,14 @@ def Cazacu_Barlat(eqStress=None, criteria=None, dim=3, #2D also possible Jac=False): - ''' - Cazacu-Barlat (CB) yield criterion - the fitted parameters are: - a1,a2,a3,a6; b1,b2,b3,b4,b5,b10; c for plane stress - a1,a2,a3,a4,a5,a6; b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,b11; c: for general case - mFix are invalid input - ''' + """ + Cazacu-Barlat (CB) yield criterion + + the fitted parameters are: + a1,a2,a3,a6; b1,b2,b3,b4,b5,b10; c for plane stress + a1,a2,a3,a4,a5,a6; b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,b11; c: for general case + mFix is ignored + """ s11,s22,s33,s12,s23,s31 = sigmas if dim == 2: (a1,a2,a3,a4), (b1,b2,b3,b4,b5,b10), c = paras[0:4],paras[4:10],paras[10] @@ -356,13 +346,14 @@ def Drucker(eqStress=None,#not needed/supported criteria=None, dim=3, Jac=False): - ''' - Drucker yield criterion - the fitted parameters are - sigma0, C_D for Drucker(p=1); - sigma0, C_D, p for general Drucker - eqStress, mFix are invalid inputs - ''' + """ + Drucker yield criterion + + the fitted parameters are + sigma0, C_D for Drucker(p=1); + sigma0, C_D, p for general Drucker + eqStress, mFix are invalid inputs + """ if criteria == 'drucker': sigma0, C_D= paras p = 1.0 @@ -386,7 +377,7 @@ def Drucker(eqStress=None,#not needed/supported if criteria == 'drucker': return np.vstack((-r/sigma0, -drdl*J3_2p)).T else: - dldp = 3.0*J2_3p*math_ln(J2) - 2.0*C_D*J3_2p*math_ln(J3) + dldp = 3.0*J2_3p*math_ln(J[1]) - 2.0*C_D*J3_2p*math_ln(J[2]) jp = drdl*dldp + r*math_ln(left)/(-6.0*p*p) if mFix[0]: return np.vstack((-r/sigma0, -drdl*J3_2p)).T @@ -399,12 +390,13 @@ def Hill1948(eqStress=None,#not needed/supported criteria=None,#not needed/supported dim=3, Jac=False): - ''' - Hill 1948 yield criterion - the fitted parameters are: - F, G, H, L, M, N for 3D - F, G, H, N for 2D - ''' + """ + Hill 1948 yield criterion + + the fitted parameters are: + F, G, H, L, M, N for 3D + F, G, H, N for 2D + """ s11,s22,s33,s12,s23,s31 = sigmas if dim == 2: # plane stress jac = np.array([ s22**2, s11**2, (s11-s22)**2, 2.0*s12**2]) @@ -423,11 +415,11 @@ def Hill1979(eqStress=None,#not needed/supported criteria=None,#not needed/supported dim=3, Jac=False): - ''' - Hill 1979 yield criterion - the fitted parameters are: f,g,h,a,b,c,m - ''' - + """ + Hill 1979 yield criterion + + the fitted parameters are: f,g,h,a,b,c,m + """ if mFix[0]: m = mFix[1] else: @@ -458,14 +450,14 @@ def Hosford(eqStress=None, criteria=None, dim=3, Jac=False): - ''' - Hosford family criteria - the fitted parameters are: - von Mises: sigma0 - Hershey: (1) sigma0, a, when a is not fixed; (2) sigma0, when a is fixed - general Hosford: (1) F,G,H, a, when a is not fixed; (2) F,G,H, when a is fixed - ''' + """ + Hosford family criteria + the fitted parameters are: + von Mises: sigma0 + 
Hershey: (1) sigma0, a, when a is not fixed; (2) sigma0, when a is fixed + general Hosford: (1) F,G,H, a, when a is not fixed; (2) F,G,H, when a is fixed + """ if criteria == 'vonmises': sigma0 = paras coeff = np.ones(3) @@ -509,11 +501,12 @@ def Barlat1989(eqStress, criteria, dim=3, Jac=False): - ''' - Barlat-Lian 1989 yield criteria - the fitted parameters are: - Anisotropic: a, h, p, m; m is optional - ''' + """ + Barlat-Lian 1989 yield criteria + + the fitted parameters are: + Anisotropic: a, h, p, m; m is optional + """ a, h, p = paras[0:3] if mFix[0]: m = mFix[1] else: m = paras[-1] @@ -536,7 +529,7 @@ def Barlat1989(eqStress, drdl, drdm = r/m/left, r*math_ln(0.5*left)*(-1.0/m/m) dldm = np.dot(np.array([a,a,c]),fm*math_ln(fs))*0.5 - ja = drdl*dlda + ja,jc = drdl*dlda, drdl*dldc jh,jp = drdl*(dldk1*dk1dh + dldk2*dk2dh), drdl*dldk2*dk2dp jm = drdl*dldm + drdm @@ -544,13 +537,14 @@ def Barlat1989(eqStress, else: return np.vstack((ja,jc,jh,jp,jm)).T def Barlat1991(eqStress, paras, sigmas, mFix, criteria, dim, Jac=False): - ''' - Barlat 1991 criteria - the fitted parameters are: - Anisotropic: a, b, c, f, g, h, m for 3D - a, b, c, h, m for plane stress - m is optional - ''' + """ + Barlat 1991 criteria + + the fitted parameters are: + Anisotropic: a, b, c, f, g, h, m for 3D + a, b, c, h, m for plane stress + m is optional + """ if dim == 2: coeff = paras[0:4] # plane stress else: coeff = paras[0:6] # general case if mFix[0]: m = mFix[1] @@ -605,12 +599,13 @@ def Barlat1991(eqStress, paras, sigmas, mFix, criteria, dim, Jac=False): else: return np.vstack((dfdI2*dI2dx + dfdI3*dI3dx, jm)).T def BBC2000(eqStress, paras, sigmas, mFix, criteria, dim, Jac=False): - ''' - BBC2000 yield criterion - the fitted parameters are - d,e,f,g, b,c,a, k; k is optional - criteria are invalid input - ''' + """ + BBC2000 yield criterion + + the fitted parameters are + d,e,f,g, b,c,a, k; k is optional + criteria are invalid input + """ d,e,f,g, b,c,a= paras[0:7] if mFix[0]: k = mFix[1] else: k = paras[-1] @@ -647,12 +642,13 @@ def BBC2000(eqStress, paras, sigmas, mFix, criteria, dim, Jac=False): def BBC2003(eqStress, paras, sigmas, mFix, criteria, dim, Jac=False): - ''' - BBC2003 yield criterion - the fitted parameters are - M,N,P,Q,R,S,T,a, k; k is optional - criteria are invalid input - ''' + """ + BBC2003 yield criterion + + the fitted parameters are + M,N,P,Q,R,S,T,a, k; k is optional + criteria are invalid input + """ M,N,P,Q,R,S,T,a = paras[0:8] if mFix[0]: k = mFix[1] else: k = paras[-1] @@ -689,12 +685,13 @@ def BBC2003(eqStress, paras, sigmas, mFix, criteria, dim, Jac=False): else : return np.vstack((J, drdl*dldk+drdk)).T def BBC2005(eqStress, paras, sigmas, mFix, criteria, dim, Jac=False): - ''' - BBC2005 yield criterion - the fitted parameters are - a, b, L ,M, N, P, Q, R, k; k is optional - criteria are invalid input - ''' + """ + BBC2005 yield criterion + + the fitted parameters are + a, b, L, M, N, P, Q, R, k; k is optional + criteria are invalid input + """ a,b,L, M, N, P, Q, R = paras[0:8] if mFix[0]: k = mFix[1] else: k = paras[-1] @@ -739,10 +736,12 @@ def BBC2005(eqStress, paras, sigmas, mFix, criteria, dim, Jac=False): else : return np.vstack(J, dldk+dsBarde*dedk).T def Yld2000(eqStress, paras, sigmas, mFix, criteria, dim, Jac=False): - ''' - C: c11,c22,c66 c12=c21=1.0 JAC NOT PASS - D: d11,d12,d21,d22,d66 - ''' + """ + Yld2000 yield criterion + + C: c11,c22,c66 c12=c21=1.0 JAC NOT PASS + D: d11,d12,d21,d22,d66 + """ C,D = paras[0:3], paras[3:8] if mFix[0]: m = mFix[1]
else: m = paras[-1] @@ -769,8 +768,7 @@ def Yld2000(eqStress, paras, sigmas, mFix, criteria, dim, Jac=False): drdl, drdm = r/m/left, r*math_ln(0.5*left)*(-1.0/m/m) #/(-m*m) dldm = ( phi1*math_ln(phi1s) + phi21*math_ln(phi21s) + phi22*math_ln(phi22s) )*0.5 zero = np.zeros_like(s11); num = len(s11) - def dPrincipalds((X1,X2,X12)): - # the derivative of principla with regards to stress + def dPrincipalds((X1,X2,X12)): # derivative of principal with respect to stress temp = 1.0/np.sqrt( (X1-X2)**2 + 4.0*X12**2 ) dP1dsi = 0.5*np.array([ 1.0+temp*(X1-X2), 1.0-temp*(X1-X2), temp*4.0*X12]) dP2dsi = 0.5*np.array([ 1.0-temp*(X1-X2), 1.0+temp*(X1-X2), -temp*4.0*X12]) @@ -798,14 +796,15 @@ def Yld2000(eqStress, paras, sigmas, mFix, criteria, dim, Jac=False): else: return np.vstack((jC,jD,jm)).T def Yld200418p(eqStress, paras, sigmas, mFix, criteria, dim, Jac=False): - ''' - Yld2004-18p yield criterion - the fitted parameters are - C: c12,c21,c23,c32,c31,c13,c44,c55,c66; D: d12,d21,d23,d32,d31,d13,d44,d55,d66 for 3D - C: c12,c21,c23,c32,c31,c13,c44; D: d12,d21,d23,d32,d31,d13,d44 for 2D - and m, m is optional - criteria are invalid input - ''' + """ + Yld2004-18p yield criterion + + the fitted parameters are + C: c12,c21,c23,c32,c31,c13,c44,c55,c66; D: d12,d21,d23,d32,d31,d13,d44,d55,d66 for 3D + C: c12,c21,c23,c32,c31,c13,c44; D: d12,d21,d23,d32,d31,d13,d44 for 2D + and m; m is optional + criteria is ignored + """ if dim == 2: C,D = np.append(paras[0:7],[0.0,0.0]), np.append(paras[7:14],[0.0,0.0]) else: C,D = paras[0:9], paras[9:18] if mFix[0]: m = mFix[1] @@ -843,14 +842,15 @@ def Yld200418p(eqStress, paras, sigmas, mFix, criteria, dim, Jac=False): else: return np.vstack((jc,jd,jm)).T def KarafillisBoyce(eqStress, paras, sigmas, mFix, criteria, dim, Jac=False): - ''' - Karafillis-Boyce - the fitted parameters are - c11,c12,c13,c14,c15,c16,c,m for 3D - c11,c12,c13,c14,c,m for plane stress - 0 < c < 1, m are optional - if nExpo > 0.0: nExponent = nExpo + if options.exponent > 0.0: nExponent = options.exponent else: nExponent = 0 nameCriterion = self.name.lower() criteria = Criteria(nameCriterion,self.uniaxial,self.expo, self.dimen) @@ -1225,13 +1213,10 @@ class Criterion(object): pass return popt - #--------------------------------------------------------------------------------------------------- class myThread (threading.Thread): -#--------------------------------------------------------------------------------------------------- - ''' - Runner class - ''' + """Runner""" + def __init__(self, threadID): threading.Thread.__init__(self) self.threadID = threadID @@ -1246,8 +1231,6 @@ class myThread (threading.Thread): s.release() def doSim(thread): - -# if load case do not exist, create new one s.acquire() global myLoad loadNo=loadcaseNo() @@ -1337,7 +1320,7 @@ def doSim(thread): strainAll[i]=np.append(strainAll[i], deformationRate[i]) f.write( str(threshold)+' '+ ' '.join(map(str,myFit.fit(stressAll[i].reshape(len(stressAll[i])//6,6).transpose())))+'\n') - except Exception as detail: + except Exception: damask.util.croak('Could not fit results of simulation (%s)'%thread) s.release() return @@ -1440,7 +1423,7 @@ else : stressUnit = 1.0e6 if options.dimension not in fitCriteria[options.criterion]['dimen']: parser.error('invalid dimension for selected criterion') -if options.criterion not in ['vonmises','tresca','drucker','hill1984'] and options.eqStress == None: +if options.criterion not in ['vonmises','tresca','drucker','hill1984'] and options.eqStress is None: parser.error('please specify an equivalent stress (e.g.
fitting to von Mises)') run = runFit(options.exponent, options.eqStress, options.dimension, options.criterion) diff --git a/processing/post/3Dvisualize.py b/processing/post/3Dvisualize.py index 8abfe9676..c18fbb694 100755 --- a/processing/post/3Dvisualize.py +++ b/processing/post/3Dvisualize.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,re,string,fnmatch,vtk +import os,sys,re,fnmatch,vtk import numpy as np from optparse import OptionParser import damask @@ -56,8 +56,7 @@ def unravel(item): # ++++++++++++++++++++++++++++++++++++++++++++++++++++ def vtk_writeASCII_mesh(mesh,data,res,sep): -# ++++++++++++++++++++++++++++++++++++++++++++++++++++ - """ function writes data array defined on a hexahedral mesh (geometry) """ + """function writes data array defined on a hexahedral mesh (geometry)""" info = {\ 'tensor': {'name':'tensor','len':9},\ 'vector': {'name':'vector','len':3},\ @@ -110,11 +109,10 @@ def vtk_writeASCII_mesh(mesh,data,res,sep): ] return cmds - -# +++++++++++++++++++++++++++++++++++++++++++++++++++ + +#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ def vtk_writeASCII_points(coordinates,data,res,sep): -# +++++++++++++++++++++++++++++++++++++++++++++++++++ - """ function writes data array defined on a point field """ + """function writes data array defined on a point field""" N = res[0]*res[1]*res[2] cmds = [\ @@ -216,7 +214,7 @@ for filename in args: content = file.readlines() file.close() m = re.search('(\d+)\s*head', content[0].lower()) - if m == None: + if m is None: continue print filename,'\n' sys.stdout.flush() @@ -432,4 +430,4 @@ for filename in args: vtk = open(os.path.join(head,what+'_'+os.path.splitext(tail)[0]+'.vtk'), 'w') output(out[what],{'filepointer':vtk},'File') vtk.close() - print + print \ No newline at end of file diff --git a/processing/post/addAPS34IDEstrainCoords.py b/processing/post/addAPS34IDEstrainCoords.py index 2dcfaa98f..0258606ca 100755 --- a/processing/post/addAPS34IDEstrainCoords.py +++ b/processing/post/addAPS34IDEstrainCoords.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string +import os,sys import numpy as np from optparse import OptionParser import damask @@ -19,13 +19,13 @@ Transform X,Y,Z,F APS BeamLine 34 coordinates to x,y,z APS strain coordinates. 
""", version = scriptID) -parser.add_option('-f','--frame', dest='frame', nargs=4, type='string', metavar='', +parser.add_option('-f','--frame', dest='frame', nargs=4, type='string', metavar='string string string string', help='APS X,Y,Z coords, and depth F') parser.set_defaults(frame = None) (options,filenames) = parser.parse_args() -if options.frame == None: +if options.frame is None: parser.error('no data column specified...') @@ -33,7 +33,7 @@ datainfo = {'len':3, 'label':[] } -if options.frame != None: datainfo['label'] += options.frame +datainfo['label'] += options.frame # --- loop over input files ------------------------------------------------------------------------- if filenames == []: @@ -75,8 +75,8 @@ for name in filenames: # ------------------------------------------ process data ------------------------------------------ theta=-0.75*np.pi RotMat2TSL=np.array([[1., 0., 0.], - [0., np.cos(theta), np.sin(theta)], - [0., -np.sin(theta), np.cos(theta)]]) # Orientation Matrix to account for -135 degree rotation for TSL Convention[Adapted from Chen Zhang's code] + [0., np.cos(theta), np.sin(theta)], # Orientation to account for -135 deg + [0., -np.sin(theta), np.cos(theta)]]) # rotation for TSL convention vec = np.zeros(4) outputAlive = True @@ -94,4 +94,4 @@ for name in filenames: table.input_close() # close input ASCII table (works for stdin) table.output_close() # close output ASCII table (works for stdout) if file['name'] != 'STDIN': - os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new + os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new \ No newline at end of file diff --git a/processing/post/addCalculation.py b/processing/post/addCalculation.py index dea3932e9..7d7727a18 100755 --- a/processing/post/addCalculation.py +++ b/processing/post/addCalculation.py @@ -1,8 +1,8 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,re,sys,string -import math # flake8: noqa +import os,re,sys +import math # noqa import numpy as np from optparse import OptionParser import damask @@ -39,7 +39,7 @@ parser.add_option('-f','--formula', (options,filenames) = parser.parse_args() -if options.labels == None or options.formulas == None: +if options.labels is None or options.formulas is None: parser.error('no formulas and/or labels specified.') if len(options.labels) != len(options.formulas): parser.error('number of labels ({}) and formulas ({}) do not match.'.format(len(options.labels),len(options.formulas))) @@ -126,4 +126,4 @@ for name in filenames: # ------------------------------------------ output finalization ----------------------------------- - table.close() # close ASCII tables + table.close() # close ASCII tables \ No newline at end of file diff --git a/processing/post/addCauchy.py b/processing/post/addCauchy.py index 4dd1843bf..3fb889eec 100755 --- a/processing/post/addCauchy.py +++ b/processing/post/addCauchy.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string +import os,sys import numpy as np from optparse import OptionParser import damask diff --git a/processing/post/addCauchyX.py b/processing/post/addCauchyX.py index 982bb6961..dc94e2a2e 100755 --- a/processing/post/addCauchyX.py +++ b/processing/post/addCauchyX.py @@ -1,36 +1,16 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,re,sys,math,string,h5py +import os,string,h5py import numpy as np +from optparse import OptionParser import damask -from optparse import OptionParser, Option - -# 
----------------------------- -class extendableOption(Option): -# ----------------------------- -# used for definition of new option parser action 'extend', which enables to take multiple option arguments -# taken from online tutorial http://docs.python.org/library/optparse.html - - ACTIONS = Option.ACTIONS + ("extend",) - STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",) - TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",) - ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",) - - def take_action(self, action, dest, opt, value, values, parser): - if action == "extend": - lvalue = value.split(",") - values.ensure_value(dest, []).extend(lvalue) - else: - Option.take_action(self, action, dest, opt, value, values, parser) - - # -------------------------------------------------------------------- # MAIN # -------------------------------------------------------------------- -parser = OptionParser(option_class=extendableOption, usage='%prog options [file[s]]', description = """ +parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """ Add column(s) containing Cauchy stress based on given column(s) of deformation gradient and first Piola--Kirchhoff stress. @@ -50,7 +30,7 @@ parser.set_defaults(output = 'crystallite') (options,filenames) = parser.parse_args() -if options.defgrad == None or options.stress == None or options.output == None: +if options.defgrad is None or options.stress is None or options.output is None: parser.error('missing data column...') @@ -79,6 +59,3 @@ for myFile in files: cauchy[p,...] = 1.0/np.linalg.det(defgrad[p,...])*np.dot(stress[p,...],defgrad[p,...].T) # [Cauchy] = (1/det(F)) * [P].[F_transpose] cauchyFile = myFile['file']['increments/'+inc+'/'+options.output+'/'+instance].create_dataset('cauchy', data=cauchy) cauchyFile.attrs['units'] = 'Pa' - - - diff --git a/processing/post/addCompatibilityMismatch.py b/processing/post/addCompatibilityMismatch.py index 6c90fe0e4..f237f7d19 100755 --- a/processing/post/addCompatibilityMismatch.py +++ b/processing/post/addCompatibilityMismatch.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string +import os,sys import numpy as np from optparse import OptionParser import damask @@ -81,7 +81,6 @@ for name in filenames: table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) if options.shape: table.labels_append('shapeMismatch({})'.format(options.defgrad)) if options.volume: table.labels_append('volMismatch({})'.format(options.defgrad)) - #table.head_write() # --------------- figure out size and grid --------------------------------------------------------- @@ -92,7 +91,7 @@ for name in filenames: maxcorner = np.array(map(max,coords)) grid = np.array(map(len,coords),'i') size = grid/np.maximum(np.ones(3,'d'), grid-1.0) * (maxcorner-mincorner) # size from edge to edge = dim * n/(n-1) - size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 equal to smallest among other spacings + size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 set to smallest among other spacings N = grid.prod() @@ -116,8 +115,8 @@ for name in filenames: while table.data_read(): (x,y,z) = damask.util.gridLocation(idx,grid) # figure out (x,y,z) position from line count idx += 1 - F[0:3,0:3,x,y,z] = np.array(map(float,table.data[column:column+9]),'d').reshape(3,3) - print 'hm' + F[0:3,0:3,x,y,z] = np.array(map(float,table.data[column:column+9]),'d').reshape(3,3) + Favg = 
damask.core.math.tensorAvg(F) centres = damask.core.mesh.deformedCoordsFFT(size,F,Favg,[1.0,1.0,1.0]) diff --git a/processing/post/addCumulative.py b/processing/post/addCumulative.py index 2233a784e..a65677883 100755 --- a/processing/post/addCumulative.py +++ b/processing/post/addCumulative.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string +import os,sys import numpy as np from optparse import OptionParser import damask diff --git a/processing/post/addCurl.py b/processing/post/addCurl.py index 6e2affd9a..683fc0631 100755 --- a/processing/post/addCurl.py +++ b/processing/post/addCurl.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string,math +import os,sys,math import numpy as np from optparse import OptionParser import damask @@ -10,40 +10,35 @@ scriptName = os.path.splitext(os.path.basename(__file__))[0] scriptID = ' '.join([scriptName,damask.version]) def curlFFT(geomdim,field): - N = grid.prod() # field size - n = np.array(np.shape(field)[3:]).prod() # data size + grid = np.array(np.shape(field)[2::-1]) + N = grid.prod() # field size + n = np.array(np.shape(field)[3:]).prod() # data size - if n == 3: - dataType = 'vector' - elif n == 9: - dataType = 'tensor' + if n == 3: dataType = 'vector' + elif n == 9: dataType = 'tensor' field_fourier = np.fft.fftpack.rfftn(field,axes=(0,1,2)) curl_fourier = np.zeros(field_fourier.shape,'c16') # differentiation in Fourier space k_s = np.zeros([3],'i') - TWOPIIMG = (0.0+2.0j*math.pi) + TWOPIIMG = 2.0j*math.pi for i in xrange(grid[2]): k_s[0] = i - if(grid[2]%2==0 and i == grid[2]//2): # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011) - k_s[0]=0 - elif (i > grid[2]//2): - k_s[0] = k_s[0] - grid[2] + if grid[2]%2 == 0 and i == grid[2]//2: k_s[0] = 0 # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011) + elif i > grid[2]//2: k_s[0] -= grid[2] for j in xrange(grid[1]): k_s[1] = j - if(grid[1]%2==0 and j == grid[1]//2): # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011) - k_s[1]=0 - elif (j > grid[1]//2): - k_s[1] = k_s[1] - grid[1] + if grid[1]%2 == 0 and j == grid[1]//2: k_s[1] = 0 # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011) + elif j > grid[1]//2: k_s[1] -= grid[1] for k in xrange(grid[0]//2+1): k_s[2] = k - if(grid[0]%2==0 and k == grid[0]//2): # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011) - k_s[2]=0 + if grid[0]%2 == 0 and k == grid[0]//2: k_s[2] = 0 # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011) + + xi = (k_s/geomdim)[2::-1].astype('c16') # reversing the field input order - xi = np.array([k_s[2]/geomdim[2]+0.0j,k_s[1]/geomdim[1]+0.j,k_s[0]/geomdim[0]+0.j],'c16') if dataType == 'tensor': for l in xrange(3): curl_fourier[i,j,k,0,l] = ( field_fourier[i,j,k,l,2]*xi[1]\ @@ -76,23 +71,23 @@ Deals with both vector- and tensor-valued fields. 
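The curlFFT rewrite above keeps the spectral-differentiation pattern intact: transform the field, multiply each Fourier coefficient by 2*pi*i times its frequency, zero the Nyquist frequency on even grids (Johnson, MIT, 2011), and transform back. A minimal one-dimensional sketch of that pattern, with illustrative names that are not part of the patch:

import numpy as np

def derivative_fft(field, length):                                # sketch: d(field)/dx for a 1-D periodic field
    n = field.size
    freq = np.fft.rfftfreq(n, d=length/float(n))                  # frequencies in cycles per unit length
    if n % 2 == 0: freq[-1] = 0.0                                 # zero the Nyquist frequency on even grids
    return np.fft.irfft(2.0j*np.pi*freq*np.fft.rfft(field), n)    # differentiate in Fourier space, transform back

x = np.linspace(0.0, 2.0*np.pi, 32, endpoint=False)
print(np.allclose(derivative_fft(np.sin(x), 2.0*np.pi), np.cos(x)))  # derivative of sin is cos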
parser.add_option('-c','--coordinates', dest = 'coords', - type = 'string', metavar='string', - help = 'column heading for coordinates [%default]') + type = 'string', metavar = 'string', + help = 'column label of coordinates [%default]') parser.add_option('-v','--vector', dest = 'vector', action = 'extend', metavar = '', - help = 'heading of columns containing vector field values') + help = 'column label(s) of vector field values') parser.add_option('-t','--tensor', dest = 'tensor', action = 'extend', metavar = '', - help = 'heading of columns containing tensor field values') + help = 'column label(s) of tensor field values') -parser.set_defaults(coords = 'ipinitialcoord', +parser.set_defaults(coords = 'pos', ) (options,filenames) = parser.parse_args() -if options.vector == None and options.tensor == None: +if options.vector is None and options.tensor is None: parser.error('no data column specified.') # --- loop over input files ------------------------------------------------------------------------- @@ -100,10 +95,8 @@ if options.vector == None and options.tensor == None: if filenames == []: filenames = [None] for name in filenames: - try: - table = damask.ASCIItable(name = name,buffered = False) - except: - continue + try: table = damask.ASCIItable(name = name,buffered = False) + except: continue damask.util.report(scriptName,name) # ------------------------------------------ read header ------------------------------------------ @@ -161,8 +154,9 @@ for name in filenames: stack = [table.data] for type, data in items.iteritems(): for i,label in enumerate(data['active']): - stack.append(curlFFT(size[::-1], # we need to reverse order here, because x is fastest,ie rightmost, but leftmost in our x,y,z notation - table.data[:,data['column'][i]:data['column'][i]+data['dim']].\ + # we need to reverse order here, because x is fastest,ie rightmost, but leftmost in our x,y,z notation + stack.append(curlFFT(size[::-1], + table.data[:,data['column'][i]:data['column'][i]+data['dim']]. 
reshape([grid[2],grid[1],grid[0]]+data['shape']))) # ------------------------------------------ output result ----------------------------------------- diff --git a/processing/post/addDeformedConfiguration.py b/processing/post/addDeformedConfiguration.py deleted file mode 100755 index 0f5a5abc0..000000000 --- a/processing/post/addDeformedConfiguration.py +++ /dev/null @@ -1,164 +0,0 @@ -#!/usr/bin/env python -# -*- coding: UTF-8 no BOM -*- - -import os,sys,string,math -import numpy as np -from optparse import OptionParser -import damask - -scriptName = os.path.splitext(os.path.basename(__file__))[0] -scriptID = ' '.join([scriptName,damask.version]) - -#-------------------------------------------------------------------------------------------------- -def deformedCoordsFFT(F,undeformed=False): -#-------------------------------------------------------------------------------------------------- - wgt = 1.0/grid.prod() - integrator = np.array([0.+1.j,0.+1.j,0.+1.j],'c16') * size/ 2.0 / math.pi - step = size/grid - - F_fourier = np.fft.rfftn(F,axes=(0,1,2)) - coords_fourier = np.zeros(F_fourier.shape[0:4],'c16') - - if undeformed: - Favg=np.eye(3) - else: - Favg=np.real(F_fourier[0,0,0,:,:])*wgt -#-------------------------------------------------------------------------------------------------- -# integration in Fourier space - k_s = np.zeros([3],'i') - for i in xrange(grid[2]): - k_s[2] = i - if(i > grid[2]//2 ): k_s[2] = k_s[2] - grid[2] - for j in xrange(grid[1]): - k_s[1] = j - if(j > grid[1]//2 ): k_s[1] = k_s[1] - grid[1] - for k in xrange(grid[0]//2+1): - k_s[0] = k - for m in xrange(3): - coords_fourier[i,j,k,m] = sum(F_fourier[i,j,k,m,0:3]*k_s*integrator) - if (any(k_s != 0)): - coords_fourier[i,j,k,0:3] /= -sum(k_s*k_s) - -#-------------------------------------------------------------------------------------------------- -# add average to scaled fluctuation and put (0,0,0) on (0,0,0) - coords = np.fft.irfftn(coords_fourier,F.shape[0:3],axes=(0,1,2)) - - offset_coords = np.dot(F[0,0,0,:,:],step/2.0) - scaling*coords[0,0,0,0:3] - for z in xrange(grid[2]): - for y in xrange(grid[1]): - for x in xrange(grid[0]): - coords[z,y,x,0:3] = scaling*coords[z,y,x,0:3] \ - + offset_coords \ - + np.dot(Favg,step*np.array([x,y,z])) - - return coords - -# -------------------------------------------------------------------- -# MAIN -# -------------------------------------------------------------------- - -parser = OptionParser(option_class=damask.extendableOption, usage='%prog options file[s]', description = """ -Add deformed configuration of given initial coordinates. -Operates on periodic three-dimensional x,y,z-ordered data sets. 
- -""", version = scriptID) - -parser.add_option('-f', '--defgrad',dest='defgrad', metavar = 'string', - help='heading of deformation gradient columns [%default]') -parser.add_option('--reference', dest='undeformed', action='store_true', - help='map results to reference (undeformed) average configuration [%default]') -parser.add_option('--scaling', dest='scaling', action='extend', metavar = '', - help='scaling of fluctuation') -parser.add_option('-u', '--unitlength', dest='unitlength', type='float', metavar = 'float', - help='set unit length for 2D model [%default]') -parser.add_option('--coordinates', dest='coords', metavar='string', - help='column heading for coordinates [%default]') - -parser.set_defaults(defgrad = 'f') -parser.set_defaults(coords = 'ipinitialcoord') -parser.set_defaults(scaling = []) -parser.set_defaults(undeformed = False) -parser.set_defaults(unitlength = 0.0) - -(options,filenames) = parser.parse_args() - -options.scaling += [1.0 for i in xrange(max(0,3-len(options.scaling)))] -scaling = map(float, options.scaling) - - -# --- loop over input files ------------------------------------------------------------------------- - -if filenames == []: filenames = [None] - -for name in filenames: - try: - table = damask.ASCIItable(name = name, - buffered = False) - except: continue - damask.util.report(scriptName,name) - -# ------------------------------------------ read header ------------------------------------------ - - table.head_read() - -# ------------------------------------------ sanity checks ---------------------------------------- - - errors = [] - remarks = [] - - if table.label_dimension(options.coords) != 3: errors.append('coordinates {} are not a vector.'.format(options.coords)) - else: colCoord = table.label_index(options.coords) - - if table.label_dimension(options.defgrad) != 9: errors.append('deformation gradient {} is not a tensor.'.format(options.defgrad)) - else: colF = table.label_index(options.defgrad) - - if remarks != []: damask.util.croak(remarks) - if errors != []: - damask.util.croak(errors) - table.close(dismiss = True) - continue - -# --------------- figure out size and grid --------------------------------------------------------- - - table.data_readArray() - - coords = [np.unique(table.data[:,colCoord+i]) for i in xrange(3)] - mincorner = np.array(map(min,coords)) - maxcorner = np.array(map(max,coords)) - grid = np.array(map(len,coords),'i') - size = grid/np.maximum(np.ones(3,'d'), grid-1.0) * (maxcorner-mincorner) # size from edge to edge = dim * n/(n-1) - size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 equal to smallest among other spacings - - N = grid.prod() - - if N != len(table.data): errors.append('data count {} does not match grid {}x{}x{}.'.format(N,*grid)) - if errors != []: - damask.util.croak(errors) - table.close(dismiss = True) - continue - -# ------------------------------------------ assemble header --------------------------------------- - - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) - for coord in xrange(3): - label = '{}_{}_{}'.format(coord+1,options.defgrad,options.coords) - if np.any(scaling) != 1.0: label+='_{}_{}_{}'.format(scaling) - if options.undeformed: label+='_undeformed' - table.labels_append([label]) # extend ASCII header with new labels - table.head_write() - -# ------------------------------------------ read deformation gradient field ----------------------- - centroids = deformedCoordsFFT(table.data[:,colF:colF+9].reshape(grid[2],grid[1],grid[0],3,3), 
- options.undeformed) -# ------------------------------------------ process data ------------------------------------------ - table.data_rewind() - for z in xrange(grid[2]): - for y in xrange(grid[1]): - for x in xrange(grid[0]): - table.data_read() - table.data_append(list(centroids[z,y,x,:])) - table.data_write() - -# ------------------------------------------ output finalization ----------------------------------- - - table.close() # close ASCII tables diff --git a/processing/post/addDeterminant.py b/processing/post/addDeterminant.py index 263133bfa..c537e5bd0 100755 --- a/processing/post/addDeterminant.py +++ b/processing/post/addDeterminant.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string +import os,sys from optparse import OptionParser import damask @@ -32,7 +32,7 @@ parser.add_option('-t','--tensor', (options,filenames) = parser.parse_args() -if options.tensor == None: +if options.tensor is None: parser.error('no data column specified.') # --- loop over input files ------------------------------------------------------------------------- @@ -89,4 +89,4 @@ for name in filenames: # ------------------------------------------ output finalization ----------------------------------- - table.close() # close input ASCII table (works for stdin) + table.close() # close input ASCII table (works for stdin) \ No newline at end of file diff --git a/processing/post/addDeviator.py b/processing/post/addDeviator.py index bc6319c7e..492f44c70 100755 --- a/processing/post/addDeviator.py +++ b/processing/post/addDeviator.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string +import os,sys from optparse import OptionParser import damask @@ -10,7 +10,7 @@ scriptID = ' '.join([scriptName,damask.version]) oneThird = 1.0/3.0 -def deviator(m,spherical = False): # Carefull, do not change the value of m (its intent(inout)!) +def deviator(m,spherical = False): # Careful, do not change the value of m, its intent(inout)! 
sph = oneThird*(m[0]+m[4]+m[8]) dev = [ m[0]-sph, m[1], m[2], @@ -39,7 +39,7 @@ parser.add_option('-s','--spherical', (options,filenames) = parser.parse_args() -if options.tensor == None: +if options.tensor is None: parser.error('no data column specified...') # --- loop over input files ------------------------------------------------------------------------- @@ -101,4 +101,4 @@ for name in filenames: # ------------------------------------------ output finalization ----------------------------------- - table.close() # close input ASCII table (works for stdin) + table.close() # close input ASCII table (works for stdin) \ No newline at end of file diff --git a/processing/post/addDisplacements.py b/processing/post/addDisplacements.py new file mode 100755 index 000000000..b73994bde --- /dev/null +++ b/processing/post/addDisplacements.py @@ -0,0 +1,227 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 no BOM -*- + +import os,sys,math +import numpy as np +import scipy.ndimage +from optparse import OptionParser +import damask + +scriptName = os.path.splitext(os.path.basename(__file__))[0] +scriptID = ' '.join([scriptName,damask.version]) + + +#-------------------------------------------------------------------------------------------------- +def cell2node(cellData,grid): + + nodeData = 0.0 + datalen = np.array(cellData.shape[3:]).prod() + + for i in xrange(datalen): + node = scipy.ndimage.convolve(cellData.reshape(tuple(grid)+(datalen,))[...,i], + np.ones((2,2,2))/8., # 2x2x2 neighborhood of cells + mode = 'wrap', + origin = -1, # offset to have cell origin as center + ) # now averaged at cell origins + node = np.append(node,node[np.newaxis,0,:,:,...],axis=0) # wrap along z + node = np.append(node,node[:,0,np.newaxis,:,...],axis=1) # wrap along y + node = np.append(node,node[:,:,0,np.newaxis,...],axis=2) # wrap along x + + nodeData = node[...,np.newaxis] if i==0 else np.concatenate((nodeData,node[...,np.newaxis]),axis=-1) + + return nodeData + +#-------------------------------------------------------------------------------------------------- +def displacementAvgFFT(F,grid,size,nodal=False,transformed=False): + """calculate average cell center (or nodal) displacement for deformation gradient field specified in each grid cell""" + if nodal: + x, y, z = np.meshgrid(np.linspace(0,size[0],1+grid[0]), + np.linspace(0,size[1],1+grid[1]), + np.linspace(0,size[2],1+grid[2]), + indexing = 'ij') + else: + x, y, z = np.meshgrid(np.linspace(0,size[0],grid[0],endpoint=False), + np.linspace(0,size[1],grid[1],endpoint=False), + np.linspace(0,size[2],grid[2],endpoint=False), + indexing = 'ij') + + origCoords = np.concatenate((z[:,:,:,None],y[:,:,:,None],x[:,:,:,None]),axis = 3) + + F_fourier = F if transformed else np.fft.rfftn(F,axes=(0,1,2)) # transform or use provided data + Favg = np.real(F_fourier[0,0,0,:,:])/grid.prod() # take zero freq for average + avgDisplacement = np.einsum('ml,ijkl->ijkm',Favg-np.eye(3),origCoords) # dX = Favg.X + + return avgDisplacement + +#-------------------------------------------------------------------------------------------------- +def displacementFluctFFT(F,grid,size,nodal=False,transformed=False): + """calculate cell center (or nodal) displacement for deformation gradient field specified in each grid cell""" + integrator = 0.5j * size / math.pi + + kk, kj, ki = np.meshgrid(np.where(np.arange(grid[2])>grid[2]//2,np.arange(grid[2])-grid[2],np.arange(grid[2])), + np.where(np.arange(grid[1])>grid[1]//2,np.arange(grid[1])-grid[1],np.arange(grid[1])), + np.arange(grid[0]//2+1), + 
indexing = 'ij') + k_s = np.concatenate((ki[:,:,:,None],kj[:,:,:,None],kk[:,:,:,None]),axis = 3) + k_sSquared = np.einsum('...l,...l',k_s,k_s) + k_sSquared[0,0,0] = 1.0 # ignore global average frequency + +#-------------------------------------------------------------------------------------------------- +# integration in Fourier space + + displacement_fourier = -np.einsum('ijkml,ijkl,l->ijkm', + F if transformed else np.fft.rfftn(F,axes=(0,1,2)), + k_s, + integrator, + ) / k_sSquared[...,np.newaxis] + +#-------------------------------------------------------------------------------------------------- +# backtransformation to real space + + displacement = np.fft.irfftn(displacement_fourier,grid,axes=(0,1,2)) + + return cell2node(displacement,grid) if nodal else displacement + + +# -------------------------------------------------------------------- +# MAIN +# -------------------------------------------------------------------- + +parser = OptionParser(option_class=damask.extendableOption, usage='%prog options file[s]', description = """ +Add displacements resulting from deformation gradient field. +Operates on periodic three-dimensional x,y,z-ordered data sets. +Outputs at cell centers or cell nodes (into separate file). + +""", version = scriptID) + +parser.add_option('-f', '--defgrad', + dest = 'defgrad', + metavar = 'string', + help = 'column label of deformation gradient [%default]') +parser.add_option('-c', '--coordinates', + dest = 'coords', + metavar = 'string', + help = 'column label of coordinates [%default]') +parser.add_option('--nodal', + dest = 'nodal', + action = 'store_true', + help = 'output nodal (not cell-centered) displacements') + +parser.set_defaults(defgrad = 'f', + coords = 'pos', + nodal = False, + ) + +(options,filenames) = parser.parse_args() + +# --- loop over input files ------------------------------------------------------------------------- + +if filenames == []: filenames = [None] + +for name in filenames: + try: table = damask.ASCIItable(name = name, + outname = (os.path.splitext(name)[0]+ + '_nodal'+ + os.path.splitext(name)[1]) if (options.nodal and name) else None, + buffered = False) + except: continue + damask.util.report(scriptName,name) + +# ------------------------------------------ read header ------------------------------------------ + + table.head_read() + +# ------------------------------------------ sanity checks ---------------------------------------- + + errors = [] + remarks = [] + + if table.label_dimension(options.defgrad) != 9: + errors.append('deformation gradient "{}" is not a 3x3 tensor.'.format(options.defgrad)) + + coordDim = table.label_dimension(options.coords) + if not 3 >= coordDim >= 1: + errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.coords)) + elif coordDim < 3: + remarks.append('appending {} dimension{} to coordinates "{}"...'.format(3-coordDim, + 's' if coordDim < 2 else '', + options.coords)) + + if remarks != []: damask.util.croak(remarks) + if errors != []: + damask.util.croak(errors) + table.close(dismiss=True) + continue + +# --------------- figure out size and grid --------------------------------------------------------- + + table.data_readArray([options.defgrad,options.coords]) + table.data_rewind() + + if len(table.data.shape) < 2: table.data.shape += (1,) # expand to 2D shape + if table.data[:,9:].shape[1] < 3: + table.data = np.hstack((table.data, + np.zeros((table.data.shape[0], + 3-table.data[:,9:].shape[1]),dtype='f'))) # fill coords up to 3D with zeros + + if
remarks != []: damask.util.croak(remarks) + if errors != []: + damask.util.croak(errors) + table.close(dismiss = True) + continue + +# --------------- figure out size and grid --------------------------------------------------------- + + coords = [np.unique(table.data[:,9+i]) for i in xrange(3)] + mincorner = np.array(map(min,coords)) + maxcorner = np.array(map(max,coords)) + grid = np.array(map(len,coords),'i') + size = grid/np.maximum(np.ones(3,'d'), grid-1.0) * (maxcorner-mincorner) # size from edge to edge = dim * n/(n-1) + size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 set to smallest among other spacings + + N = grid.prod() + + if N != len(table.data): errors.append('data count {} does not match grid {}x{}x{}.'.format(N,*grid)) + if errors != []: + damask.util.croak(errors) + table.close(dismiss = True) + continue + +# ------------------------------------------ process data ------------------------------------------ + + F_fourier = np.fft.rfftn(table.data[:,:9].reshape(grid[2],grid[1],grid[0],3,3),axes=(0,1,2)) # perform transform only once... + + displacement = displacementFluctFFT(F_fourier,grid,size,options.nodal,transformed=True) + avgDisplacement = displacementAvgFFT (F_fourier,grid,size,options.nodal,transformed=True) + +# ------------------------------------------ assemble header --------------------------------------- + + if options.nodal: + table.info_clear() + table.labels_clear() + + table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) + table.labels_append((['{}_pos' .format(i+1) for i in xrange(3)] if options.nodal else []) + + ['{}_avg({}).{}' .format(i+1,options.defgrad,options.coords) for i in xrange(3)] + + ['{}_fluct({}).{}'.format(i+1,options.defgrad,options.coords) for i in xrange(3)] ) + table.head_write() + +# ------------------------------------------ output data ------------------------------------------- + + zrange = np.linspace(0,size[2],1+grid[2]) if options.nodal else xrange(grid[2]) + yrange = np.linspace(0,size[1],1+grid[1]) if options.nodal else xrange(grid[1]) + xrange = np.linspace(0,size[0],1+grid[0]) if options.nodal else xrange(grid[0]) + + for i,z in enumerate(zrange): + for j,y in enumerate(yrange): + for k,x in enumerate(xrange): + if options.nodal: table.data_clear() + else: table.data_read() + table.data_append([x,y,z] if options.nodal else []) + table.data_append(list(avgDisplacement[i,j,k,:])) + table.data_append(list( displacement[i,j,k,:])) + table.data_write() + +# ------------------------------------------ output finalization ----------------------------------- + + table.close() # close ASCII tables diff --git a/processing/post/addDivergence.py b/processing/post/addDivergence.py index b661b647b..8d58367ac 100755 --- a/processing/post/addDivergence.py +++ b/processing/post/addDivergence.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string,math +import os,sys,math import numpy as np from optparse import OptionParser import damask @@ -10,39 +10,35 @@ scriptName = os.path.splitext(os.path.basename(__file__))[0] scriptID = ' '.join([scriptName,damask.version]) def divFFT(geomdim,field): - N = grid.prod() # field size - n = np.array(np.shape(field)[3:]).prod() # data size + grid = np.array(np.shape(field)[2::-1]) + N = grid.prod() # field size + n = np.array(np.shape(field)[3:]).prod() # data size field_fourier = np.fft.fftpack.rfftn(field,axes=(0,1,2)) - div_fourier = np.zeros(field_fourier.shape[0:len(np.shape(field))-1],'c16') # size depents on 
whether tensor or vector + div_fourier = np.zeros(field_fourier.shape[0:len(np.shape(field))-1],'c16') # size depends on whether tensor or vector # differentiation in Fourier space k_s=np.zeros([3],'i') - TWOPIIMG = (0.0+2.0j*math.pi) + TWOPIIMG = 2.0j*math.pi for i in xrange(grid[2]): k_s[0] = i - if(grid[2]%2==0 and i == grid[2]//2): # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011) - k_s[0]=0 - elif (i > grid[2]//2): - k_s[0] = k_s[0] - grid[2] + if grid[2]%2 == 0 and i == grid[2]//2: k_s[0] = 0 # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011) + elif i > grid[2]//2: k_s[0] -= grid[2] for j in xrange(grid[1]): k_s[1] = j - if(grid[1]%2==0 and j == grid[1]//2): # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011) - k_s[1]=0 - elif (j > grid[1]//2): - k_s[1] = k_s[1] - grid[1] + if grid[1]%2 == 0 and j == grid[1]//2: k_s[1] = 0 # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011) + elif j > grid[1]//2: k_s[1] -= grid[1] for k in xrange(grid[0]//2+1): k_s[2] = k - if(grid[0]%2==0 and k == grid[0]//2): # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011) - k_s[2]=0 + if grid[0]%2 == 0 and k == grid[0]//2: k_s[2] = 0 # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011) - xi=np.array([k_s[2]/geomdim[2]+0.0j,k_s[1]/geomdim[1]+0.j,k_s[0]/geomdim[0]+0.j],'c16') - if n == 9: # tensor, 3x3 -> 3 + xi = (k_s/geomdim)[2::-1].astype('c16') # reversing the field input order + if n == 9: # tensor, 3x3 -> 3 for l in xrange(3): div_fourier[i,j,k,l] = sum(field_fourier[i,j,k,l,0:3]*xi) *TWOPIIMG - elif n == 3: # vector, 3 -> 1 + elif n == 3: # vector, 3 -> 1 div_fourier[i,j,k] = sum(field_fourier[i,j,k,0:3]*xi) *TWOPIIMG return np.fft.fftpack.irfftn(div_fourier,axes=(0,1,2)).reshape([N,n/3]) @@ -62,33 +58,31 @@ Deals with both vector- and tensor-valued fields.
parser.add_option('-c','--coordinates', dest = 'coords', type = 'string', metavar = 'string', - help = 'column heading for coordinates [%default]') + help = 'column label of coordinates [%default]') parser.add_option('-v','--vector', dest = 'vector', action = 'extend', metavar = '', - help = 'heading of columns containing vector field values') + help = 'column label(s) of vector field values') parser.add_option('-t','--tensor', dest = 'tensor', action = 'extend', metavar = '', - help = 'heading of columns containing tensor field values') + help = 'column label(s) of tensor field values') -parser.set_defaults(coords = 'ipinitialcoord', +parser.set_defaults(coords = 'pos', ) (options,filenames) = parser.parse_args() -if options.vector == None and options.tensor == None: +if options.vector is None and options.tensor is None: parser.error('no data column specified.') -# --- loop over input files ------------------------------------------------------------------------- +# --- loop over input files ------------------------------------------------------------------------ if filenames == []: filenames = [None] for name in filenames: - try: - table = damask.ASCIItable(name = name,buffered = False) - except: - continue + try: table = damask.ASCIItable(name = name,buffered = False) + except: continue damask.util.report(scriptName,name) # ------------------------------------------ read header ------------------------------------------ @@ -140,15 +134,16 @@ for name in filenames: maxcorner = np.array(map(max,coords)) grid = np.array(map(len,coords),'i') size = grid/np.maximum(np.ones(3,'d'), grid-1.0) * (maxcorner-mincorner) # size from edge to edge = dim * n/(n-1) - size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 equal to smallest among other spacings + size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 equal to smallest among other ones # ------------------------------------------ process value field ----------------------------------- stack = [table.data] for type, data in items.iteritems(): for i,label in enumerate(data['active']): - stack.append(divFFT(size[::-1], # we need to reverse order here, because x is fastest,ie rightmost, but leftmost in our x,y,z notation - table.data[:,data['column'][i]:data['column'][i]+data['dim']].\ + # we need to reverse order here, because x is fastest, i.e. rightmost, but leftmost in our x,y,z notation + stack.append(divFFT(size[::-1], + table.data[:,data['column'][i]:data['column'][i]+data['dim']].
reshape([grid[2],grid[1],grid[0]]+data['shape']))) # ------------------------------------------ output result ----------------------------------------- diff --git a/processing/post/addEhkl.py b/processing/post/addEhkl.py index 1e8688518..96ec79a96 100755 --- a/processing/post/addEhkl.py +++ b/processing/post/addEhkl.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string +import os,sys import numpy as np from optparse import OptionParser import damask @@ -48,7 +48,7 @@ parser.set_defaults(hkl = (1,1,1), (options,filenames) = parser.parse_args() -if options.stiffness == None: +if options.stiffness is None: parser.error('no data column specified...') # --- loop over input files ------------------------------------------------------------------------- @@ -93,4 +93,4 @@ for name in filenames: # ------------------------------------------ output finalization ----------------------------------- - table.close() # close ASCII tables + table.close() # close ASCII tables \ No newline at end of file diff --git a/processing/post/addEuclideanDistance.py b/processing/post/addEuclideanDistance.py index 8139e7536..5299c4d27 100755 --- a/processing/post/addEuclideanDistance.py +++ b/processing/post/addEuclideanDistance.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string,itertools +import os,sys,itertools import numpy as np from scipy import ndimage from optparse import OptionParser @@ -89,23 +89,24 @@ Add column(s) containing Euclidean distance to grain structural features: bounda """, version = scriptID) parser.add_option('-c','--coordinates', dest='coords', metavar='string', - help='column heading for coordinates [%default]') + help='column label of coordinates [%default]') parser.add_option('-i','--identifier', dest='id', metavar = 'string', - help='heading of column containing grain identifier [%default]') + help='column label of grain identifier [%default]') parser.add_option('-t','--type', dest = 'type', action = 'extend', metavar = '', help = 'feature type {%s} '%(', '.join(map(lambda x:'/'.join(x['names']),features))) ) parser.add_option('-n','--neighborhood',dest='neighborhood', choices = neighborhoods.keys(), metavar = 'string', help = 'type of neighborhood [neumann] {%s}'%(', '.join(neighborhoods.keys()))) -parser.add_option('-s', '--scale', dest = 'scale', type = 'float', metavar='float', +parser.add_option('-s', '--scale', dest = 'scale', type = 'float', metavar = 'float', help = 'voxel size [%default]') -parser.set_defaults(coords = 'ipinitialcoord') -parser.set_defaults(id = 'texture') -parser.set_defaults(neighborhood = 'neumann') -parser.set_defaults(scale = 1.0) +parser.set_defaults(coords = 'pos', + id = 'texture', + neighborhood = 'neumann', + scale = 1.0, + ) (options,filenames) = parser.parse_args() -if options.type == None: +if options.type is None: parser.error('no feature type selected.') if not set(options.type).issubset(set(list(itertools.chain(*map(lambda x: x['names'],features))))): parser.error('type must be chosen from (%s).'%(', '.join(map(lambda x:'|'.join(x['names']),features))) ) @@ -125,10 +126,8 @@ for i,feature in enumerate(features): if filenames == []: filenames = [None] for name in filenames: - try: - table = damask.ASCIItable(name = name, buffered = False) - except: - continue + try: table = damask.ASCIItable(name = name, buffered = False) + except: continue damask.util.report(scriptName,name) # ------------------------------------------ read header 
------------------------------------------ @@ -141,9 +140,11 @@ for name in filenames: remarks = [] column = {} - if table.label_dimension(options.coords) != 3: errors.append('coordinates {} are not a vector.'.format(options.coords)) + coordDim = table.label_dimension(options.coords) + if not 3 >= coordDim >= 1: + errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.coords)) else: coordCol = table.label_index(options.coords) - + if table.label_dimension(options.id) != 1: errors.append('grain identifier {} not found.'.format(options.id)) else: idCol = table.label_index(options.id) @@ -164,18 +165,20 @@ for name in filenames: table.data_readArray() - coords = [{},{},{}] - for i in xrange(len(table.data)): - for j in xrange(3): - coords[j][str(table.data[i,coordCol+j])] = True - grid = np.array(map(len,coords),'i') - size = grid/np.maximum(np.ones(3,'d'),grid-1.0)* \ - np.array([max(map(float,coords[0].keys()))-min(map(float,coords[0].keys())),\ - max(map(float,coords[1].keys()))-min(map(float,coords[1].keys())),\ - max(map(float,coords[2].keys()))-min(map(float,coords[2].keys())),\ - ],'d') # size from bounding box, corrected for cell-centeredness + coords = [np.unique(table.data[:,coordCol+i]) for i in xrange(coordDim)] + mincorner = np.array(map(min,coords)) + maxcorner = np.array(map(max,coords)) + grid = np.array(map(len,coords)+[1]*(3-len(coords)),'i') - size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 equal to smallest among other spacings + N = grid.prod() + + if N != len(table.data): errors.append('data count {} does not match grid '.format(N) + + 'x'.join(map(str,grid)) + + '.') + if errors != []: + damask.util.croak(errors) + table.close(dismiss = True) + continue # ------------------------------------------ process value field ----------------------------------- @@ -219,4 +222,4 @@ for name in filenames: # ------------------------------------------ output finalization ----------------------------------- - table.close() # close input ASCII table (works for stdin) + table.close() # close input ASCII table (works for stdin) \ No newline at end of file diff --git a/processing/post/addGradient.py b/processing/post/addGradient.py new file mode 100755 index 000000000..4d136c8b9 --- /dev/null +++ b/processing/post/addGradient.py @@ -0,0 +1,160 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 no BOM -*- + +import os,sys,math +import numpy as np +from optparse import OptionParser +import damask + +scriptName = os.path.splitext(os.path.basename(__file__))[0] +scriptID = ' '.join([scriptName,damask.version]) + +#-------------------------------------------------------------------------------------------------- +def gradFFT(geomdim,field): + + grid = np.array(np.shape(field)[2::-1]) + N = grid.prod() # field size + n = np.array(np.shape(field)[3:]).prod() # data size + if n == 3: dataType = 'vector' + elif n == 1: dataType = 'scalar' + + field_fourier = np.fft.fftpack.rfftn(field,axes=(0,1,2)) + grad_fourier = np.zeros(field_fourier.shape+(3,),'c16') + +# differentiation in Fourier space + k_s = np.zeros([3],'i') + TWOPIIMG = 2.0j*math.pi + for i in xrange(grid[2]): + k_s[0] = i + if grid[2]%2 == 0 and i == grid[2]//2: k_s[0] = 0 # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011) + elif i > grid[2]//2: k_s[0] -= grid[2] + + for j in xrange(grid[1]): + k_s[1] = j + if grid[1]%2 == 0 and j == grid[1]//2: k_s[1] = 0 # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011) + elif j > grid[1]//2: k_s[1] -= 
grid[1] + + for k in xrange(grid[0]//2+1): + k_s[2] = k + if grid[0]%2 == 0 and k == grid[0]//2: k_s[2] = 0 # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011) + + xi = (k_s/geomdim)[2::-1].astype('c16') # reversing the field order + + grad_fourier[i,j,k,0,:] = field_fourier[i,j,k,0]*xi *TWOPIIMG # vector field from scalar data + + if dataType == 'vector': + grad_fourier[i,j,k,1,:] = field_fourier[i,j,k,1]*xi *TWOPIIMG # tensor field from vector data + grad_fourier[i,j,k,2,:] = field_fourier[i,j,k,2]*xi *TWOPIIMG + + return np.fft.fftpack.irfftn(grad_fourier,axes=(0,1,2)).reshape([N,3*n]) + + +# -------------------------------------------------------------------- +# MAIN +# -------------------------------------------------------------------- + +parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """ +Add column(s) containing gradient of requested column(s). +Operates on periodic ordered three-dimensional data sets. +Deals with both vector- and scalar-valued fields. + +""", version = scriptID) + +parser.add_option('-c','--coordinates', + dest = 'coords', + type = 'string', metavar='string', + help = 'column label of coordinates [%default]') +parser.add_option('-v','--vector', + dest = 'vector', + action = 'extend', metavar = '', + help = 'column label(s) of vector field values') +parser.add_option('-s','--scalar', + dest = 'scalar', + action = 'extend', metavar = '', + help = 'column label(s) of scalar field values') + +parser.set_defaults(coords = 'pos', + ) + +(options,filenames) = parser.parse_args() + +if options.vector is None and options.scalar is None: + parser.error('no data column specified.') + +# --- loop over input files ------------------------------------------------------------------------ + +if filenames == []: filenames = [None] + +for name in filenames: + try: table = damask.ASCIItable(name = name,buffered = False) + except: continue + damask.util.report(scriptName,name) + +# ------------------------------------------ read header ------------------------------------------ + + table.head_read() + +# ------------------------------------------ sanity checks ---------------------------------------- + + items = { + 'scalar': {'dim': 1, 'shape': [1], 'labels':options.scalar, 'active':[], 'column': []}, + 'vector': {'dim': 3, 'shape': [3], 'labels':options.vector, 'active':[], 'column': []}, + } + errors = [] + remarks = [] + column = {} + + if table.label_dimension(options.coords) != 3: errors.append('coordinates {} are not a vector.'.format(options.coords)) + else: colCoord = table.label_index(options.coords) + + for type, data in items.iteritems(): + for what in (data['labels'] if data['labels'] is not None else []): + dim = table.label_dimension(what) + if dim != data['dim']: remarks.append('column {} is not a {}.'.format(what,type)) + else: + items[type]['active'].append(what) + items[type]['column'].append(table.label_index(what)) + + if remarks != []: damask.util.croak(remarks) + if errors != []: + damask.util.croak(errors) + table.close(dismiss = True) + continue + +# ------------------------------------------ assemble header -------------------------------------- + + table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) + for type, data in items.iteritems(): + for label in data['active']: + table.labels_append(['{}_gradFFT({})'.format(i+1,label) for i in xrange(3 * data['dim'])]) # extend ASCII header with new labels + table.head_write() + +# --------------- figure out size and grid
--------------------------------------------------------- + + table.data_readArray() + + coords = [np.unique(table.data[:,colCoord+i]) for i in xrange(3)] + mincorner = np.array(map(min,coords)) + maxcorner = np.array(map(max,coords)) + grid = np.array(map(len,coords),'i') + size = grid/np.maximum(np.ones(3,'d'), grid-1.0) * (maxcorner-mincorner) # size from edge to edge = dim * n/(n-1) + size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) + +# ------------------------------------------ process value field ----------------------------------- + + stack = [table.data] + for type, data in items.iteritems(): + for i,label in enumerate(data['active']): + # we need to reverse order here, because x is fastest, i.e. rightmost, but leftmost in our x,y,z notation + stack.append(gradFFT(size[::-1], + table.data[:,data['column'][i]:data['column'][i]+data['dim']]. + reshape([grid[2],grid[1],grid[0]]+data['shape']))) + +# ------------------------------------------ output result ----------------------------------------- + + if len(stack) > 1: table.data = np.hstack(tuple(stack)) + table.data_writeArray('%.12g') + +# ------------------------------------------ output finalization ----------------------------------- + + table.close() # close input ASCII table (works for stdin) diff --git a/processing/post/addGrainID.py b/processing/post/addGrainID.py index 384d161ae..a250c197c 100755 --- a/processing/post/addGrainID.py +++ b/processing/post/addGrainID.py @@ -1,11 +1,10 @@ #!/usr/bin/env python -import os,sys,string,time,copy +import os,sys,time,copy import numpy as np import damask from optparse import OptionParser from scipy import spatial -from collections import defaultdict scriptName = os.path.splitext(os.path.basename(__file__))[0] scriptID = ' '.join([scriptName,damask.version]) @@ -23,7 +22,7 @@ parser.add_option('-r', '--radius', parser.add_option('-d', '--disorientation', dest = 'disorientation', type = 'float', metavar = 'float', - help = 'disorientation threshold per grain [%default] (degrees)') + help = 'disorientation threshold in degrees [%default]') parser.add_option('-s', '--symmetry', dest = 'symmetry', type = 'string', metavar = 'string', @@ -61,22 +60,23 @@ parser.add_option('-p', '--position', type = 'string', metavar = 'string', help = 'spatial position of voxel [%default]') -parser.set_defaults(symmetry = 'cubic', +parser.set_defaults(disorientation = 5, + symmetry = 'cubic', coords = 'pos', degrees = False, ) (options, filenames) = parser.parse_args() -if options.radius == None: +if options.radius is None: parser.error('no radius specified.') -input = [options.eulers != None, - options.a != None and \ - options.b != None and \ - options.c != None, - options.matrix != None, - options.quaternion != None, +input = [options.eulers is not None, + options.a is not None and \ + options.b is not None and \ + options.c is not None, + options.matrix is not None, + options.quaternion is not None, ] if np.sum(input) != 1: parser.error('needs exactly one input format.') @@ -86,17 +86,16 @@ if np.sum(input) != 1: parser.error('needs exactly one input format.') (options.matrix,9,'matrix'), (options.quaternion,4,'quaternion'), ][np.where(input)[0][0]] # select input label that was requested -toRadians = np.pi/180.0 if options.degrees else 1.0 # rescale degrees to radians -cos_disorientation = np.cos(options.disorientation/2.*toRadians) +toRadians = np.pi/180.0 if options.degrees else 1.0 # rescale degrees to radians +cos_disorientation = np.cos(np.radians(options.disorientation/2.)) #
cos of half the disorientation angle # --- loop over input files ------------------------------------------------------------------------- if filenames == []: filenames = [None] for name in filenames: - try: - table = damask.ASCIItable(name = name, - buffered = False) + try: table = damask.ASCIItable(name = name, + buffered = False) except: continue damask.util.report(scriptName,name) @@ -109,8 +108,10 @@ for name in filenames: errors = [] remarks = [] - if table.label_dimension(options.coords) != 3: errors.append('coordinates {} are not a vector.'.format(options.coords)) - if not np.all(table.label_dimension(label) == dim): errors.append('input {} has wrong dimension {}.'.format(label,dim)) + if not 3 >= table.label_dimension(options.coords) >= 1: + errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.coords)) + if not np.all(table.label_dimension(label) == dim): + errors.append('input {} does not have dimension {}.'.format(label,dim)) else: column = table.label_index(label) if remarks != []: damask.util.croak(remarks) @@ -122,8 +123,10 @@ for name in filenames: # ------------------------------------------ assemble header --------------------------------------- table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) - table.labels_append('grainID_{}@{}'.format(label, - options.disorientation if options.degrees else np.degrees(options.disorientation))) # report orientation source and disorientation in degrees + table.labels_append('grainID_{}@{:g}'.format('+'.join(label) + if isinstance(label, (list,tuple)) + else label, + options.disorientation)) # report orientation source and disorientation table.head_write() # ------------------------------------------ process data ------------------------------------------ @@ -161,7 +164,8 @@ for name in filenames: if p > 0 and p % 1000 == 0: time_delta = (time.clock()-tick) * (len(grainID) - p) / p - bg.set_message('(%02i:%02i:%02i) processing point %i of %i (grain count %i)...'%(time_delta//3600,time_delta%3600//60,time_delta%60,p,len(grainID),len(orientations))) + bg.set_message('(%02i:%02i:%02i) processing point %i of %i (grain count %i)...'\ + %(time_delta//3600,time_delta%3600//60,time_delta%60,p,len(grainID),np.count_nonzero(memberCounts))) if inputtype == 'eulers': o = damask.Orientation(Eulers = np.array(map(float,table.data[column:column+3]))*toRadians, @@ -178,83 +182,51 @@ for name in filenames: o = damask.Orientation(quaternion = np.array(map(float,table.data[column:column+4])), symmetry = options.symmetry).reduced() - matched = False + matched = False + alreadyChecked = {} + candidates = [] + bestDisorientation = damask.Quaternion([0,0,0,1]) # initialize to 180 deg rotation as worst case -# check against last matched needs to be really picky. best would be to exclude jumps across the poke (checking distance between last and me?) -# when walking through neighborhood first check whether grainID of that point has already been tested, if yes, skip! - - if matchedID != -1: # has matched before? - matched = (o.quaternion.conjugated() * orientations[matchedID].quaternion).w > cos_disorientation - - if not matched: - alreadyChecked = {} - bestDisorientation = damask.Quaternion([0,0,0,1]) # initialize to 180 deg rotation as worst case - for i in kdtree.query_ball_point(kdtree.data[p],options.radius): # check all neighboring points - gID = grainID[i] - if gID != -1 and gID not in alreadyChecked: # an already indexed point belonging to a grain not yet tested? 
- alreadyChecked[gID] = True # remember not to check again - disorientation = o.disorientation(orientations[gID],SST = False)[0] # compare against that grain's orientation (and skip requirement of axis within SST) - if disorientation.quaternion.w > cos_disorientation and \ - disorientation.quaternion.w >= bestDisorientation.w: # within disorientation threshold and better than current best? + for i in kdtree.query_ball_point(kdtree.data[p],options.radius): # check all neighboring points + gID = grainID[i] + if gID != -1 and gID not in alreadyChecked: # indexed point belonging to a grain not yet tested? + alreadyChecked[gID] = True # remember not to check again + disorientation = o.disorientation(orientations[gID],SST = False)[0] # compare against other orientation + if disorientation.quaternion.w > cos_disorientation: # within threshold ... + candidates.append(gID) # remember as potential candidate + if disorientation.quaternion.w >= bestDisorientation.w: # ... and better than current best? matched = True matchedID = gID # remember that grain bestDisorientation = disorientation.quaternion - if not matched: # no match -> new grain found - memberCounts += [1] # start new membership counter + if matched: # did match existing grain + memberCounts[matchedID] += 1 + if len(candidates) > 1: # ambiguity in grain identification? + largestGrain = sorted(candidates,key=lambda x:memberCounts[x])[-1] # find largest among potential candidate grains + matchedID = largestGrain + for c in [c for c in candidates if c != largestGrain]: # loop over smaller candidates + memberCounts[largestGrain] += memberCounts[c] # reassign member count of smaller to largest + memberCounts[c] = 0 + grainID = np.where(np.in1d(grainID,candidates), largestGrain, grainID) # relabel grid points of smaller candidates as largest one + + else: # no match -> new grain found orientations += [o] # initialize with current orientation + memberCounts += [1] # start new membership counter matchedID = g g += 1 # increment grain counter - else: # did match existing grain - memberCounts[matchedID] += 1 - grainID[p] = matchedID # remember grain index assigned to point p += 1 # increment point - bg.set_message('identifying similar orientations among {} grains...'.format(len(orientations))) - - memberCounts = np.array(memberCounts) - similarOrientations = [[] for i in xrange(len(orientations))] - - for i,orientation in enumerate(orientations[:-1]): # compare each identified orientation... - for j in xrange(i+1,len(orientations)): # ...against all others that were defined afterwards - if orientation.disorientation(orientations[j],SST = False)[0].quaternion.w > cos_disorientation: # similar orientations in both grainIDs? - similarOrientations[i].append(j) # remember in upper triangle... - similarOrientations[j].append(i) # ...and lower triangle of matrix - - if similarOrientations[i] != []: - bg.set_message('grainID {} is as: {}'.format(i,' '.join(map(str,similarOrientations[i])))) - - stillShifting = True - while stillShifting: - stillShifting = False - tick = time.clock() - - for p,gID in enumerate(grainID): # walk through all points - if p > 0 and p % 1000 == 0: - - time_delta = (time.clock()-tick) * (len(grainID) - p) / p - bg.set_message('(%02i:%02i:%02i) shifting ID of point %i out of %i (grain count %i)...'%(time_delta//3600,time_delta%3600//60,time_delta%60,p,len(grainID),len(orientations))) - if similarOrientations[gID] != []: # orientation of my grainID is similar to someone else? 
- similarNeighbors = defaultdict(int) # dict holding frequency of neighboring grainIDs that share my orientation (freq info not used...) - for i in kdtree.query_ball_point(kdtree.data[p],options.radius): # check all neighboring points - if grainID[i] in similarOrientations[gID]: # neighboring point shares my orientation? - similarNeighbors[grainID[i]] += 1 # remember its grainID - if similarNeighbors != {}: # found similar orientation(s) in neighborhood - candidates = np.array([gID]+similarNeighbors.keys()) # possible replacement grainIDs for me - grainID[p] = candidates[np.argsort(memberCounts[candidates])[-1]] # adopt ID that is most frequent in overall dataset - memberCounts[gID] -= 1 # my former ID loses one fellow - memberCounts[grainID[p]] += 1 # my new ID gains one fellow - bg.set_message('{}:{} --> {}'.format(p,gID,grainID[p])) # report switch of grainID - stillShifting = True - + grainIDs = np.where(np.array(memberCounts) > 0)[0] # identify "live" grain identifiers + packingMap = dict(zip(list(grainIDs),range(len(grainIDs)))) # map to condense into consecutive IDs + table.data_rewind() outputAlive = True p = 0 while outputAlive and table.data_read(): # read next data line of ASCII table - table.data_append(1+grainID[p]) # add grain ID + table.data_append(1+packingMap[grainID[p]]) # add (condensed) grain ID outputAlive = table.data_write() # output processed line p += 1 diff --git a/processing/post/addIPFcolor.py b/processing/post/addIPFcolor.py index da45c1288..2a1a3581a 100755 --- a/processing/post/addIPFcolor.py +++ b/processing/post/addIPFcolor.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string,math +import os,sys,math import numpy as np from optparse import OptionParser import damask @@ -62,12 +62,12 @@ parser.set_defaults(pole = (0.0,0.0,1.0), (options, filenames) = parser.parse_args() -input = [options.eulers != None, - options.a != None and \ - options.b != None and \ - options.c != None, - options.matrix != None, - options.quaternion != None, +input = [options.eulers is not None, + options.a is not None and \ + options.b is not None and \ + options.c is not None, + options.matrix is not None, + options.quaternion is not None, ] if np.sum(input) != 1: parser.error('needs exactly one input format.') @@ -99,7 +99,7 @@ for name in filenames: # ------------------------------------------ sanity checks ---------------------------------------- if not np.all(table.label_dimension(label) == dim): - damask.util.croak('input {} has wrong dimension {}.'.format(label,dim)) + damask.util.croak('input {} does not have dimension {}.'.format(label,dim)) table.close(dismiss = True) # close ASCIItable and remove empty file continue diff --git a/processing/post/addMapped.py b/processing/post/addMapped.py index cf3419745..2ce84959c 100755 --- a/processing/post/addMapped.py +++ b/processing/post/addMapped.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string +import os,sys from optparse import OptionParser import damask @@ -39,14 +39,14 @@ parser.set_defaults(offset = 0, (options,filenames) = parser.parse_args() -if options.label == None: +if options.label is None: parser.error('no data columns specified.') -if options.map == None: +if options.map is None: parser.error('no mapping column given.') # ------------------------------------------ process mapping ASCIItable --------------------------- -if options.asciitable != None and os.path.isfile(options.asciitable): +if options.asciitable is not None and 
os.path.isfile(options.asciitable): mappedTable = damask.ASCIItable(name = options.asciitable, buffered = False, readonly = True) diff --git a/processing/post/addMises.py b/processing/post/addMises.py index 910f26c70..771cb7bc4 100755 --- a/processing/post/addMises.py +++ b/processing/post/addMises.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,math,string +import os,sys,math import numpy as np from optparse import OptionParser import damask diff --git a/processing/post/addNorm.py b/processing/post/addNorm.py index df2fa11f8..3aa9e8c04 100755 --- a/processing/post/addNorm.py +++ b/processing/post/addNorm.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,math,string +import os,sys,math from optparse import OptionParser import damask @@ -43,7 +43,7 @@ parser.set_defaults(norm = 'frobenius', (options,filenames) = parser.parse_args() -if options.label == None: +if options.label is None: parser.error('no data column specified.') # --- loop over input files ------------------------------------------------------------------------- diff --git a/processing/post/addOrientations.py b/processing/post/addOrientations.py index 91b4dbc8a..f7519d92c 100755 --- a/processing/post/addOrientations.py +++ b/processing/post/addOrientations.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string,math +import os,sys,math import numpy as np from optparse import OptionParser import damask @@ -15,7 +15,8 @@ scriptID = ' '.join([scriptName,damask.version]) parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """ Add quaternion and/or Bunge Euler angle representation of crystal lattice orientation. -Orientation is given by quaternion, Euler angles, rotation matrix, or crystal frame coordinates (i.e. component vectors of rotation matrix). +Orientation is given by quaternion, Euler angles, rotation matrix, or crystal frame coordinates +(i.e. component vectors of rotation matrix). 
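+Exactly one input format needs to be specified.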
""", version = scriptID) @@ -74,12 +75,12 @@ options.output = map(lambda x: x.lower(), options.output) if options.output == [] or (not set(options.output).issubset(set(outputChoices))): parser.error('output must be chosen from {}.'.format(', '.join(outputChoices))) -input = [options.eulers != None, - options.a != None and \ - options.b != None and \ - options.c != None, - options.matrix != None, - options.quaternion != None, +input = [options.eulers is not None, + options.a is not None and \ + options.b is not None and \ + options.c is not None, + options.matrix is not None, + options.quaternion is not None, ] if np.sum(input) != 1: parser.error('needs exactly one input format.') @@ -112,7 +113,7 @@ for name in filenames: errors = [] remarks = [] - if not np.all(table.label_dimension(label) == dim): errors.append('input {} has wrong dimension {}.'.format(label,dim)) + if not np.all(table.label_dimension(label) == dim): errors.append('input {} does not have dimension {}.'.format(label,dim)) else: column = table.label_index(label) if remarks != []: damask.util.croak(remarks) diff --git a/processing/post/addPK2.py b/processing/post/addPK2.py index 82dc5a26c..349b9e106 100755 --- a/processing/post/addPK2.py +++ b/processing/post/addPK2.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string +import os,sys import numpy as np from optparse import OptionParser import damask @@ -14,7 +14,8 @@ scriptID = ' '.join([scriptName,damask.version]) # -------------------------------------------------------------------- parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """ -Add column(s) containing Second Piola--Kirchhoff stress based on given column(s) of deformation gradient and first Piola--Kirchhoff stress. +Add column(s) containing Second Piola--Kirchhoff stress based on given column(s) of deformation +gradient and first Piola--Kirchhoff stress. 
""", version = scriptID) diff --git a/processing/post/addPole.py b/processing/post/addPole.py index 9228058a2..a57fef9eb 100755 --- a/processing/post/addPole.py +++ b/processing/post/addPole.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string,math +import os,sys,math import numpy as np from optparse import OptionParser import damask @@ -62,12 +62,12 @@ parser.set_defaults(pole = (1.0,0.0,0.0), (options, filenames) = parser.parse_args() -input = [options.eulers != None, - options.a != None and \ - options.b != None and \ - options.c != None, - options.matrix != None, - options.quaternion != None, +input = [options.eulers is not None, + options.a is not None and \ + options.b is not None and \ + options.c is not None, + options.matrix is not None, + options.quaternion is not None, ] if np.sum(input) != 1: parser.error('needs exactly one input format.') @@ -101,7 +101,7 @@ for name in filenames: errors = [] remarks = [] - if not np.all(table.label_dimension(label) == dim): errors.append('input {} has wrong dimension {}.'.format(label,dim)) + if not np.all(table.label_dimension(label) == dim): errors.append('input {} does not have dimension {}.'.format(label,dim)) else: column = table.label_index(label) if remarks != []: damask.util.croak(remarks) @@ -133,7 +133,7 @@ for name in filenames: rotatedPole = o.quaternion*pole # rotate pole according to crystal orientation (x,y) = rotatedPole[0:2]/(1.+abs(pole[2])) # stereographic projection - table.data_append([np.sqrt(x*x+y*y),np.arctan2(y,x)] if options.polar else [x,y]) # cartesian coordinates + table.data_append([np.sqrt(x*x+y*y),np.arctan2(y,x)] if options.polar else [x,y]) # cartesian coordinates outputAlive = table.data_write() # output processed line diff --git a/processing/post/addQuaternions.py b/processing/post/addQuaternions.py deleted file mode 100755 index 185d11a64..000000000 --- a/processing/post/addQuaternions.py +++ /dev/null @@ -1,98 +0,0 @@ -#!/usr/bin/env python -# -*- coding: UTF-8 no BOM -*- - -import os,sys,string,numpy as np -from optparse import OptionParser -import damask - -scriptName = os.path.splitext(os.path.basename(__file__))[0] -scriptID = ' '.join([scriptName,damask.version]) - - -# -------------------------------------------------------------------- -# MAIN -# -------------------------------------------------------------------- - -parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """ -Add Quaternions based on Crystal Frame Coordinates. 
- -""", version = scriptID) - -parser.add_option('-f','--frame', dest='frame', nargs=4, type='string', metavar='', - help='heading of columns containing b* vector components and three frame vectors in that order') -parser.add_option('-s','--symmetry', dest='crysym', nargs=1,type='string',metavar='', - help='crystal symmetry definition') -parser.set_defaults(frame = None) - -(options,filenames) = parser.parse_args() - -if options.frame == None: - parser.error('no data column specified...') - -datainfo = {'len':4, - 'label':[] - } - -if options.frame != None: datainfo['label'] += options.frame - -# --- loop over input files ------------------------------------------------------------------------- - -if filenames == []: filenames = [None] - -for name in filenames: - try: - table = damask.ASCIItable(name = name, - buffered = False) - except: continue - damask.util.report(scriptName,name) - - table.head_read() # read ASCII header info - -# --------------- figure out columns to process --------------------------------------------------- - active = [] - column = {} - - for label in datainfo['label']: - key = '1_'+label if datainfo['len'] > 1 else label # non-special labels have to start with '1_' - if key in table.labels: - active.append(label) - column[label] = table.labels.index(key) # remember columns of requested data - else: - damask.util.croak('column %s not found...'%label) - -# ------------------------------------------ assemble header --------------------------------------- - - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) - table.labels_append(['Q_%i'%(i+1) for i in xrange(4)]) # extend ASCII header with new labels [1 real, 3 imaginary components] - table.head_write() - -# ------------------------------------------ process data ------------------------------------------ - outputAlive = True - while outputAlive and table.data_read(): # read next data line of ASCII table - vec = np.zeros([4,3]) - for i,label in enumerate(active): - vec[i,:] = np.array(table.data[column[label]: - column[label]+3]) - - if sys.argv[1:][6]=='hexagonal': # Ensure Input matrix is orthogonal - M=np.dot(vec[0,:],vec[2,:]) - vec[1,:]=vec[1,:]/np.linalg.norm(vec[1,:]) - vec[2,:]=M*(vec[0,:]/np.linalg.norm(vec[0,:])) - vec[3,:]=vec[3,:]/np.linalg.norm(vec[3,:]) - else: - vec[1,:]=vec[1,:]/np.linalg.norm(vec[1,:]) - vec[2,:]=vec[2,:]/np.linalg.norm(vec[2,:]) - vec[3,:]=vec[3,:]/np.linalg.norm(vec[3,:]) - - - Ori=damask.Orientation(matrix=vec[1:,:],symmetry=sys.argv[1:][6]) - - table.data_append(np.asarray(Ori.asQuaternion())) - - - outputAlive = table.data_write() # output processed line - -# ------------------------------------------ output result ----------------------------------------- - outputAlive and table.output_flush() # just in case of buffered ASCII table - - table.close() # close ASCII tables diff --git a/processing/post/addSchmidfactors.py b/processing/post/addSchmidfactors.py index 30d3d6f4a..5a08024fc 100755 --- a/processing/post/addSchmidfactors.py +++ b/processing/post/addSchmidfactors.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,math,string +import os,sys,math import numpy as np from optparse import OptionParser import damask @@ -9,226 +9,95 @@ import damask scriptName = os.path.splitext(os.path.basename(__file__))[0] scriptID = ' '.join([scriptName,damask.version]) -slipnormal_temp = [ - [0,0,0,1], - [0,0,0,1], - [0,0,0,1], - [0,1,-1,0], - [-1,0,1,0], - [1,-1,0,0], - [0,1,-1,1], - [-1,1,0,1], - [-1,0,1,1], - [0,-1,1,1], - [1,-1,0,1], - 
[1,0,-1,1], - [0,1,-1,1], - [0,1,-1,1], - [-1,1,0,1], - [-1,1,0,1], - [-1,0,1,1], - [-1,0,1,1], - [0,-1,1,1], - [0,-1,1,1], - [1,-1,0,1], - [1,-1,0,1], - [1,0,-1,1], - [1,0,-1,1], - ] - -slipdirection_temp = [ - [2,-1,-1,0], - [-1,2,-1,0], - [-1,-1,2,0], - [2,-1,-1,0], - [-1,2,-1,0], - [-1,-1,2,0], - [2,-1,-1,0], - [1,1,-2,0], - [-1,2,-1,0], - [-2,1,1,0], - [-1,-1,2,0], - [1,-2,1,0], - [-1,2,-1,3], - [1,1,-2,3], - [-2,1,1,3], - [-1,2,-1,3], - [-1,-1,2,3], - [-2,1,1,3], - [1,-2,1,3], - [-1,-1,2,3], - [2,-1,-1,3], - [1,-2,1,3], - [1,1,-2,3], - [2,-1,-1,3], - ] - -# slip normals and directions according to cpfem implementation -Nslipsystems = {'fcc': 12, 'bcc': 24, 'hex': 24} -slipnormal = { \ - 'fcc': [ - [1,1,1], - [1,1,1], - [1,1,1], - [-1,-1,1], - [-1,-1,1], - [-1,-1,1], - [1,-1,-1], - [1,-1,-1], - [1,-1,-1], - [-1,1,-1], - [-1,1,-1], - [-1,1,-1], - ], - 'bcc': [ - [0,1,1], - [0,1,1], - [0,-1,1], - [0,-1,1], - [1,0,1], - [1,0,1], - [-1,0,1], - [-1,0,1], - [1,1,0], - [1,1,0], - [-1,1,0], - [-1,1,0], - [2,1,1], - [-2,1,1], - [2,-1,1], - [2,1,-1], - [1,2,1], - [-1,2,1], - [1,-2,1], - [1,2,-1], - [1,1,2], - [-1,1,2], - [1,-1,2], - [1,1,-2], - ], - 'hex': [ # these are dummy numbers and are recalculated based on the above hex real slip systems. - [1,1,0], - [1,1,0], - [1,0,1], - [1,0,1], - [0,1,1], - [0,1,1], - [1,-1,0], - [1,-1,0], - [-1,0,1], - [-1,0,1], - [0,-1,1], - [0,-1,1], - [2,-1,1], - [1,-2,-1], - [1,1,2], - [2,1,1], - [1,2,-1], - [1,-1,2], - [2,1,-1], - [1,2,1], - [1,-1,-2], - [2,-1,-1], - [1,-2,1], - [1,1,-2], - ], - } -slipdirection = { \ - 'fcc': [ - [0,1,-1], - [-1,0,1], - [1,-1,0], - [0,-1,-1], - [1,0,1], - [-1,1,0], - [0,-1,1], - [-1,0,-1], - [1,1,0], - [0,1,1], - [1,0,-1], - [-1,-1,0], - ], - 'bcc': [ - [1,-1,1], - [-1,-1,1], - [1,1,1], - [-1,1,1], - [-1,1,1], - [-1,-1,1], - [1,1,1], - [1,-1,1], - [-1,1,1], - [-1,1,-1], - [1,1,1], - [1,1,-1], - [-1,1,1], - [1,1,1], - [1,1,-1], - [1,-1,1], - [1,-1,1], - [1,1,-1], - [1,1,1], - [-1,1,1], - [1,1,-1], - [1,-1,1], - [-1,1,1], - [1,1,1], - ], - 'hex': [ # these are dummy numbers and are recalculated based on the above hex real slip systems. 
- [-1,1,1], - [1,-1,1], - [-1,-1,1], - [-1,1,1], - [-1,-1,1], - [1,-1,1], - [1,1,1], - [-1,-1,1], - [1,-1,1], - [1,1,1], - [1,1,1], - [-1,1,1], - [1,1,-1], - [1,1,-1], - [1,1,-1], - [1,-1,-1], - [1,-1,-1], - [1,-1,-1], - [1,-1,1], - [1,-1,1], - [1,-1,1], - [1,1,1], - [1,1,1], - [1,1,1], - ], - } - -def applyEulers(phi1,Phi,phi2,x): - """ transform x given in crystal coordinates to xbar returned in lab coordinates for Euler angles phi1,Phi,phi2 """ - - eulerRot = [[ math.cos(phi1)*math.cos(phi2) - math.cos(Phi)*math.sin(phi1)*math.sin(phi2), - -math.cos(phi1)*math.sin(phi2) - math.cos(Phi)*math.cos(phi2)*math.sin(phi1), - math.sin(Phi)*math.sin(phi1) - ], - [ math.cos(phi2)*math.sin(phi1) + math.cos(Phi)*math.cos(phi1)*math.sin(phi2), - math.cos(Phi)*math.cos(phi1)*math.cos(phi2) - math.sin(phi1)*math.sin(phi2), - -math.sin(Phi)*math.cos(phi1) - ], - [ math.sin(Phi)*math.sin(phi2), - math.sin(Phi)*math.cos(phi2), - math.cos(Phi) - ]] - - xbar = [0,0,0] - if len(x) == 3: - for i in range(3): - xbar[i] = sum([eulerRot[i][j]*x[j] for j in range(3)]) - return xbar - -def normalize(x): - - norm = math.sqrt(sum([x[i]*x[i] for i in range(len(x))])) - - return [x[i]/norm for i in range(len(x))] +slipSystems = { +'fcc': + np.array([ + # Slip direction Plane normal + [ 0, 1,-1, 1, 1, 1, ], + [-1, 0, 1, 1, 1, 1, ], + [ 1,-1, 0, 1, 1, 1, ], + [ 0,-1,-1, -1,-1, 1, ], + [ 1, 0, 1, -1,-1, 1, ], + [-1, 1, 0, -1,-1, 1, ], + [ 0,-1, 1, 1,-1,-1, ], + [-1, 0,-1, 1,-1,-1, ], + [ 1, 1, 0, 1,-1,-1, ], + [ 0, 1, 1, -1, 1,-1, ], + [ 1, 0,-1, -1, 1,-1, ], + [-1,-1, 0, -1, 1,-1, ], + ],'f'), +'bcc': + np.array([ + # Slip system <111>{110} + [ 1,-1, 1, 0, 1, 1, ], + [-1,-1, 1, 0, 1, 1, ], + [ 1, 1, 1, 0,-1, 1, ], + [-1, 1, 1, 0,-1, 1, ], + [-1, 1, 1, 1, 0, 1, ], + [-1,-1, 1, 1, 0, 1, ], + [ 1, 1, 1, -1, 0, 1, ], + [ 1,-1, 1, -1, 0, 1, ], + [-1, 1, 1, 1, 1, 0, ], + [-1, 1,-1, 1, 1, 0, ], + [ 1, 1, 1, -1, 1, 0, ], + [ 1, 1,-1, -1, 1, 0, ], + # Slip system <111>{112} + [-1, 1, 1, 2, 1, 1, ], + [ 1, 1, 1, -2, 1, 1, ], + [ 1, 1,-1, 2,-1, 1, ], + [ 1,-1, 1, 2, 1,-1, ], + [ 1,-1, 1, 1, 2, 1, ], + [ 1, 1,-1, -1, 2, 1, ], + [ 1, 1, 1, 1,-2, 1, ], + [-1, 1, 1, 1, 2,-1, ], + [ 1, 1,-1, 1, 1, 2, ], + [ 1,-1, 1, -1, 1, 2, ], + [-1, 1, 1, 1,-1, 2, ], + [ 1, 1, 1, 1, 1,-2, ], + ],'f'), +'hex': + np.array([ + # Basal systems <11.0>{00.1} (independent of c/a-ratio, Bravais notation (4 coordinate base)) + [ 2, -1, -1, 0, 0, 0, 0, 1, ], + [-1, 2, -1, 0, 0, 0, 0, 1, ], + [-1, -1, 2, 0, 0, 0, 0, 1, ], + # 1st type prismatic systems <11.0>{10.0} (independent of c/a-ratio) + [ 2, -1, -1, 0, 0, 1, -1, 0, ], + [-1, 2, -1, 0, -1, 0, 1, 0, ], + [-1, -1, 2, 0, 1, -1, 0, 0, ], + # 2nd type prismatic systems <10.0>{11.0} -- a slip; plane normals independent of c/a-ratio + [ 0, 1, -1, 0, 2, -1, -1, 0, ], + [-1, 0, 1, 0, -1, 2, -1, 0, ], + [ 1, -1, 0, 0, -1, -1, 2, 0, ], + # 1st type 1st order pyramidal systems <11.0>{-11.1} -- plane normals depend on the c/a-ratio + [ 2, -1, -1, 0, 0, 1, -1, 1, ], + [-1, 2, -1, 0, -1, 0, 1, 1, ], + [-1, -1, 2, 0, 1, -1, 0, 1, ], + [ 1, 1, -2, 0, -1, 1, 0, 1, ], + [-2, 1, 1, 0, 0, -1, 1, 1, ], + [ 1, -2, 1, 0, 1, 0, -1, 1, ], + # pyramidal system: c+a slip <11.3>{-10.1} -- plane normals depend on the c/a-ratio + [ 2, -1, -1, 3, -1, 1, 0, 1, ], + [ 1, -2, 1, 3, -1, 1, 0, 1, ], + [-1, -1, 2, 3, 1, 0, -1, 1, ], + [-2, 1, 1, 3, 1, 0, -1, 1, ], + [-1, 2, -1, 3, 0, -1, 1, 1, ], + [ 1, 1, -2, 3, 0, -1, 1, 1, ], + [-2, 1, 1, 3, 1, -1, 0, 1, ], + [-1, 2, -1, 3, 1, -1, 0, 1, ], + [ 1, 1, -2, 3, -1, 0, 1, 1, ], + [ 2, -1, -1, 
3, -1, 0, 1, 1, ], + [ 1, -2, 1, 3, 0, 1, -1, 1, ], + [-1, -1, 2, 3, 0, 1, -1, 1, ], + # pyramidal system: c+a slip <11.3>{-1-1.2} -- as for hexagonal ice (Castelnau et al. 1996, similar to twin system found below) + [ 2, -1, -1, 3, -2, 1, 1, 2, ], # sorted according to similar twin system + [-1, 2, -1, 3, 1, -2, 1, 2, ], # <11.3>{-1-1.2} shear = 2((c/a)^2-2)/(3 c/a) + [-1, -1, 2, 3, 1, 1, -2, 2, ], + [-2, 1, 1, 3, 2, -1, -1, 2, ], + [ 1, -2, 1, 3, -1, 2, -1, 2, ], + [ 1, 1, -2, 3, -1, -1, 2, 2, ], + ],'f'), +} # -------------------------------------------------------------------- # MAIN @@ -239,126 +108,166 @@ Add columns listing Schmid factors (and optional trace vector of selected system """, version = scriptID) -parser.add_option('-l','--lattice', dest='lattice', type='choice', choices=('fcc','bcc','hex'), metavar='string', - help="type of lattice structure [%default] {fcc,bcc',hex}") -parser.add_option('--direction', dest='forcedirection', type='int', nargs=3, metavar='int int int', - help='force direction in lab coordinates %default') -parser.add_option('-n','--normal', dest='stressnormal', type='int', nargs=3, metavar='int int int', - help='stress plane normal in lab coordinates ') -parser.add_option('--trace', dest='traceplane', type='int', nargs=3, metavar='int int int', - help='normal (in lab coordinates) of plane on which the plane trace of the Schmid factor(s) is reported') -parser.add_option('--covera', dest='CoverA', type='float', metavar='float', - help='C over A ratio for hexagonal systems') -parser.add_option('-r','--rank', dest='rank', type='int', nargs=3, metavar='int int int', - help="report trace of r'th highest Schmid factor [%default]") -parser.add_option('-e', '--eulers', dest='eulers', metavar='string', - help='Euler angles label') -parser.add_option('-d', '--degrees', dest='degrees', action='store_true', - help='Euler angles are given in degrees [%default]') -parser.set_defaults(lattice = 'fcc') -parser.set_defaults(forcedirection = [0, 0, 1]) -parser.set_defaults(stressnormal = None) -parser.set_defaults(traceplane = None) -parser.set_defaults(rank = 0) -parser.set_defaults(CoverA = 1.587) -parser.set_defaults(eulers = 'eulerangles') +latticeChoices = ('fcc','bcc','hex') +parser.add_option('-l','--lattice', + dest = 'lattice', type = 'choice', choices = latticeChoices, metavar='string', + help = 'type of lattice structure [%default] {}'.format(latticeChoices)) +parser.add_option('--covera', + dest = 'CoverA', type = 'float', metavar = 'float', + help = 'C over A ratio for hexagonal systems') +parser.add_option('-f', '--force', + dest = 'force', + type = 'float', nargs = 3, metavar = 'float float float', + help = 'force direction in lab frame [%default]') +parser.add_option('-n', '--normal', + dest = 'normal', + type = 'float', nargs = 3, metavar = 'float float float', + help = 'stress plane normal in lab frame [%default]') +parser.add_option('-e', '--eulers', + dest = 'eulers', + type = 'string', metavar = 'string', + help = 'Euler angles label') +parser.add_option('-d', '--degrees', + dest = 'degrees', + action = 'store_true', + help = 'Euler angles are given in degrees [%default]') +parser.add_option('-m', '--matrix', + dest = 'matrix', + type = 'string', metavar = 'string', + help = 'orientation matrix label') +parser.add_option('-a', + dest = 'a', + type = 'string', metavar = 'string', + help = 'crystal frame a vector label') +parser.add_option('-b', + dest = 'b', + type = 'string', metavar = 'string', + help = 'crystal frame b vector label') 
+parser.add_option('-c', + dest = 'c', + type = 'string', metavar = 'string', + help = 'crystal frame c vector label') +parser.add_option('-q', '--quaternion', + dest = 'quaternion', + type = 'string', metavar = 'string', + help = 'quaternion label') -(options,filenames) = parser.parse_args() +parser.set_defaults(force = (0.0,0.0,1.0), + normal = None, + lattice = latticeChoices[0], + CoverA = math.sqrt(8./3.), + degrees = False, + ) -options.forcedirection = normalize(options.forcedirection) -if options.stressnormal: - if abs(sum([options.forcedirection[i] * options.stressnormal[i] for i in range(3)])) < 1e-3: - options.stressnormal = normalize(options.stressnormal) - else: - parser.error('stress plane normal not orthogonal to force direction') -else: - options.stressnormal = options.forcedirection -if options.traceplane: - options.traceplane = normalize(options.traceplane) -options.rank = min(options.rank,Nslipsystems[options.lattice]) - -datainfo = { # list of requested labels per datatype - 'vector': {'len':3, - 'label':[]}, - } - -datainfo['vector']['label'] += [options.eulers] +(options, filenames) = parser.parse_args() toRadians = math.pi/180.0 if options.degrees else 1.0 # rescale degrees to radians -if options.lattice=='hex': # Convert 4 Miller indices notation of hex to orthogonal 3 Miller indices notation - for i in range(Nslipsystems[options.lattice]): - slipnormal[options.lattice][i][0]=slipnormal_temp[i][0] - slipnormal[options.lattice][i][1]=(slipnormal_temp[i][0]+2.0*slipnormal_temp[i][1])/math.sqrt(3.0) - slipnormal[options.lattice][i][2]=slipnormal_temp[i][3]/options.CoverA - slipdirection[options.lattice][i][0]=slipdirection_temp[i][0]*1.5 # direction [uvtw]->[3u/2 (u+2v)*sqrt(3)/2 w*(c/a)] , - slipdirection[options.lattice][i][1]=(slipdirection_temp[i][0]+2.0*slipdirection_temp[i][1])*(0.5*math.sqrt(3.0)) - slipdirection[options.lattice][i][2]=slipdirection_temp[i][3]*options.CoverA +force = np.array(options.force) +force /= np.linalg.norm(force) - for i in range(Nslipsystems[options.lattice]): - slipnormal[options.lattice][i]=normalize(slipnormal[options.lattice][i]) - slipdirection[options.lattice][i]=normalize(slipdirection[options.lattice][i]) +if options.normal: + normal = np.array(options.normal) + normal /= np.linalg.norm(normal) + if abs(np.dot(force,normal)) > 1e-3: + parser.error('stress plane normal not orthogonal to force direction') +else: + normal = force -# --- loop over input files ------------------------------------------------------------------------- +input = [options.eulers is not None, + options.a is not None and \ + options.b is not None and \ + options.c is not None, + options.matrix is not None, + options.quaternion is not None, + ] + +if np.sum(input) != 1: parser.error('needs exactly one input format.') + +(label,dim,inputtype) = [(options.eulers,3,'eulers'), + ([options.a,options.b,options.c],[3,3,3],'frame'), + (options.matrix,9,'matrix'), + (options.quaternion,4,'quaternion'), + ][np.where(input)[0][0]] # select input label that was requested + +c_direction = np.zeros((len(slipSystems[options.lattice]),3),'f') +c_normal = np.zeros_like(c_direction) + + +if options.lattice in latticeChoices[:2]: + c_direction = slipSystems[options.lattice][:,:3] + c_normal = slipSystems[options.lattice][:,3:] +elif options.lattice == latticeChoices[2]: + # convert 4 Miller index notation of hex to orthogonal 3 Miller index notation + for i in xrange(len(c_direction)): + c_direction[i] = np.array([slipSystems['hex'][i,0]*1.5,
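+                               # Miller-Bravais direction [uvtw] -> Cartesian [3u/2, (u+2v)*sqrt(3)/2, w*c/a]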
+ (slipSystems['hex'][i,0] + 2.*slipSystems['hex'][i,1])*0.5*np.sqrt(3), + slipSystems['hex'][i,3]*options.CoverA, + ]) + c_normal[i] = np.array([slipSystems['hex'][i,4], + (slipSystems['hex'][i,4] + 2.*slipSystems['hex'][i,5])/np.sqrt(3), + slipSystems['hex'][i,7]/options.CoverA, + ]) + +c_direction /= np.tile(np.linalg.norm(c_direction,axis=1),(3,1)).T +c_normal /= np.tile(np.linalg.norm(c_normal ,axis=1),(3,1)).T + +# --- loop over input files ------------------------------------------------------------------------ if filenames == []: filenames = [None] for name in filenames: - try: - table = damask.ASCIItable(name = name,buffered = False) - except: - continue + try: table = damask.ASCIItable(name = name, + buffered = False) + except: continue damask.util.report(scriptName,name) - table.head_read() # read ASCII header info - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) +# ------------------------------------------ read header ------------------------------------------ - key = '1_%s'%datainfo['vector']['label'][0] - if key not in table.labels: - file['croak'].write('column %s not found...\n'%key) + table.head_read() + +# ------------------------------------------ sanity checks ---------------------------------------- + + if not np.all(table.label_dimension(label) == dim): + damask.util.croak('input {} does not have dimension {}.'.format(label,dim)) + table.close(dismiss = True) # close ASCIItable and remove empty file continue - else: - column = table.labels.index(key) # remember columns of requested data + + column = table.label_index(label) # ------------------------------------------ assemble header --------------------------------------- - table.labels_append(['%i_S(%i_%i_%i)[%i_%i_%i]'%(i+1, - slipnormal[options.lattice][i][0], - slipnormal[options.lattice][i][1], - slipnormal[options.lattice][i][2], - slipdirection[options.lattice][i][0], - slipdirection[options.lattice][i][1], - slipdirection[options.lattice][i][2], - ) for i in range(Nslipsystems[options.lattice])]) - - if options.traceplane: - if options.rank > 0: - table.labels_append('trace_x trace_y trace_z system') - else: - table.labels_append(['(%i)tx\tty\ttz'%(i+1) for i in range(Nslipsystems[options.lattice])]) + table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) + table.labels_append(['{id}_' + 'S[{direction[0]:.1g}_{direction[1]:.1g}_{direction[2]:.1g}]' + '({normal[0]:.1g}_{normal[1]:.1g}_{normal[2]:.1g})'\ + .format( id = i+1, + normal = theNormal, + direction = theDirection, + ) for i,(theNormal,theDirection) in enumerate(zip(c_normal,c_direction))]) table.head_write() # ------------------------------------------ process data ------------------------------------------ + outputAlive = True while outputAlive and table.data_read(): # read next data line of ASCII table - [phi1,Phi,phi2] = Eulers=toRadians*np.array(map(\ - float,table.data[column:column+datainfo['vector']['len']])) - S = [ sum( [applyEulers(phi1,Phi,phi2,normalize( \ - slipnormal[options.lattice][slipsystem]))[i]*options.stressnormal[i] for i in range(3)] ) * \ - sum( [applyEulers(phi1,Phi,phi2,normalize( \ - slipdirection[options.lattice][slipsystem]))[i]*options.forcedirection[i] for i in range(3)] ) \ - for slipsystem in range(Nslipsystems[options.lattice]) ] - table.data_append(S) - if options.traceplane: - trace = [np.cross(options.traceplane,applyEulers(phi1,Phi,phi2,normalize(slipnormal[options.lattice][slipsystem]))) \ - for slipsystem in range(Nslipsystems[options.lattice]) ] - if options.rank == 0: - 
table.data_append('\t'.join(map(lambda x:'%f\t%f\t%f'%(x[0],x[1],x[2]),trace))) - elif options.rank > 0: - SabsSorted = sorted([(abs(S[i]),i) for i in range(len(S))]) - table.data_append('\t'.join(map(str,trace[SabsSorted[-options.rank][1]])) + '\t%i'%(1+SabsSorted[-options.rank][1])) + if inputtype == 'eulers': + o = damask.Orientation(Eulers = np.array(map(float,table.data[column:column+3]))*toRadians,) + elif inputtype == 'matrix': + o = damask.Orientation(matrix = np.array(map(float,table.data[column:column+9])).reshape(3,3).transpose(),) + elif inputtype == 'frame': + o = damask.Orientation(matrix = np.array(map(float,table.data[column[0]:column[0]+3] + \ + table.data[column[1]:column[1]+3] + \ + table.data[column[2]:column[2]+3])).reshape(3,3),) + elif inputtype == 'quaternion': + o = damask.Orientation(quaternion = np.array(map(float,table.data[column:column+4])),) + + rotForce = o.quaternion.conjugated() * force + rotNormal = o.quaternion.conjugated() * normal + table.data_append(np.abs(np.sum(c_direction*rotForce,axis=1) * np.sum(c_normal*rotNormal,axis=1))) outputAlive = table.data_write() # output processed line -# ------------------------------------------ output finalization ----------------------------------- +# ------------------------------------------ output finalization ----------------------------------- - table.close() # close input ASCII table (works for stdin) + table.close() # close ASCII tables diff --git a/processing/post/addSpectralDecomposition.py b/processing/post/addSpectralDecomposition.py index 3372e35f6..0555bb5bc 100755 --- a/processing/post/addSpectralDecomposition.py +++ b/processing/post/addSpectralDecomposition.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string +import os,sys import numpy as np from optparse import OptionParser import damask @@ -25,7 +25,7 @@ parser.add_option('-t','--tensor', (options,filenames) = parser.parse_args() -if options.tensor == None: +if options.tensor is None: parser.error('no data column specified.') # --- loop over input files ------------------------------------------------------------------------- diff --git a/processing/post/addStrainTensors.py b/processing/post/addStrainTensors.py index b8e1fc9cc..bd5d7fccf 100755 --- a/processing/post/addStrainTensors.py +++ b/processing/post/addStrainTensors.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string +import os,sys import numpy as np from optparse import OptionParser import damask @@ -10,7 +10,7 @@ scriptName = os.path.splitext(os.path.basename(__file__))[0] scriptID = ' '.join([scriptName,damask.version]) def operator(stretch,strain,eigenvalues): - ''' Albrecht Bertram: Elasticity and Plasticity of Large Deformations An Introduction (3rd Edition, 2012), p. 102 ''' + """Albrecht Bertram: Elasticity and Plasticity of Large Deformations An Introduction (3rd Edition, 2012), p. 
102""" return { 'V#ln': np.log(eigenvalues) , 'U#ln': np.log(eigenvalues) , @@ -75,7 +75,7 @@ if options.logarithmic: strains.append('ln') if options.biot: strains.append('Biot') if options.green: strains.append('Green') -if options.defgrad == None: +if options.defgrad is None: parser.error('no data column specified.') # --- loop over input files ------------------------------------------------------------------------- diff --git a/processing/post/addTable.py b/processing/post/addTable.py index 1e94319b2..a0055e0b6 100755 --- a/processing/post/addTable.py +++ b/processing/post/addTable.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string +import os,sys from optparse import OptionParser import damask diff --git a/processing/post/averageDown.py b/processing/post/averageDown.py index 0632a7844..0af56e176 100755 --- a/processing/post/averageDown.py +++ b/processing/post/averageDown.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string +import os,sys import numpy as np import scipy.ndimage from optparse import OptionParser @@ -22,7 +22,7 @@ Average each data block of size 'packing' into single values thus reducing the f parser.add_option('-c','--coordinates', dest = 'coords', type = 'string', metavar = 'string', - help = 'column heading for coordinates [%default]') + help = 'column label of coordinates [%default]') parser.add_option('-p','--packing', dest = 'packing', type = 'int', nargs = 3, metavar = 'int int int', @@ -39,7 +39,7 @@ parser.add_option('-s', '--size', dest = 'size', type = 'float', nargs = 3, metavar = 'float float float', help = 'size in x,y,z [autodetect]') -parser.set_defaults(coords = 'ipinitialcoord', +parser.set_defaults(coords = 'pos', packing = (2,2,2), shift = (0,0,0), grid = (0,0,0), @@ -59,11 +59,10 @@ if any(shift != 0): prefix += 'shift{:+}{:+}{:+}_'.format(*shift) if filenames == []: filenames = [None] for name in filenames: - try: - table = damask.ASCIItable(name = name, - outname = os.path.join(os.path.dirname(name), - prefix+os.path.basename(name)) if name else name, - buffered = False) + try: table = damask.ASCIItable(name = name, + outname = os.path.join(os.path.dirname(name), + prefix+os.path.basename(name)) if name else name, + buffered = False) except: continue damask.util.report(scriptName,name) @@ -75,7 +74,6 @@ for name in filenames: errors = [] remarks = [] - colCoord = None if table.label_dimension(options.coords) != 3: errors.append('coordinates {} are not a vector.'.format(options.coords)) else: colCoord = table.label_index(options.coords) @@ -86,7 +84,6 @@ for name in filenames: table.close(dismiss = True) continue - # ------------------------------------------ assemble header --------------------------------------- table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) @@ -101,10 +98,10 @@ for name in filenames: mincorner = np.array(map(min,coords)) maxcorner = np.array(map(max,coords)) grid = np.array(map(len,coords),'i') - size = grid/np.maximum(np.ones(3,'d'), grid-1.0) * (maxcorner-mincorner) # size from edge to edge = dim * n/(n-1) - size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 equal to smallest among other spacings + size = grid/np.maximum(np.ones(3,'d'), grid-1.0) * (maxcorner-mincorner) # size from edge to edge = dim * n/(n-1) + size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 set to smallest among other spacings delta = size/np.maximum(np.ones(3,'d'), grid) - origin = 
mincorner - 0.5*delta # shift from cell center to corner + origin = mincorner - 0.5*delta # shift from cell center to corner else: grid = np.array(options.grid,'i') diff --git a/processing/post/averageTable.py b/processing/post/averageTable.py index b3d99f49d..4a2f1eca6 100755 --- a/processing/post/averageTable.py +++ b/processing/post/averageTable.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string +import os,sys import numpy as np from optparse import OptionParser import damask @@ -28,7 +28,7 @@ parser.add_option('-l','--label', (options,filenames) = parser.parse_args() -if options.label == None: +if options.label is None: parser.error('no grouping column specified.') @@ -37,10 +37,14 @@ if options.label == None: if filenames == []: filenames = [None] for name in filenames: - try: - table = damask.ASCIItable(name = name, - outname = options.label+'_averaged_'+name if name else name, - buffered = False) + damask.util.croak(name) + + try: table = damask.ASCIItable(name = name, + outname = os.path.join( + os.path.split(name)[0], + options.label+'_averaged_'+os.path.split(name)[1] + ) if name else name, + buffered = False) except: continue damask.util.report(scriptName,name) diff --git a/processing/post/binXY.py b/processing/post/binXY.py index d9f0cb25a..c44d60de2 100755 --- a/processing/post/binXY.py +++ b/processing/post/binXY.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string +import os,sys import numpy as np from optparse import OptionParser import damask @@ -77,11 +77,11 @@ minmax = np.array([np.array(options.xrange), grid = np.zeros(options.bins,'f') result = np.zeros((options.bins[0],options.bins[1],3),'f') -if options.data == None: parser.error('no data columns specified.') +if options.data is None: parser.error('no data columns specified.') labels = options.data -if options.weight != None: labels += [options.weight] # prevent character splitting of single string value +if options.weight is not None: labels += [options.weight] # prevent character splitting of single string value # --- loop over input files ------------------------------------------------------------------------- @@ -124,7 +124,7 @@ for name in filenames: x = int(options.bins[0]*(table.data[i,0]-minmax[0,0])/delta[0]) y = int(options.bins[1]*(table.data[i,1]-minmax[1,0])/delta[1]) if x >= 0 and x < options.bins[0] and y >= 0 and y < options.bins[1]: - grid[x,y] += 1. if options.weight == None else table.data[i,2] # count (weighted) occurrences + grid[x,y] += 1. if options.weight is None else table.data[i,2] # count (weighted) occurrences if options.normCol: for x in xrange(options.bins[0]): diff --git a/processing/post/blowUp.py b/processing/post/blowUp.py index f108da28e..7b8c9bd15 100755 --- a/processing/post/blowUp.py +++ b/processing/post/blowUp.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string +import os,sys import numpy as np from optparse import OptionParser import damask @@ -19,36 +19,38 @@ to resolution*packing. 
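Both averageDown above and blowUp below reconstruct grid and size from the coordinate column alone; a minimal standalone sketch of that reconstruction, assuming a regular, cell-centered grid whose positions sit in an (N,3) array xyz (the array name is illustrative only):

import numpy as np
coords = [np.unique(xyz[:,i]) for i in xrange(3)]                     # distinct coordinates per direction
mincorner = np.array(map(min,coords))
maxcorner = np.array(map(max,coords))
grid = np.array(map(len,coords),'i')                                  # number of cells per direction
size = grid/np.maximum(np.ones(3,'d'),grid-1.0)*(maxcorner-mincorner) # size from edge to edge = dim * n/(n-1)
size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1]))   # flat directions: smallest other spacing

The n/(n-1) factor stretches the distance between outermost cell centers back to the full edge-to-edge length; directions with a single layer of cells inherit the smallest spacing of the populated directions.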
""", version = scriptID) -parser.add_option('-c','--coordinates', dest='coords', metavar='string', - help='column heading for coordinates [%default]') -parser.add_option('-p','--packing', dest='packing', type='int', nargs=3, metavar='int int int', - help='dimension of packed group [%default]') -parser.add_option('-g','--grid', dest='resolution', type='int', nargs=3, metavar='int int int', - help='resolution in x,y,z [autodetect]') -parser.add_option('-s','--size', dest='dimension', type='float', nargs=3, metavar='int int int', - help='dimension in x,y,z [autodetect]') -parser.set_defaults(coords = 'ipinitialcoord') -parser.set_defaults(packing = (2,2,2)) -parser.set_defaults(grid = (0,0,0)) -parser.set_defaults(size = (0.0,0.0,0.0)) +parser.add_option('-c','--coordinates', + dest = 'coords', metavar = 'string', + help = 'column label of coordinates [%default]') +parser.add_option('-p','--packing', + dest = 'packing', type = 'int', nargs = 3, metavar = 'int int int', + help = 'dimension of packed group [%default]') +parser.add_option('-g','--grid', + dest = 'resolution', type = 'int', nargs = 3, metavar = 'int int int', + help = 'resolution in x,y,z [autodetect]') +parser.add_option('-s','--size', + dest = 'dimension', type = 'float', nargs = 3, metavar = 'int int int', + help = 'dimension in x,y,z [autodetect]') +parser.set_defaults(coords = 'pos', + packing = (2,2,2), + grid = (0,0,0), + size = (0.0,0.0,0.0), + ) (options,filenames) = parser.parse_args() - options.packing = np.array(options.packing) -prefix = 'blowUp%ix%ix%i_'%(options.packing[0],options.packing[1],options.packing[2]) +prefix = 'blowUp{}x{}x{}_'.format(*options.packing) # --- loop over input files ------------------------------------------------------------------------- if filenames == []: filenames = [None] for name in filenames: - try: - table = damask.ASCIItable(name = name, - outname = os.path.join(os.path.dirname(name), - prefix+ \ - os.path.basename(name)) if name else name, - buffered = False) + try: table = damask.ASCIItable(name = name, + outname = os.path.join(os.path.dirname(name), + prefix+os.path.basename(name)) if name else name, + buffered = False) except: continue damask.util.report(scriptName,name) @@ -58,39 +60,41 @@ for name in filenames: # ------------------------------------------ sanity checks ---------------------------------------- - if table.label_dimension(options.coords) != 3: errors.append('coordinates {} are not a vector.'.format(options.coords)) - else: coordCol = table.label_index(options.coords) + errors = [] + remarks = [] + + if table.label_dimension(options.coords) != 3: errors.append('coordinates {} are not a vector.'.format(options.coords)) + else: colCoord = table.label_index(options.coords) - -# ------------------------------------------ assemble header -------------------------------------- - - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) + colElem = table.label_index('elem') + + if remarks != []: damask.util.croak(remarks) + if errors != []: + damask.util.croak(errors) + table.close(dismiss = True) + continue # --------------- figure out size and grid --------------------------------------------------------- - table.data_readArray() - - coords = [{},{},{}] - for i in xrange(len(table.data)): - for j in xrange(3): - coords[j][str(table.data[i,coordCol+j])] = True - grid = np.array(map(len,coords),'i') - size = grid/np.maximum(np.ones(3,'d'),grid-1.0)* \ - np.array([max(map(float,coords[0].keys()))-min(map(float,coords[0].keys())),\ - 
max(map(float,coords[1].keys()))-min(map(float,coords[1].keys())),\ - max(map(float,coords[2].keys()))-min(map(float,coords[2].keys())),\ - ],'d') # size from bounding box, corrected for cell-centeredness - - size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 equal to smallest among other spacings + table.data_readArray(options.coords) + coords = [np.unique(table.data[:,i]) for i in xrange(3)] + mincorner = np.array(map(min,coords)) + maxcorner = np.array(map(max,coords)) + grid = np.array(map(len,coords),'i') + size = grid/np.maximum(np.ones(3,'d'), grid-1.0) * (maxcorner-mincorner) # size from edge to edge = dim * n/(n-1) + size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 set to smallest among other spacings packing = np.array(options.packing,'i') outSize = grid*packing -# ------------------------------------------ assemble header --------------------------------------- +# ------------------------------------------ assemble header -------------------------------------- + + table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) table.head_write() # ------------------------------------------ process data ------------------------------------------- + table.data_rewind() data = np.zeros(outSize.tolist()+[len(table.labels)]) p = np.zeros(3,'i') @@ -102,15 +106,15 @@ for name in filenames: table.data_read() data[d[0]:d[0]+packing[0], d[1]:d[1]+packing[1], - d[2]:d[2]+packing[2], + d[2]:d[2]+packing[2], : ] = np.tile(np.array(table.data_asFloat(),'d'),packing.tolist()+[1]) # tile to match blowUp voxel size elementSize = size/grid/packing elem = 1 for c in xrange(outSize[2]): for b in xrange(outSize[1]): for a in xrange(outSize[0]): - data[a,b,c,coordCol:coordCol+3] = [a+0.5,b+0.5,c+0.5]*elementSize - data[a,b,c,table.label_index('elem')] = elem + data[a,b,c,colCoord:colCoord+3] = [a+0.5,b+0.5,c+0.5]*elementSize + if colElem != -1: data[a,b,c,colElem] = elem table.data = data[a,b,c,:].tolist() outputAlive = table.data_write() # output processed line elem += 1 diff --git a/processing/post/fillTable.py b/processing/post/fillTable.py index 120f4eb8d..fc40eed8f 100755 --- a/processing/post/fillTable.py +++ b/processing/post/fillTable.py @@ -1,7 +1,8 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,re,sys,string,fnmatch,math,random +import os,re,sys +import math # noqa import numpy as np from optparse import OptionParser import damask @@ -42,7 +43,7 @@ parser.set_defaults(condition = '', (options,filenames) = parser.parse_args() -if options.labels == None or options.formulae == None: +if options.labels is None or options.formulae is None: parser.error('no formulae specified.') if len(options.labels) != len(options.formulae): parser.error('number of labels ({}) and formulae ({}) do not match.'.format(len(options.labels),len(options.formulae))) diff --git a/processing/post/filterTable.py b/processing/post/filterTable.py index 87a95750c..6723d9faf 100755 --- a/processing/post/filterTable.py +++ b/processing/post/filterTable.py @@ -1,7 +1,8 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,re,sys,fnmatch,math,random +import os,re,sys,fnmatch +import math # noqa import numpy as np from optparse import OptionParser import damask @@ -34,7 +35,7 @@ Filter rows according to condition and columns by either white or black listing. Examples: Every odd row if x coordinate is positive -- " #ip.x# >= 0.0 and #_row_#%2 == 1 ). 
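Conditions like these work by textual substitution: each #label# is replaced by the value of that column in the current row (and #s#label# by the quoted string value, which is why the corrected example below needs no escaped quotes) before the expression is evaluated per row. A minimal sketch of the idea, using a hypothetical evalCondition helper rather than the script's actual parser:

import re
def evalCondition(condition,labels,row):                              # hypothetical helper
  expr = condition
  for i,label in enumerate(labels):
    expr = re.sub('#s#%s#'%re.escape(label),repr(str(row[i])),expr)   # string columns first
    expr = re.sub('#%s#'%re.escape(label),str(row[i]),expr)           # then numeric columns
  return eval(expr)

evalCondition("#s#foo# == 'bar'",['foo'],['bar'])                     # True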
-All rows where label 'foo' equals 'bar' -- " #foo# == \"bar\" " +All rows where label 'foo' equals 'bar' -- " #s#foo# == 'bar' " """, version = scriptID) @@ -79,14 +80,14 @@ for name in filenames: positions = [] for position,label in enumerate(table.labels): - if (options.whitelist == None or any([ position in table.label_indexrange(needle) \ + if (options.whitelist is None or any([ position in table.label_indexrange(needle) \ or fnmatch.fnmatch(label,needle) for needle in options.whitelist])) \ - and (options.blacklist == None or not any([ position in table.label_indexrange(needle) \ + and (options.blacklist is None or not any([ position in table.label_indexrange(needle) \ or fnmatch.fnmatch(label,needle) for needle in options.blacklist])): # a label to keep? labels.append(label) # remember name... positions.append(position) # ...and position - if len(labels) > 0 and options.whitelist != None and options.blacklist == None: # check whether reordering is possible + if len(labels) > 0 and options.whitelist is not None and options.blacklist is None: # check whether reordering is possible whitelistitem = np.zeros(len(labels),dtype=int) for i,label in enumerate(labels): # check each selected label match = [ positions[i] in table.label_indexrange(needle) \ @@ -118,7 +119,7 @@ for name in filenames: # ------------------------------------------ assemble header --------------------------------------- - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) # read ASCII header info + table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) table.labels_clear() table.labels_append(np.array(labels)[order]) # update with new label set table.head_write() diff --git a/processing/post/histogram.py b/processing/post/histogram.py new file mode 100755 index 000000000..d5d4cd185 --- /dev/null +++ b/processing/post/histogram.py @@ -0,0 +1,142 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 no BOM -*- + +import os,sys +import numpy as np +from optparse import OptionParser +import damask + +scriptName = os.path.splitext(os.path.basename(__file__))[0] +scriptID = ' '.join([scriptName,damask.version]) + +# -------------------------------------------------------------------- +# MAIN +# -------------------------------------------------------------------- + +parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """ +Generate histogram of N bins in given data range. 
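With --logarithmic, bin edges are spaced uniformly in log space: the range is pushed through forward = log, partitioned linearly, and pulled back through reverse = exp, exactly as the forward/reverse pair defined below does. A small sketch, assuming a strictly positive range:

import numpy as np
N,lo,hi = 4,1.0,1.0e4
edges = np.exp(np.log(lo) + np.arange(N+1)*(np.log(hi)-np.log(lo))/N)       # 1, 10, 100, 1000, 10000
centers = np.exp(np.log(lo) + (0.5+np.arange(N))*(np.log(hi)-np.log(lo))/N) # geometric bin centers

Each bin here spans one decade, and every bin center is the geometric (not arithmetic) mean of its two edges.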
+ +""", version = scriptID) + +parser.add_option('-d','--data', + dest = 'data', + type = 'string', metavar = 'string', + help = 'column heading for data') +parser.add_option('-w','--weights', + dest = 'weights', + type = 'string', metavar = 'string', + help = 'column heading for weights') +parser.add_option('--range', + dest = 'range', + type = 'float', nargs = 2, metavar = 'float float', + help = 'data range of histogram [min - max]') +parser.add_option('-N', + dest = 'N', + type = 'int', metavar = 'int', + help = 'number of bins') +parser.add_option('--density', + dest = 'density', + action = 'store_true', + help = 'report probability density') +parser.add_option('--logarithmic', + dest = 'log', + action = 'store_true', + help = 'logarithmically spaced bins') +parser.set_defaults(data = None, + weights = None, + range = None, + N = None, + density = False, + log = False, + ) + +(options,filenames) = parser.parse_args() + +if not options.data: parser.error('no data specified.') +if not options.N: parser.error('no bin number specified.') + +if options.log: + def forward(x): + return np.log(x) + def reverse(x): + return np.exp(x) +else: + def forward(x): + return x + def reverse(x): + return x + + +# --- loop over input files ------------------------------------------------------------------------ + +if filenames == []: filenames = [None] + +for name in filenames: + try: table = damask.ASCIItable(name = name, + buffered = False, + readonly = True) + except: continue + damask.util.report(scriptName,name) + +# ------------------------------------------ read header ------------------------------------------ + + table.head_read() + +# ------------------------------------------ sanity checks ---------------------------------------- + + errors = [] + remarks = [] + + if table.label_dimension(options.data) != 1: errors.append('data {} are not scalar.'.format(options.data)) + if options.weights and \ + table.label_dimension(options.data) != 1: errors.append('weights {} are not scalar.'.format(options.weights)) + + if remarks != []: damask.util.croak(remarks) + if errors != []: + damask.util.croak(errors) + table.close(dismiss = True) + continue + +# --------------- read data ---------------------------------------------------------------- + + table.data_readArray([options.data,options.weights]) + +# --------------- auto range --------------------------------------------------------------- + + if options.range is None: + rangeMin,rangeMax = min(table.data[:,0]),max(table.data[:,0]) + else: + rangeMin,rangeMax = min(options.range),max(options.range) + +# --------------- bin data ---------------------------------------------------------------- + + count,edges = np.histogram(table.data[:,0], + bins = reverse(forward(rangeMin) + np.arange(options.N+1) * + (forward(rangeMax)-forward(rangeMin))/options.N), + range = (rangeMin,rangeMax), + weights = None if options.weights is None else table.data[:,1], + density = options.density, + ) + bincenter = reverse(forward(rangeMin) + (0.5+np.arange(options.N)) * + (forward(rangeMax)-forward(rangeMin))/options.N) # determine center of bins + +# ------------------------------------------ assemble header --------------------------------------- + + table.info_clear() + table.info_append([scriptID + '\t' + ' '.join(sys.argv[1:]), + scriptID + ':\t' + + 'data range {} -- {}'.format(rangeMin,rangeMax) + + (' (log)' if options.log else ''), + ]) + table.labels_clear() + table.labels_append(['bincenter','count']) + table.head_write() + +# 
------------------------------------------ output result ----------------------------------------- + + table.data = np.squeeze(np.dstack((bincenter,count))) + table.data_writeArray() + +# ------------------------------------------ output finalization ----------------------------------- + + table.close() # close ASCII tables diff --git a/processing/post/imageData.py b/processing/post/imageData.py index da1616e94..e2c6658dd 100755 --- a/processing/post/imageData.py +++ b/processing/post/imageData.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string +import os,sys import numpy as np from optparse import OptionParser from PIL import Image @@ -115,7 +115,7 @@ for name in filenames: try: table = damask.ASCIItable(name = name, buffered = False, - labeled = options.label != None, + labeled = options.label is not None, readonly = True) except: continue damask.util.report(scriptName,name) @@ -131,15 +131,15 @@ for name in filenames: damask.util.croak('column {} not found.'.format(options.label)) table.close(dismiss = True) # close ASCIItable and remove empty file continue - - # convert data to values between 0 and 1 and arrange according to given options +# convert data to values between 0 and 1 and arrange according to given options if options.dimension != []: table.data = table.data.reshape(options.dimension[1],options.dimension[0]) if options.abs: table.data = np.abs(table.data) if options.log: table.data = np.log10(table.data);options.range = np.log10(options.range) if options.flipLR: table.data = np.fliplr(table.data) if options.flipUD: table.data = np.flipud(table.data) - mask = np.logical_or(table.data == options.gap, np.isnan(table.data)) if options.gap else np.logical_not(np.isnan(table.data)) # mask gap and NaN (if gap present) + mask = np.logical_or(table.data == options.gap, np.isnan(table.data))\ + if options.gap else np.logical_not(np.isnan(table.data)) # mask gap and NaN (if gap present) if np.all(np.array(options.range) == 0.0): options.range = [table.data[mask].min(), table.data[mask].max()] @@ -176,7 +176,7 @@ for name in filenames: im.save(sys.stdout if not name else os.path.splitext(name)[0]+ \ - ('' if options.label == None else '_'+options.label)+ \ + ('' if options.label is None else '_'+options.label)+ \ '.png', format = "PNG") diff --git a/processing/post/imageDataDeformed.py b/processing/post/imageDataDeformed.py index 692d377f4..71086cf72 100755 --- a/processing/post/imageDataDeformed.py +++ b/processing/post/imageDataDeformed.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string +import os,sys import numpy as np from optparse import OptionParser from PIL import Image, ImageDraw @@ -112,7 +112,7 @@ for name in filenames: try: table = damask.ASCIItable(name = name, buffered = False, - labeled = options.label != None, + labeled = options.label is not None, readonly = True) except: continue table.report_name(scriptName,name) @@ -161,9 +161,10 @@ for name in filenames: ]) # find x-y bounding box for given z layer nodes -= boundingBox[0].repeat(np.prod(options.dimension+1)).reshape([3]+list(options.dimension+1)) - nodes *= (options.pixelsize*options.dimension/options.size).repeat(np.prod(options.dimension+1)).reshape([3]+list(options.dimension+1)) - imagesize = (options.pixelsize*(boundingBox[1]-boundingBox[0])*options.dimension\ - /options.size)[:2].astype('i') # determine image size from number of cells in overall bounding box + nodes *= 
(options.pixelsize*options.dimension/options.size).repeat(np.prod(options.dimension+1)).\ + reshape([3]+list(options.dimension+1)) + imagesize = (options.pixelsize*(boundingBox[1]-boundingBox[0])* # determine image size from number of + options.dimension/options.size)[:2].astype('i') # cells in overall bounding box im = Image.new('RGBA',imagesize) draw = ImageDraw.Draw(im) diff --git a/processing/post/imageDataRGB.py b/processing/post/imageDataRGB.py index 69310f529..f706cb100 100755 --- a/processing/post/imageDataRGB.py +++ b/processing/post/imageDataRGB.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string +import os,sys import numpy as np from optparse import OptionParser from PIL import Image @@ -80,7 +80,7 @@ for name in filenames: try: table = damask.ASCIItable(name = name, buffered = False, - labeled = options.label != None, + labeled = options.label is not None, readonly = True) except: continue damask.util.report(scriptName,name) @@ -98,14 +98,13 @@ for name in filenames: errors.append('column{} {} not found'.format('s' if len(missing_labels) > 1 else '', ', '.join(missing_labels))) if table.label_dimension(options.label) != 3: - errors.append('column {} has wrong dimension'.format(options.label)) + errors.append('column {} does not have dimension 3'.format(options.label)) if errors != []: damask.util.croak(errors) table.close(dismiss = True) # close ASCII table file handles and delete output file continue - - # convert data to shape and arrange according to given options +# convert data to shape and arrange according to given options if options.dimension != []: table.data = table.data.reshape(options.dimension[1],options.dimension[0],3) if options.flipLR: table.data = np.fliplr(table.data) if options.flipUD: table.data = np.flipud(table.data) diff --git a/processing/post/marc_deformedGeometry.py b/processing/post/marc_deformedGeometry.py index e59c982a6..f0fbee8b1 100755 --- a/processing/post/marc_deformedGeometry.py +++ b/processing/post/marc_deformedGeometry.py @@ -55,7 +55,7 @@ else: sys.path.append(damask.solver.Marc().libraryPath('../../')) try: - from py_post import * + import py_post except: print('error: no valid Mentat release found') sys.exit(-1) @@ -63,7 +63,7 @@ except: # --------------------------- open results file and initialize mesh ---------- -p = post_open(filename+'.t16') +p = py_post.post_open(filename+'.t16') p.moveto(0) Nnodes = p.nodes() Nincrements = p.increments() - 1 # t16 contains one "virtual" increment (at 0) @@ -114,7 +114,7 @@ for incCount,position in enumerate(locations): # walk through locations p.moveto(position+1) # wind to correct position - # --- get displacements +# --- get displacements node_displacement = [[0,0,0] for i in range(Nnodes)] for n in range(Nnodes): @@ -124,10 +124,11 @@ for incCount,position in enumerate(locations): # walk through locations cellnode_displacement = [[c[i][n] for i in range(3)] for n in range(Ncellnodes)] - # --- append displacements to corresponding files +# --- append displacements to corresponding files for geomtype in options.type: - outFilename = eval('"'+eval("'%%s_%%s_inc%%0%ii.vtk'%(math.log10(max(increments+[1]))+1)")+'"%(dirname + os.sep + os.path.split(filename)[1],geomtype,increments[incCount])') + outFilename = eval('"'+eval("'%%s_%%s_inc%%0%ii.vtk'%(math.log10(max(increments+[1]))+1)")\ + +'"%(dirname + os.sep + os.path.split(filename)[1],geomtype,increments[incCount])') print outFilename shutil.copyfile('%s_%s.vtk'%(filename,geomtype),outFilename) diff --git
a/processing/post/marc_extractData.py b/processing/post/marc_extractData.py index 1e5b76f60..b920a9cdd 100755 --- a/processing/post/marc_extractData.py +++ b/processing/post/marc_extractData.py @@ -10,12 +10,8 @@ scriptID = ' '.join([scriptName,damask.version]) # ----------------------------- def ParseOutputFormat(filename,homogID,crystID,phaseID): -# -# parse .output* files in order to get a list of outputs -# ----------------------------- - - myID = { - 'Homogenization': homogID, + """parse .output* files in order to get a list of outputs""" + myID = {'Homogenization': homogID, 'Crystallite': crystID, 'Constitutive': phaseID, } @@ -61,24 +57,24 @@ def ParseOutputFormat(filename,homogID,crystID,phaseID): elif length > 0: format[what]['outputs'].append([output,length]) - if not '_id' in format[what]['specials']: + if '_id' not in format[what]['specials']: print "\nsection '%s' not found in <%s>"%(myID[what], what) print '\n'.join(map(lambda x:' [%s]'%x, format[what]['specials']['brothers'])) - + return format # ----------------------------- def ParsePostfile(p,filename, outputFormat, legacyFormat): -# -# parse postfile in order to get position and labels of outputs -# needs "outputFormat" for mapping of output names to postfile output indices -# ----------------------------- + """ + parse postfile in order to get position and labels of outputs + needs "outputFormat" for mapping of output names to postfile output indices + """ startVar = {True: 'GrainCount', False:'HomogenizationCount'} - # --- build statistics +# --- build statistics stat = { \ 'IndexOfLabel': {}, \ @@ -95,7 +91,7 @@ def ParsePostfile(p,filename, outputFormat, legacyFormat): 'LabelOfElementalTensor': [None]*p.element_tensors(), \ } - # --- find labels +# --- find labels for labelIndex in range(stat['NumberOfNodalScalars']): label = p.node_scalar_label(labelIndex) @@ -119,9 +115,9 @@ def ParsePostfile(p,filename, outputFormat, legacyFormat): startIndex = stat['IndexOfLabel'][startVar[legacyFormat]] stat['LabelOfElementalScalar'][startIndex] = startVar[legacyFormat] - # We now have to find a mapping for each output label as defined in the .output* files to the output position in the post file - # Since we know where the user defined outputs start ("startIndex"), we can simply assign increasing indices to the labels - # given in the .output* file +# We now have to find a mapping for each output label as defined in the .output* files to the output position in the post file +# Since we know where the user defined outputs start ("startIndex"), we can simply assign increasing indices to the labels +# given in the .output* file offset = 1 if legacyFormat: @@ -177,10 +173,7 @@ def ParsePostfile(p,filename, outputFormat, legacyFormat): # ----------------------------- def GetIncrementLocations(p,Nincrements,options): -# -# get mapping between positions in postfile and increment number -# ----------------------------- - + """get mapping between positions in postfile and increment number""" incAtPosition = {} positionOfInc = {} @@ -209,7 +202,6 @@ def GetIncrementLocations(p,Nincrements,options): # ----------------------------- def SummarizePostfile(stat,where=sys.stdout): -# ----------------------------- where.write('\n\n') where.write('title:\t%s'%stat['Title'] + '\n\n') @@ -217,16 +209,18 @@ def SummarizePostfile(stat,where=sys.stdout): where.write('increments:\t%i'%(stat['NumberOfIncrements']) + '\n\n') where.write('nodes:\t%i'%stat['NumberOfNodes'] + '\n\n') where.write('elements:\t%i'%stat['NumberOfElements'] + '\n\n') - 
where.write('nodal scalars:\t%i'%stat['NumberOfNodalScalars'] + '\n\n ' + '\n '.join(stat['LabelOfNodalScalar']) + '\n\n') - where.write('elemental scalars:\t%i'%stat['NumberOfElementalScalars'] + '\n\n ' + '\n '.join(stat['LabelOfElementalScalar']) + '\n\n') - where.write('elemental tensors:\t%i'%stat['NumberOfElementalTensors'] + '\n\n ' + '\n '.join(stat['LabelOfElementalTensor']) + '\n\n') - + where.write('nodal scalars:\t%i'%stat['NumberOfNodalScalars'] + '\n\n '\ + +'\n '.join(stat['LabelOfNodalScalar']) + '\n\n') + where.write('elemental scalars:\t%i'%stat['NumberOfElementalScalars'] + '\n\n '\ + + '\n '.join(stat['LabelOfElementalScalar']) + '\n\n') + where.write('elemental tensors:\t%i'%stat['NumberOfElementalTensors'] + '\n\n '\ + + '\n '.join(stat['LabelOfElementalTensor']) + '\n\n') + return True # ----------------------------- def SummarizeOutputfile(format,where=sys.stdout): -# ----------------------------- where.write('\nUser Defined Outputs') for what in format.keys(): @@ -239,7 +233,6 @@ def SummarizeOutputfile(format,where=sys.stdout): # ----------------------------- def writeHeader(myfile,stat,geomtype): -# ----------------------------- myfile.write('2\theader\n') myfile.write(string.replace('$Id$','\n','\\n')+ @@ -316,7 +309,7 @@ if not os.path.exists(filename+'.t16'): sys.path.append(damask.solver.Marc().libraryPath('../../')) try: - from py_post import * + import py_post except: print('error: no valid Mentat release found') sys.exit(-1) @@ -336,14 +329,14 @@ if damask.core.mesh.mesh_init_postprocessing(filename+'.mesh'): # --- check if ip data available for all elements; if not, then .t19 file is required -p = post_open(filename+'.t16') +p = py_post.post_open(filename+'.t16') asciiFile = False p.moveto(1) for e in range(p.elements()): if not damask.core.mesh.mesh_get_nodeAtIP(str(p.element(e).type),1): if os.path.exists(filename+'.t19'): p.close() - p = post_open(filename+'.t19') + p = py_post.post_open(filename+'.t19') asciiFile = True break @@ -383,18 +376,20 @@ time_start = time.time() for incCount,position in enumerate(locations): # walk through locations p.moveto(position+1) # wind to correct position time_delta = (float(len(locations)) / float(incCount+1) - 1.0) * (time.time() - time_start) - sys.stdout.write("\r(%02i:%02i:%02i) processing increment %i of %i..."%(time_delta//3600,time_delta%3600//60,time_delta%60,incCount+1,len(locations))) + sys.stdout.write("\r(%02i:%02i:%02i) processing increment %i of %i..."\ + %(time_delta//3600,time_delta%3600//60,time_delta%60,incCount+1,len(locations))) sys.stdout.flush() - # --- write header +# --- write header outFilename = {} for geomtype in options.type: - outFilename[geomtype] = eval('"'+eval("'%%s_%%s_inc%%0%ii.txt'%(math.log10(max(increments+[1]))+1)")+'"%(dirname + os.sep + os.path.split(filename)[1],geomtype,increments[incCount])') + outFilename[geomtype] = eval('"'+eval("'%%s_%%s_inc%%0%ii.txt'%(math.log10(max(increments+[1]))+1)")\ + +'"%(dirname + os.sep + os.path.split(filename)[1],geomtype,increments[incCount])') with open(outFilename[geomtype],'w') as myfile: writeHeader(myfile,stat,geomtype) - # --- write node based data + # --- write node based data if geomtype == 'nodebased': for n in range(stat['NumberOfNodes']): @@ -403,7 +398,7 @@ for incCount,position in enumerate(locations): # walk through locations myfile.write('\t'+str(p.node_scalar(n,l))) myfile.write('\n') - # --- write ip based data + # --- write ip based data elif geomtype == 'ipbased': for e in range(stat['NumberOfElements']): @@ -424,5 
+419,3 @@ for incCount,position in enumerate(locations): # walk through locations p.close() sys.stdout.write("\n") - -# --------------------------- DONE -------------------------------- diff --git a/processing/post/mentat_colorMap.py b/processing/post/mentat_colorMap.py index 8e630df9d..c2ce63238 100755 --- a/processing/post/mentat_colorMap.py +++ b/processing/post/mentat_colorMap.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os, sys, string +import os,sys import damask from optparse import OptionParser @@ -14,9 +14,9 @@ def outMentat(cmd,locals): exec(cmd[3:]) elif cmd[0:3] == '(?)': cmd = eval(cmd[3:]) - py_send(cmd) + py_mentat.py_send(cmd) else: - py_send(cmd) + py_mentat.py_send(cmd) return @@ -59,7 +59,8 @@ def colorMap(colors,baseIdx=32): # MAIN FUNCTION STARTS HERE # ----------------------------- -parser = OptionParser(option_class=damask.extendableOption, usage="%prog [options] predefinedScheme | (lower_h,s,l upper_h,s,l)", description = """ +parser = OptionParser(option_class=damask.extendableOption, +usage="%prog [options] predefinedScheme | (lower_h,s,l upper_h,s,l)", description = """ Changes the color map in MSC.Mentat. Interpolates colors between "lower_hsl" and "upper_hsl". @@ -121,13 +122,12 @@ if options.palettef: elif options.palette: for theColor in theMap.export(format='list',steps=options.colorcount): print '\t'.join(map(lambda x: str(int(255*x)),theColor)) -else: -### connect to Mentat and change colorMap +else: # connect to Mentat and change colorMap sys.path.append(damask.solver.Marc().libraryPath('../../')) try: - from py_mentat import * + import py_mentat print 'waiting to connect...' - py_connect('',options.port) + py_mentat.py_connect('',options.port) print 'connected...' mentat = True except: @@ -138,7 +138,7 @@ else: cmds = colorMap(theMap.export(format='list',steps=options.colorcount),options.baseIdx) if mentat: output(['*show_table']+cmds+['*show_model *redraw'],outputLocals,'Mentat') - py_disconnect() + py_mentat.py_disconnect() if options.verbose: output(cmds,outputLocals,'Stdout') diff --git a/processing/post/perceptualUniformColorMap.py b/processing/post/perceptualUniformColorMap.py index f84928f23..c2201f76b 100755 --- a/processing/post/perceptualUniformColorMap.py +++ b/processing/post/perceptualUniformColorMap.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import string,sys,os +import sys,os import damask from optparse import OptionParser @@ -9,7 +9,7 @@ scriptName = os.path.splitext(os.path.basename(__file__))[0] scriptID = ' '.join([scriptName,damask.version]) # -------------------------------------------------------------------- - # MAIN +# MAIN # -------------------------------------------------------------------- #Borland, D., & Taylor, R. M. (2007). Rainbow Color Map (Still) Considered Harmful. Computer Graphics and Applications, IEEE, 27(2), 14--17. #Moreland, K. (2009). Diverging Color Maps for Scientific Visualization. In Proc. 5th Int. Symp. Visual Computing (pp. 92--103). 
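Both colormap scripts interpolate between a lower and an upper color inside a chosen color model rather than in raw RGB; interpolating in a perceptual space (e.g. Lab or Msh, per the citations above) keeps the perceived step size uniform. A minimal standalone sketch of such a blend (plain linear interpolation of two coordinate triples, standing in for damask.Colormap's actual scheme):

import numpy as np
def interpolate(lower,upper,steps):                                   # steps >= 2
  """linear blend between two color coordinate triples (e.g. h,s,l)"""
  lower,upper = np.array(lower,'d'),np.array(upper,'d')
  return [tuple(lower + (upper-lower)*i/(steps-1.)) for i in xrange(steps)]

interpolate((0.23,1.0,0.5),(0.60,1.0,0.5),3)                          # lower, midpoint, upper (arbitrary h,s,l values)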
@@ -62,8 +62,10 @@ if options.trim[0] < -1.0 or \ parser.error('invalid trim range (-1 +1).') -name = options.format if options.basename == None else options.basename -output = sys.stdout if options.basename == None else open(os.path.basename(options.basename)+extensions[outtypes.index(options.format)],'w') +name = options.format if options.basename is None\ + else options.basename +output = sys.stdout if options.basename is None\ + else open(os.path.basename(options.basename)+extensions[outtypes.index(options.format)],'w') colorLeft = damask.Color(options.colormodel.upper(), list(options.left)) colorRight = damask.Color(options.colormodel.upper(), list(options.right)) diff --git a/processing/post/permuteData.py b/processing/post/permuteData.py index f873ca447..9669f1b20 100755 --- a/processing/post/permuteData.py +++ b/processing/post/permuteData.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string +import os,sys import numpy as np from optparse import OptionParser import damask @@ -79,7 +79,7 @@ for name in filenames: # ------------------------------------------ assemble header --------------------------------------- - randomSeed = int(os.urandom(4).encode('hex'), 16) if options.randomSeed == None else options.randomSeed # random seed per file + randomSeed = int(os.urandom(4).encode('hex'), 16) if options.randomSeed is None else options.randomSeed # random seed per file np.random.seed(randomSeed) table.info_append([scriptID + '\t' + ' '.join(sys.argv[1:]), diff --git a/processing/post/postResults.py b/processing/post/postResults.py index 0dfca8279..95b9eabf8 100755 --- a/processing/post/postResults.py +++ b/processing/post/postResults.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,math,re,threading,time,struct,string +import os,sys,math,re,time,struct,string import damask from optparse import OptionParser, OptionGroup @@ -17,7 +17,6 @@ fileExtensions = { \ # ----------------------------- class vector: # mimic py_post node object -# ----------------------------- x,y,z = [None,None,None] def __init__(self,coords): @@ -27,7 +26,6 @@ class vector: # mimic py_post node object # ----------------------------- class element: # mimic py_post element object -# ----------------------------- items = [] type = None @@ -37,7 +35,6 @@ class element: # mimic py_post element object # ----------------------------- class elemental_scalar: # mimic py_post element_scalar object -# ----------------------------- id = None value = None @@ -48,7 +45,6 @@ class elemental_scalar: # mimic py_post element_scalar object # ----------------------------- class MPIEspectral_result: # mimic py_post result object -# ----------------------------- file = None dataOffset = 0 @@ -68,7 +64,8 @@ class MPIEspectral_result: # mimic py_post result object increment = 0 startingIncrement = 0 position = 0 - time = 0.0 # this is a dummy at the moment, we need to parse the load file and figure out what time a particular increment corresponds to +# this is a dummy at the moment, we need to parse the load file and figure out what time a particular increment corresponds to + time = 0.0 N_nodes = 0 N_node_scalars = 0 N_elements = 0 @@ -87,40 +84,40 @@ class MPIEspectral_result: # mimic py_post result object self.dataOffset += 7 #search first for the new keywords with ':', if not found try to find the old ones self.theTitle = self._keyedString('load:') - if self.theTitle == None: + if self.theTitle is None: self.theTitle = self._keyedString('load') self.wd = 
self._keyedString('workingdir:') - if self.wd == None: + if self.wd is None: self.wd = self._keyedString('workingdir') self.geometry = self._keyedString('geometry:') - if self.geometry == None: + if self.geometry is None: self.geometry = self._keyedString('geometry') self.N_loadcases = self._keyedPackedArray('loadcases:',count=1,type='i')[0] - if self.N_loadcases == None: + if self.N_loadcases is None: self.N_loadcases = self._keyedPackedArray('loadcases',count=1,type='i')[0] self._frequencies = self._keyedPackedArray('frequencies:',count=self.N_loadcases,type='i') - if all ( i == None for i in self._frequencies): + if all ( i is None for i in self._frequencies): self._frequencies = self._keyedPackedArray('frequencies',count=self.N_loadcases,type='i') self._increments = self._keyedPackedArray('increments:',count=self.N_loadcases,type='i') - if all (i == None for i in self._increments): + if all (i is None for i in self._increments): self._increments = self._keyedPackedArray('increments',count=self.N_loadcases,type='i') self.startingIncrement = self._keyedPackedArray('startingIncrement:',count=1,type='i')[0] - if self.startingIncrement == None: + if self.startingIncrement is None: self.startingIncrement = self._keyedPackedArray('startingIncrement',count=1,type='i')[0] self._times = self._keyedPackedArray('times:',count=self.N_loadcases,type='d') - if all (i == None for i in self._times): + if all (i is None for i in self._times): self._times = self._keyedPackedArray('times',count=self.N_loadcases,type='d') self._logscales = self._keyedPackedArray('logscales:',count=self.N_loadcases,type='i') - if all (i == None for i in self._logscales): + if all (i is None for i in self._logscales): self._logscales = self._keyedPackedArray('logscales',count=self.N_loadcases,type='i') self.size = self._keyedPackedArray('size:',count=3,type='d') @@ -135,7 +132,7 @@ class MPIEspectral_result: # mimic py_post result object self.N_elements = self.grid[0] * self.grid[1] * self.grid[2] self.N_element_scalars = self._keyedPackedArray('materialpoint_sizeResults:',count=1,type='i')[0] - if self.N_element_scalars == None: + if self.N_element_scalars is None: self.N_element_scalars = self._keyedPackedArray('materialpoint_sizeResults',count=1,type='i')[0] self.N_positions = (self.filesize-self.dataOffset)/(self.N_elements*self.N_element_scalars*8) @@ -156,8 +153,7 @@ class MPIEspectral_result: # mimic py_post result object print '\n**\n* Unexpected file size. 
Incomplete simulation or file corrupted!\n**' def __str__(self): - - + """Summary of results file""" return '\n'.join([ 'workdir: %s'%self.wd, 'geometry: %s'%self.geometry, @@ -181,13 +177,14 @@ class MPIEspectral_result: # mimic py_post result object filepos=0 # start at the beginning while name != identifier and filepos < self.dataOffset: # stop searching when found or when reached end of header self.file.seek(filepos) - dataLen=struct.unpack('i',self.file.read(4))[0] # read the starting tag in front of the keyword (Fortran indicates start and end of writing by a 4 byte tag indicating the length of the following data) - name = self.file.read(len(identifier)) # anticipate identifier - start=filepos+(4+len(identifier)) # this is the position where the values for the found key are stored - filepos=filepos+(4+dataLen+4) # forward to next keyword +# read the starting tag in front of the keyword (Fortran indicates start and end of writing by a 4 byte tag indicating the length of the following data) + dataLen=struct.unpack('i',self.file.read(4))[0] + name = self.file.read(len(identifier)) # anticipate identifier + start=filepos+(4+len(identifier)) # position of the values for the found key + filepos=filepos+(4+dataLen+4) # forward to next keyword - if name==identifier: # found the correct name - key['pos'] = start # save position + if name==identifier: # found the correct name + key['pos'] = start # save position key['name'] = name return key @@ -195,7 +192,7 @@ class MPIEspectral_result: # mimic py_post result object bytecount = {'d': 8,'i': 4} values = [default]*count key = self.locateKeyValue(identifier) - if key['name'] == identifier and key['pos'] != None: + if key['name'] == identifier and key['pos'] is not None: self.file.seek(key['pos']) for i in range(count): values[i] = struct.unpack(type,self.file.read(bytecount[type]))[0] @@ -286,8 +283,6 @@ class MPIEspectral_result: # mimic py_post result object if not options.legacy: incStart = self.dataOffset \ + self.position*8*self.N_elements*self.N_element_scalars - # header & footer + extra header and footer for 4 byte int range (Fortran) - # values where = (e*self.N_element_scalars + idx)*8 try: self.file.seek(incStart+where) @@ -299,15 +294,15 @@ class MPIEspectral_result: # mimic py_post result object else: self.fourByteLimit = 2**31 -1 -8 +# header & footer + extra header and footer for 4 byte int range (Fortran) +# values incStart = self.dataOffset \ + self.position*8*( 1 + self.N_elements*self.N_element_scalars*8//self.fourByteLimit \ + self.N_elements*self.N_element_scalars) - # header & footer + extra header and footer for 4 byte int range (Fortran) - # values where = (e*self.N_element_scalars + idx)*8 try: - if where%self.fourByteLimit + 8 >= self.fourByteLimit: # danger of reading into fortran record footer at 4 byte limit + if where%self.fourByteLimit + 8 >= self.fourByteLimit: # danger of reading into fortran record footer at 4 byte limit data='' for i in xrange(8): self.file.seek(incStart+where+(where//self.fourByteLimit)*8+4) @@ -329,51 +324,10 @@ class MPIEspectral_result: # mimic py_post result object def element_tensors(self): return self.N_element_tensors - -# ----------------------------- -class backgroundMessage(threading.Thread): -# ----------------------------- - - def __init__(self): - threading.Thread.__init__(self) - self.message = '' - self.new_message = '' - self.counter = 0 - self.symbols = ['- ', '\ ', '| ', '/ ',] - self.waittime = 0.5 - - def __quit__(self): - length = len(self.message) + 
len(self.symbols[self.counter]) - sys.stderr.write(chr(8)*length + ' '*length + chr(8)*length) - sys.stderr.write('') - - def run(self): - while not threading.enumerate()[0]._Thread__stopped: - time.sleep(self.waittime) - self.update_message() - self.__quit__() - - def set_message(self, new_message): - self.new_message = new_message - self.print_message() - - def print_message(self): - length = len(self.message) + len(self.symbols[self.counter]) - sys.stderr.write(chr(8)*length + ' '*length + chr(8)*length) # delete former message - sys.stderr.write(self.symbols[self.counter] + self.new_message) # print new message - self.message = self.new_message - - def update_message(self): - self.counter = (self.counter + 1)%len(self.symbols) - self.print_message() - # ----------------------------- def ipCoords(elemType, nodalCoordinates): -# -# returns IP coordinates for a given element -# ----------------------------- - + """returns IP coordinates for a given element""" nodeWeightsPerNode = { 7: [ [27.0, 9.0, 3.0, 9.0, 9.0, 3.0, 1.0, 3.0], [ 9.0, 27.0, 9.0, 3.0, 3.0, 9.0, 3.0, 1.0], @@ -422,10 +376,7 @@ def ipCoords(elemType, nodalCoordinates): # ----------------------------- def ipIDs(elemType): -# -# returns IP numbers for given element type -# ----------------------------- - + """returns IP numbers for given element type""" ipPerNode = { 7: [ 1, 2, 4, 3, 5, 6, 8, 7 ], 57: [ 1, 2, 4, 3, 5, 6, 8, 7 ], @@ -441,9 +392,7 @@ def ipIDs(elemType): # ----------------------------- def substituteLocation(string, mesh, coords): -# -# do variable interpolation in group and filter strings -# ----------------------------- + """do variable interpolation in group and filter strings""" substitute = string substitute = substitute.replace('elem', str(mesh[0])) substitute = substitute.replace('node', str(mesh[1])) @@ -458,10 +407,7 @@ def substituteLocation(string, mesh, coords): # ----------------------------- def heading(glue,parts): -# -# joins pieces from parts by glue. second to last entry in pieces tells multiplicity -# ----------------------------- - + """joins pieces from parts by glue. 
second to last entry in pieces tells multiplicity""" header = [] for pieces in parts: if pieces[-2] == 0: @@ -473,12 +419,12 @@ def heading(glue,parts): # ----------------------------- def mapIncremental(label, mapping, N, base, new): -# -# applies the function defined by "mapping" -# (can be either 'min','max','avg', 'sum', or user specified) -# to a list of data -# ----------------------------- + """ + applies the function defined by "mapping" + (can be either 'min','max','avg', 'sum', or user specified) + to a list of data + """ theMap = { 'min': lambda n,b,a: a if n==0 else min(b,a), 'max': lambda n,b,a: a if n==0 else max(b,a), 'avg': lambda n,b,a: (n*b+a)/(n+1), @@ -504,10 +450,7 @@ def mapIncremental(label, mapping, N, base, new): # ----------------------------- def OpenPostfile(name,type,nodal = False): -# -# open postfile with extrapolation mode "translate" -# ----------------------------- - + """open postfile with extrapolation mode 'translate'""" p = {\ 'spectral': MPIEspectral_result,\ 'marc': post_open,\ @@ -520,10 +463,7 @@ def OpenPostfile(name,type,nodal = False): # ----------------------------- def ParseOutputFormat(filename,what,me): -# -# parse .output* files in order to get a list of outputs -# ----------------------------- - + """parse .output* files in order to get a list of outputs""" content = [] format = {'outputs':{},'specials':{'brothers':[]}} for prefix in ['']+map(str,range(1,17)): @@ -567,13 +507,11 @@ def ParseOutputFormat(filename,what,me): # ----------------------------- def ParsePostfile(p,filename, outputFormat): -# -# parse postfile in order to get position and labels of outputs -# needs "outputFormat" for mapping of output names to postfile output indices -# ----------------------------- - - # --- build statistics + """ + parse postfile in order to get position and labels of outputs + needs "outputFormat" for mapping of output names to postfile output indices + """ stat = { \ 'IndexOfLabel': {}, \ 'Title': p.title(), \ @@ -589,7 +527,7 @@ def ParsePostfile(p,filename, outputFormat): 'LabelOfElementalTensor': [None]*p.element_tensors(), \ } - # --- find labels +# --- find labels for labelIndex in range(stat['NumberOfNodalScalars']): label = p.node_scalar_label(labelIndex) @@ -613,9 +551,9 @@ def ParsePostfile(p,filename, outputFormat): startIndex = stat['IndexOfLabel']['HomogenizationCount'] stat['LabelOfElementalScalar'][startIndex] = 'HomogenizationCount' - # We now have to find a mapping for each output label as defined in the .output* files to the output position in the post file - # Since we know where the user defined outputs start ("startIndex"), we can simply assign increasing indices to the labels - # given in the .output* file +# We now have to find a mapping for each output label as defined in the .output* files to the output position in the post file +# Since we know where the user defined outputs start ("startIndex"), we can simply assign increasing indices to the labels +# given in the .output* file offset = 1 for (name,N) in outputFormat['Homogenization']['outputs']: @@ -663,7 +601,6 @@ def ParsePostfile(p,filename, outputFormat): # ----------------------------- def SummarizePostfile(stat,where=sys.stdout,format='marc'): -# ----------------------------- where.write('\n\n') where.write('title:\t%s'%stat['Title'] + '\n\n') @@ -671,9 +608,12 @@ def SummarizePostfile(stat,where=sys.stdout,format='marc'): where.write('increments:\t%i'%(stat['NumberOfIncrements']) + '\n\n') where.write('nodes:\t%i'%stat['NumberOfNodes'] + '\n\n') 
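# (sketch) mapIncremental above folds a stream of member values into a running group
# statistic without storing the stream: for 'avg' the recurrence is b <- (n*b + a)/(n+1),
# so feeding a = 2.0, 4.0, 6.0 yields b = 2.0, 3.0, 4.0, the running mean; 'min', 'max',
# and 'sum' keep the obvious running extremum or total, and any other mapping string is
# treated as a user-specified function of the incoming value.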
where.write('elements:\t%i'%stat['NumberOfElements'] + '\n\n') - where.write('nodal scalars:\t%i'%stat['NumberOfNodalScalars'] + '\n\n ' + '\n '.join(stat['LabelOfNodalScalar']) + '\n\n') - where.write('elemental scalars:\t%i'%stat['NumberOfElementalScalars'] + '\n\n ' + '\n '.join(stat['LabelOfElementalScalar']) + '\n\n') - where.write('elemental tensors:\t%i'%stat['NumberOfElementalTensors'] + '\n\n ' + '\n '.join(stat['LabelOfElementalTensor']) + '\n\n') + where.write('nodal scalars:\t%i'%stat['NumberOfNodalScalars'] + '\n\n '\ + +'\n '.join(stat['LabelOfNodalScalar']) + '\n\n') + where.write('elemental scalars:\t%i'%stat['NumberOfElementalScalars'] + '\n\n '\ + + '\n '.join(stat['LabelOfElementalScalar']) + '\n\n') + where.write('elemental tensors:\t%i'%stat['NumberOfElementalTensors'] + '\n\n '\ + + '\n '.join(stat['LabelOfElementalTensor']) + '\n\n') return True @@ -799,14 +739,14 @@ if not os.path.exists(files[0]): # --- figure out filetype -if options.filetype == None: +if options.filetype is None: ext = os.path.splitext(files[0])[1] for theType in fileExtensions.keys(): if ext in fileExtensions[theType]: options.filetype = theType break -if options.filetype != None: options.filetype = options.filetype.lower() +if options.filetype is not None: options.filetype = options.filetype.lower() if options.filetype == 'marc': offset_pos = 1 else: offset_pos = 0 @@ -822,7 +762,7 @@ if options.filetype == 'marc': sys.path.append(damask.solver.Marc().libraryPath('../../')) try: - from py_post import * + from py_post import post_open except: print('error: no valid Mentat release found') sys.exit(-1) @@ -834,7 +774,7 @@ if options.constitutiveResult and not options.phase: parser.print_help() parser.error('constitutive results require phase...') -if options.nodalScalar and ( options.elemScalar or options.elemTensor +if options.nodalScalar and ( options.elemScalar or options.elemTensor\ or options.homogenizationResult or options.crystalliteResult or options.constitutiveResult ): parser.print_help() parser.error('not allowed to mix nodal with elemental results...') @@ -851,7 +791,7 @@ options.sep.reverse() # --- start background messaging -bg = backgroundMessage() +bg = damask.util.backgroundMessage() bg.start() # --- parse .output and .t16 files @@ -874,7 +814,7 @@ bg.set_message('parsing .output files...') for what in me: outputFormat[what] = ParseOutputFormat(filename, what, me[what]) - if not '_id' in outputFormat[what]['specials']: + if '_id' not in outputFormat[what]['specials']: print "\nsection '%s' not found in <%s>"%(me[what], what) print '\n'.join(map(lambda x:' [%s]'%x, outputFormat[what]['specials']['brothers'])) @@ -886,15 +826,18 @@ if options.filetype == 'marc': stat['NumberOfIncrements'] -= 1 # t16 contains one "virtual" increment (at 0) # --- sanity check for output variables -# for mentat variables (nodalScalar,elemScalar,elemTensor) we simply have to check whether the label is found in the stat[indexOfLabel] dictionary -# for user defined variables (homogenizationResult,crystalliteResult,constitutiveResult) we have to check the corresponding outputFormat, since the namescheme in stat['IndexOfLabel'] is different +# for mentat variables (nodalScalar,elemScalar,elemTensor) we simply have to check whether the label +# is found in the stat[indexOfLabel] dictionary for user defined variables (homogenizationResult, +# crystalliteResult,constitutiveResult) we have to check the corresponding outputFormat, since the +# namescheme in stat['IndexOfLabel'] is different for opt in 
@@ -952,15 +895,14 @@ if options.nodalScalar:
         myIpID = 0
         myGrainID = 0
 
-        # --- filter valid locations
-
-        filter = substituteLocation(options.filter, [myElemID,myNodeID,myIpID,myGrainID], myNodeCoordinates)      # generates an expression that is only true for the locations specified by options.filter
+        # generate an expression that is only true for the locations specified by options.filter
+        filter = substituteLocation(options.filter, [myElemID,myNodeID,myIpID,myGrainID], myNodeCoordinates)
         if filter != '' and not eval(filter):                               # for all filter expressions that are not true:...
           continue                                                          # ... ignore this data point and continue with next
 
         # --- group data locations
-
-        grp = substituteLocation('#'.join(options.sep), [myElemID,myNodeID,myIpID,myGrainID], myNodeCoordinates)  # generates a unique key for a group of separated data based on the separation criterium for the location
+        # generate a unique key for a group of separated data based on the separation criterion for the location
+        grp = substituteLocation('#'.join(options.sep), [myElemID,myNodeID,myIpID,myGrainID], myNodeCoordinates)
 
         if grp not in index:                                                # create a new group if not yet present
           index[grp] = groupCount
@@ -983,26 +925,28 @@ else:
       if e%1000 == 0:
         bg.set_message('scan elem %i...'%e)
       myElemID = p.element_id(e)
-      myIpCoordinates = ipCoords(p.element(e).type, map(lambda node: [node.x, node.y, node.z], map(p.node, map(p.node_sequence, p.element(e).items))))
+      myIpCoordinates = ipCoords(p.element(e).type, map(lambda node: [node.x, node.y, node.z],
+                                                        map(p.node, map(p.node_sequence, p.element(e).items))))
       myIpIDs = ipIDs(p.element(e).type)
       Nips = len(myIpIDs)
      myNodeIDs = p.element(e).items[:Nips]
      for n in range(Nips):
        myIpID = myIpIDs[n]
        myNodeID = myNodeIDs[n]
-        for g in range(('GrainCount' in stat['IndexOfLabel'] and int(p.element_scalar(e, stat['IndexOfLabel']['GrainCount'])[0].value))
-                       or 1):
+        for g in range(('GrainCount' in stat['IndexOfLabel']\
+                        and int(p.element_scalar(e, stat['IndexOfLabel']['GrainCount'])[0].value))\
+                        or 1):
          myGrainID = g + 1
 
          # --- filter valid locations
-
-          filter = substituteLocation(options.filter, [myElemID,myNodeID,myIpID,myGrainID], myIpCoordinates[n])   # generates an expression that is only true for the locations specified by options.filter
+          # generates an expression that is only true for the locations specified by options.filter
+          filter = substituteLocation(options.filter, [myElemID,myNodeID,myIpID,myGrainID], myIpCoordinates[n])
          if filter != '' and not eval(filter):                              # for all filter expressions that are not true:...
           continue                                                          # ... ignore this data point and continue with next
 
          # --- group data locations
-
-          grp = substituteLocation('#'.join(options.sep), [myElemID,myNodeID,myIpID,myGrainID], myIpCoordinates[n])  # generates a unique key for a group of separated data based on the separation criterium for the location
+          # generates a unique key for a group of separated data based on the separation criterion for the location
+          grp = substituteLocation('#'.join(options.sep), [myElemID,myNodeID,myIpID,myGrainID], myIpCoordinates[n])
 
          if grp not in index:                                               # create a new group if not yet present
            index[grp] = groupCount
@@ -1059,11 +1003,8 @@
 fileOpen = False
 assembleHeader = True
 header = []
 standard = ['inc'] + \
-           {True: ['time'],
-            False:[]}[options.time] + \
-           ['elem','node','ip','grain'] + \
-           {True: ['1_nodeinitialcoord','2_nodeinitialcoord','3_nodeinitialcoord'],
-            False:['1_ipinitialcoord','2_ipinitialcoord','3_ipinitialcoord']}[options.nodalScalar != []]
+           (['time'] if options.time else []) + \
+           ['elem','node','ip','grain','1_pos','2_pos','3_pos']
 
 # --------------------------- loop over positions --------------------------------
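Note the parentheses around the conditional expression in the rewritten 'standard' list above: Python's conditional binds looser than '+', so without them 'inc' would be dropped whenever options.time is false. A quick check:

    print ['inc'] +  ['time'] if False else []    # []       -- the whole sum became conditional
    print ['inc'] + (['time'] if False else [])   # ['inc']  -- only 'time' is optional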
@@ -1104,7 +1045,8 @@ for incCount,position in enumerate(locations):   # walk through locations
     if fileOpen:
       file.close()
       fileOpen = False
-    outFilename = eval('"'+eval("'%%s_inc%%0%ii%%s.txt'%(math.log10(max(increments+[1]))+1)")+'"%(dirname + os.sep + options.prefix + os.path.split(filename)[1],increments[incCount],options.suffix)')
+    outFilename = eval('"'+eval("'%%s_inc%%0%ii%%s.txt'%(math.log10(max(increments+[1]))+1)")\
+                       +'"%(dirname + os.sep + options.prefix + os.path.split(filename)[1],increments[incCount],options.suffix)')
   else:
     outFilename = '%s.txt'%(dirname + os.sep + options.prefix + os.path.split(filename)[1] + options.suffix)
 
@@ -1128,7 +1070,8 @@ for incCount,position in enumerate(locations):   # walk through locations
       member += 1
       if member%1000 == 0:
         time_delta = ((len(locations)*memberCount)/float(member+incCount*memberCount)-1.0)*(time.time()-time_start)
-        bg.set_message('(%02i:%02i:%02i) processing point %i of %i from increment %i (position %i)...'%(time_delta//3600,time_delta%3600//60,time_delta%60,member,memberCount,increments[incCount],position))
+        bg.set_message('(%02i:%02i:%02i) processing point %i of %i from increment %i (position %i)...'
+                       %(time_delta//3600,time_delta%3600//60,time_delta%60,member,memberCount,increments[incCount],position))
 
       newby = []                                 # current member's data
 
@@ -1140,7 +1083,9 @@ for incCount,position in enumerate(locations):   # walk through locations
           else:
             length = 1
             content = [ p.node_scalar(p.node_sequence(n),stat['IndexOfLabel'][label]) ]
-          if assembleHeader: header += heading('_',[[component,''.join( label.split() )] for component in range(int(length>1),length+int(length>1))])
+          if assembleHeader:
+            header += heading('_',[[component,''.join( label.split() )]
+                                   for component in range(int(length>1),length+int(length>1))])
           newby.append({'label':label,
                         'len':length,
                         'content':content })
 
@@ -1156,7 +1101,8 @@ for incCount,position in enumerate(locations):   # walk through locations
       if options.elemTensor:
         for label in options.elemTensor:
           if assembleHeader:
-            header += heading('.',[[''.join( label.split() ),component] for component in ['intensity','t11','t22','t33','t12','t23','t13']])
+            header += heading('.',[[''.join( label.split() ),component]
+                                   for component in ['intensity','t11','t22','t33','t12','t23','t13']])
           myTensor = p.element_tensor(p.element_sequence(e),stat['IndexOfLabel'][label])[n_local]
           newby.append({'label':label,
                         'len':7,
diff --git a/processing/post/reLabel.py b/processing/post/reLabel.py
index d74080efb..e70a1166e 100755
--- a/processing/post/reLabel.py
+++ b/processing/post/reLabel.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: UTF-8 no BOM -*-
 
-import os,sys,string
+import os,sys,re
 import damask
 from optparse import OptionParser
 
@@ -32,14 +32,17 @@ parser.set_defaults(label = [],
 
 (options,filenames) = parser.parse_args()
 
+pattern = [re.compile('^()(.+)$'),               # label pattern for scalar
+           re.compile('^(\d+_)?(.+)$'),          # label pattern for multidimension
+          ]
+
 # --- loop over input files -------------------------------------------------------------------------
 
 if filenames == []: filenames = [None]
 
 for name in filenames:
-  try:
-    table = damask.ASCIItable(name = name,
-                              buffered = False)
+  try:    table = damask.ASCIItable(name = name,
+                                    buffered = False)
   except: continue
   damask.util.report(scriptName,name)
 
@@ -63,8 +66,9 @@ for name in filenames:
   for i,index in enumerate(indices):
     if index == -1: remarks.append('label {} not present...'.format(options.label[i]))
     else:
+      m = pattern[dimensions[i]>1].match(table.labels[index])               # isolate label name
       for j in xrange(dimensions[i]):
-        table.labels[index+j] = table.labels[index+j].replace(options.label[i],options.substitute[i])
+        table.labels[index+j] = table.labels[index+j].replace(m.group(2),options.substitute[i])  # replace name with substitute
 
   if remarks != []: damask.util.croak(remarks)
   if errors  != []:
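The second compiled pattern above strips the numeric prefix that multidimensional ASCIItable labels carry, so only the name part gets substituted; in isolation:

    import re
    m = re.compile('^(\d+_)?(.+)$').match('2_coords')
    print m.group(2)                              # 'coords' -- the '2_' prefix stays untouched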
diff --git a/processing/post/rotateData.py b/processing/post/rotateData.py
index 42e24579a..36bda7761 100755
--- a/processing/post/rotateData.py
+++ b/processing/post/rotateData.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: UTF-8 no BOM -*-
 
-import os,sys,string,math
+import os,sys,math
 import numpy as np
 from optparse import OptionParser
 import damask
@@ -41,7 +41,7 @@ parser.set_defaults(rotation = (0.,1.,1.,1.),
 
 (options,filenames) = parser.parse_args()
 
-if options.vector == None and options.tensor == None:
+if options.vector is None and options.tensor is None:
   parser.error('no data column specified.')
 
 toRadians = math.pi/180.0 if options.degrees else 1.0    # rescale degrees to radians
@@ -107,8 +107,7 @@ for name in filenames:
       for column in items[datatype]['column']:           # loop over all requested labels
         table.data[column:column+items[datatype]['dim']] = \
           np.dot(R,np.dot(np.array(map(float,table.data[column:column+items[datatype]['dim']])).\
-                          reshape(items[datatype]['shape']),R.transpose())).\
-                          reshape(items[datatype]['dim'])
+                          reshape(items[datatype]['shape']),R.transpose())).reshape(items[datatype]['dim'])
 
     outputAlive = table.data_write()                     # output processed line
 
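The reflowed statement applies the usual tensor transformation T' = R T R^T column by column; a standalone check with a made-up tensor:

    import numpy as np
    R = np.eye(3)                                 # identity rotation for illustration
    T = np.arange(9.,dtype='d').reshape(3,3)
    print np.allclose(np.dot(R,np.dot(T,R.T)),T)  # True: the identity leaves T unchanged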
diff --git a/processing/post/sortTable.py b/processing/post/sortTable.py
index 4735062dc..92fe221ee 100755
--- a/processing/post/sortTable.py
+++ b/processing/post/sortTable.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: UTF-8 no BOM -*-
 
-import os,sys,string
+import os,sys
 import numpy as np
 from optparse import OptionParser
 import damask
@@ -36,7 +36,7 @@ parser.set_defaults(key = [],
 
 (options,filenames) = parser.parse_args()
 
-if options.keys == None:
+if options.keys is None:
   parser.error('No sorting column(s) specified.')
 
 options.keys.reverse()                           # numpy sorts with most significant column as last
 
diff --git a/processing/post/stddevDown.py b/processing/post/stddevDown.py
index 880961511..e14ee0058 100755
--- a/processing/post/stddevDown.py
+++ b/processing/post/stddevDown.py
@@ -144,16 +144,12 @@ for file in files:
   posOffset = (shift+[0.5,0.5,0.5])*dimension/resolution
   elementSize = dimension/resolution*packing
-  elem = 1
   for c in xrange(downSized[2]):
     for b in xrange(downSized[1]):
       for a in xrange(downSized[0]):
         datavar[a,b,c,locationCol:locationCol+3] = posOffset + [a,b,c]*elementSize
-        datavar[a,b,c,elemCol] = elem
         table.data = datavar[a,b,c,:].tolist()
         table.data_write()                       # output processed line
-        elem += 1
-
 
 # ------------------------------------------ output result ---------------------------------------
 
diff --git a/processing/post/vtk2ang.py b/processing/post/vtk2ang.py
index 639158293..40b06e41d 100755
--- a/processing/post/vtk2ang.py
+++ b/processing/post/vtk2ang.py
@@ -4,17 +4,15 @@
 import os,string,math,sys
 import numpy as np
 from optparse import OptionParser
-from vtk import *
+import vtk
+import damask
 
 scriptName = os.path.splitext(os.path.basename(__file__))[0]
 scriptID   = ' '.join([scriptName,damask.version])
 
 # -----------------------------
 def getHeader(filename,sizeFastIndex,sizeSlowIndex,stepsize):
-# -----------------------------
-# returns header for ang file
-# step size in micrometer
-
+  """returns header for ang file; step size in micrometer"""
   return '\n'.join([ \
     '# TEM_PIXperUM          1.000000', \
     '# x-star                1.000000', \
@@ -50,10 +48,7 @@ def getHeader(filename,sizeFastIndex,sizeSlowIndex,stepsize):
 
 # -----------------------------
 def positiveRadians(angle):
-# -----------------------------
-# returns positive angle in radians
-# gets angle in degrees
-
+  """returns positive angle in radians from angle in degrees"""
   angle = math.radians(float(angle))
   while angle < 0.0:
     angle += 2.0 * math.pi
@@ -63,14 +58,16 @@ def positiveRadians(angle):
 
 # -----------------------------
 def getDataLine(angles,x,y,validData=True):
-# -----------------------------
-# returns string of one line in ang file
-# convention in ang file: y coordinate comes first and is fastest index
-# positions in micrometer
-
+  """
+  returns string of one line in ang file
+
+  convention in ang file: y coordinate comes first and is fastest index
+  positions in micrometer
+  """
   info = {True:  (9999.9, 1.0, 0,99999,0.0),
           False: (  -1.0,-1.0,-1,   -1,1.0)}
-  return '%9.5f %9.5f %9.5f %12.5f %12.5f %6.1f %6.3f %2i %6i %6.3f \n'%(tuple(map(positiveRadians,angles))+(y*1e6,x*1e6)+info[validData])
+  return '%9.5f %9.5f %9.5f %12.5f %12.5f %6.1f %6.3f %2i %6i %6.3f \n'\
+         %(tuple(map(positiveRadians,angles))+(y*1e6,x*1e6)+info[validData])
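getDataLine writes Euler angles (via positiveRadians) followed by the y/x positions in micrometer; the wrap-around of negative angles behaves like this:

    import math
    angle = math.radians(-90.)
    while angle < 0.0: angle += 2.0*math.pi
    print angle                                   # 4.712... rad, i.e. 270 degrees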
@@ -155,10 +152,9 @@ if options.hexagonal:
 
 for filename in filenames:
 
-  # Read the source file
-
+# Read the source file
   if options.verbose: sys.stdout.write("\nREADING VTK FILE\n")
-  reader = vtkUnstructuredGridReader()
+  reader = vtk.vtkUnstructuredGridReader()
   reader.SetFileName(filename)
   reader.ReadAllScalarsOn()
   reader.ReadAllVectorsOn()
@@ -166,7 +162,7 @@ for filename in filenames:
 
   undeformedMesh = reader.GetOutput()
 
-  # Get euler angles from cell data
+# Get euler angles from cell data
 
   if options.verbose: sys.stdout.write("\nGETTING EULER ANGLES\n")
   angles = {}
@@ -177,14 +173,14 @@ for filename in filenames:
         if options.verbose: sys.stdout.write("  found scalar with name %s\n"%scalarName)
   if len(angles) < 3:                            # found data for all three euler angles?
     for label in options.eulerLabel:
-      if not label in angles.keys():
+      if label not in angles.keys():
         parser.error('Could not find scalar data with name %s'%label)
 
-  # Get deformed mesh
+# Get deformed mesh
 
   if options.verbose: sys.stdout.write("\nDEFORM MESH\n")
 
-  warpVector = vtkWarpVector()
+  warpVector = vtk.vtkWarpVector()
   undeformedMesh.GetPointData().SetActiveVectors(options.dispLabel)
   warpVector.SetInput(undeformedMesh)
   warpVector.Update()
@@ -197,29 +193,29 @@ for filename in filenames:
       sys.stdout.write("  z (% .8f % .8f)\n"%(box[4],box[5]))
 
 
-  # Get cell centers of deformed mesh (position of ips)
+# Get cell centers of deformed mesh (position of ips)
 
   if options.verbose: sys.stdout.write("\nGETTING CELL CENTERS OF DEFORMED MESH\n")
 
-  cellCenter = vtkCellCenters()
+  cellCenter = vtk.vtkCellCenters()
   cellCenter.SetVertexCells(0)                   # do not generate vertex cells, just points
   cellCenter.SetInput(deformedMesh)
   cellCenter.Update()
   meshIPs = cellCenter.GetOutput()
 
 
-  # Get outer surface of deformed mesh
+# Get outer surface of deformed mesh
 
   if options.verbose: sys.stdout.write("\nGETTING OUTER SURFACE OF DEFORMED MESH\n")
 
-  surfaceFilter = vtkDataSetSurfaceFilter()
+  surfaceFilter = vtk.vtkDataSetSurfaceFilter()
   surfaceFilter.SetInput(deformedMesh)
   surfaceFilter.Update()
   surface = surfaceFilter.GetOutput()
 
 
-  # Get coordinate system for ang files
-  # z-vector is normal to slices
-  # x-vector corresponds to the up-direction
-  # "R" rotates coordinates from the mesh system into the TSL system
+# Get coordinate system for ang files
+# z-vector is normal to slices
+# x-vector corresponds to the up-direction
+# "R" rotates coordinates from the mesh system into the TSL system
 
   if options.verbose: sys.stdout.write("\nGETTING COORDINATE SYSTEM FOR ANG FILES\n")
 
   z = np.array(options.normal,dtype='float')
@@ -235,7 +231,7 @@ for filename in filenames:
       sys.stdout.write("  z (% .8f % .8f % .8f)\n"%tuple(z))
 
 
-  # Get bounding box in rotated system (x,y,z)
+# Get bounding box in rotated system (x,y,z)
 
   if options.verbose: sys.stdout.write("\nGETTING BOUNDING BOX IN ROTATED SYSTEM\n")
 
   rotatedbox = [[np.inf,-np.inf] for i in range(3)]    # bounding box in rotated TSL system
@@ -254,8 +250,8 @@ for filename in filenames:
      sys.stdout.write("  z (% .8f % .8f)\n"%tuple(rotatedbox[2]))
 
 
-  # Correct bounding box so that a multiplicity of the resolution fits into it
-  # and get number of points and extent in each (rotated) axis direction
+# Correct bounding box so that a multiplicity of the resolution fits into it
+# and get number of points and extent in each (rotated) axis direction
 
   if options.verbose: sys.stdout.write("\nCORRECTING EXTENT OF BOUNDING BOX IN ROTATED SYSTEM\n")
   correction = []
@@ -284,12 +280,12 @@ for filename in filenames:
      sys.stdout.write("  z (% .8f % .8f)\n"%tuple(rotatedbox[2]))
 
 
-  # Generate new regular point grid for ang files
-  # Use "polydata" object with points as single vertices
-  # beware of TSL convention: y direction is fastest index
+# Generate new regular point grid for ang files
+# Use "polydata" object with points as single vertices
+# beware of TSL convention: y direction is fastest index
 
   if options.verbose: sys.stdout.write("\nGENERATING POINTS FOR POINT GRID")
 
-  points = vtkPoints()
+  points = vtk.vtkPoints()
   for k in xrange(Npoints[2]):
     for j in xrange(Npoints[0]):
       for i in xrange(Npoints[1]):               # y is fastest index
@@ -309,9 +305,9 @@ for filename in filenames:
      sys.stdout.write("  grid resolution: %.8f\n"%options.resolution)
 
   if options.verbose: sys.stdout.write("\nGENERATING VERTICES FOR POINT GRID")
-  vertices = vtkCellArray()
+  vertices = vtk.vtkCellArray()
   for i in xrange(totalNpoints):
-    vertex = vtkVertex()
+    vertex = vtk.vtkVertex()
     vertex.GetPointIds().SetId(0,i)              # each vertex consists of exactly one (index 0) point with ID "i"
     vertices.InsertNextCell(vertex)
   if options.verbose:
@@ -319,34 +315,35 @@ for filename in filenames:
     sys.stdout.flush()
 
   if options.verbose: sys.stdout.write("\n\nGENERATING POINT GRID\n")
-  pointgrid = vtkPolyData()
+  pointgrid = vtk.vtkPolyData()
   pointgrid.SetPoints(points)
   pointgrid.SetVerts(vertices)
   pointgrid.Update()
 
 
-  # Find out which points reside inside mesh geometry
+# Find out which points reside inside mesh geometry
 
   if options.verbose: sys.stdout.write("\nIDENTIFYING POINTS INSIDE MESH GEOMETRY\n")
 
-  enclosedPoints = vtkSelectEnclosedPoints()
+  enclosedPoints = vtk.vtkSelectEnclosedPoints()
   enclosedPoints.SetSurface(surface)
   enclosedPoints.SetInput(pointgrid)
   enclosedPoints.Update()
 
 
-  # Build kdtree from mesh IPs and match mesh IPs to point grid
+# Build kdtree from mesh IPs and match mesh IPs to point grid
 
   if options.verbose: sys.stdout.write("\nBUILDING MAPPING OF GRID POINTS")
 
-  kdTree = vtkKdTree()
+  kdTree = vtk.vtkKdTree()
   kdTree.BuildLocatorFromPoints(meshIPs.GetPoints())
   gridToMesh = []
-  ids = vtkIdList()
+  ids = vtk.vtkIdList()
   NenclosedPoints = 0
   for i in range(pointgrid.GetNumberOfPoints()):
     gridToMesh.append([])
     if enclosedPoints.IsInside(i):
       NenclosedPoints += 1
-      kdTree.FindClosestNPoints(options.interpolation,pointgrid.GetPoint(i),ids)   # here one could use faster(?) "FindClosestPoint" if only first nearest neighbor required
+# here one could use faster(?) "FindClosestPoint" if only first nearest neighbor required
+      kdTree.FindClosestNPoints(options.interpolation,pointgrid.GetPoint(i),ids)
       for j in range(ids.GetNumberOfIds()):
         gridToMesh[-1].extend([ids.GetId(j)])
   if options.verbose:
@@ -358,7 +355,7 @@ for filename in filenames:
 
 
 
-  # ITERATE OVER SLICES AND CREATE ANG FILE
+# ITERATE OVER SLICES AND CREATE ANG FILE
 
   if options.verbose:
     sys.stdout.write("\nWRITING OUT ANG FILES\n")
@@ -404,13 +401,13 @@ for filename in filenames:
           angfile.write(getDataLine(interpolatedPhi,x,y,enclosedPoints.IsInside(i)))
 
 
-  # Visualize slices
+# Visualize slices
 
   if options.visualize:
-    meshMapper = vtkDataSetMapper()
+    meshMapper = vtk.vtkDataSetMapper()
     meshMapper.SetInput(surface)
     meshMapper.ScalarVisibilityOff()             # do not use scalar data for coloring
-    meshActor = vtkActor()
+    meshActor = vtk.vtkActor()
     meshActor.SetMapper(meshMapper)
     meshActor.GetProperty().SetOpacity(0.2)
     meshActor.GetProperty().SetColor(1.0,1.0,0)
@@ -418,43 +415,43 @@ for filename in filenames:
 #    meshActor.GetProperty().SetEdgeColor(1,1,0.5)
 #    meshActor.GetProperty().EdgeVisibilityOn()
 
-    boxpoints = vtkPoints()
+    boxpoints = vtk.vtkPoints()
     for n in range(8):
       P = [rotatedbox[0][(n/1)%2],
           rotatedbox[1][(n/2)%2],
          rotatedbox[2][(n/4)%2]]
      boxpoints.InsertNextPoint(list(np.dot(R.T,np.array(P))))
-    box = vtkHexahedron()
+    box = vtk.vtkHexahedron()
     for n,i in enumerate([0,1,3,2,4,5,7,6]):
       box.GetPointIds().SetId(n,i)
-    boxgrid = vtkUnstructuredGrid()
+    boxgrid = vtk.vtkUnstructuredGrid()
     boxgrid.SetPoints(boxpoints)
     boxgrid.InsertNextCell(box.GetCellType(), box.GetPointIds())
-    boxsurfaceFilter = vtkDataSetSurfaceFilter()
+    boxsurfaceFilter = vtk.vtkDataSetSurfaceFilter()
     boxsurfaceFilter.SetInput(boxgrid)
     boxsurfaceFilter.Update()
     boxsurface = boxsurfaceFilter.GetOutput()
 
-    boxMapper = vtkDataSetMapper()
+    boxMapper = vtk.vtkDataSetMapper()
     boxMapper.SetInput(boxsurface)
-    boxActor = vtkActor()
+    boxActor = vtk.vtkActor()
     boxActor.SetMapper(boxMapper)
     boxActor.GetProperty().SetLineWidth(2.0)
     boxActor.GetProperty().SetRepresentationToWireframe()
 
-    gridMapper = vtkDataSetMapper()
+    gridMapper = vtk.vtkDataSetMapper()
     gridMapper.SetInput(pointgrid)
-    gridActor = vtkActor()
+    gridActor = vtk.vtkActor()
     gridActor.SetMapper(gridMapper)
     gridActor.GetProperty().SetColor(0,0,0)
     gridActor.GetProperty().SetPointSize(3)
 
-    renderer = vtkRenderer()
-    renderWindow = vtkRenderWindow()
+    renderer = vtk.vtkRenderer()
+    renderWindow = vtk.vtkRenderWindow()
     renderWindow.FullScreenOn()
     renderWindow.AddRenderer(renderer)
-    renderWindowInteractor = vtkRenderWindowInteractor()
+    renderWindowInteractor = vtk.vtkRenderWindowInteractor()
     renderWindowInteractor.SetRenderWindow(renderWindow)
     renderer.AddActor(meshActor)
     renderer.AddActor(boxActor)
@@ -462,6 +459,6 @@ for filename in filenames:
     renderer.SetBackground(1,1,1)
 
     renderWindow.Render()
-    renderWindowInteractor.SetInteractorStyle(vtkInteractorStyleTrackballCamera())
+    renderWindowInteractor.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())
     renderWindowInteractor.Start()
 
diff --git a/processing/post/vtk_addData.py b/processing/post/vtk_addData.py
index b93f6e174..97937dd86 100755
--- a/processing/post/vtk_addData.py
+++ b/processing/post/vtk_addData.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: UTF-8 no BOM -*-
 
-import os,string,glob,re
+import os,glob,re
 import damask
 from optparse import OptionParser
 
@@ -10,7 +10,6 @@ scriptID = ' '.join([scriptName,damask.version])
 
 # -----------------------------
 def findTag(filename,tag):
-# -----------------------------
 
   with open(filename,'r') as myfile:
     mypattern = re.compile(str(tag))
diff --git a/processing/post/vtk_addPointcloudData.py b/processing/post/vtk_addPointcloudData.py
index 2dfbdd144..58190de27 100755
--- a/processing/post/vtk_addPointcloudData.py
+++ b/processing/post/vtk_addPointcloudData.py
@@ -1,8 +1,9 @@
 #!/usr/bin/env python
 # -*- coding: UTF-8 no BOM -*-
 
-import os,sys,string,vtk
+import os,vtk
 import damask
+from collections import defaultdict
 from optparse import OptionParser
 
 scriptName = os.path.splitext(os.path.basename(__file__))[0]
@@ -17,125 +18,157 @@ Add scalar and RGB tuples from ASCIItable to existing VTK point cloud (.vtp).
 
 """, version = scriptID)
 
-parser.add_option('-v', '--vtk', dest='vtk', \
+parser.add_option(      '--vtk',
+                  dest = 'vtk',
+                  type = 'string', metavar = 'string',
                   help = 'VTK file name')
+parser.add_option(      '--inplace',
+                  dest = 'inplace',
+                  action = 'store_true',
+                  help = 'modify VTK file in-place')
+parser.add_option('-r', '--render',
+                  dest = 'render',
+                  action = 'store_true',
+                  help = 'open output in VTK render window')
 parser.add_option('-s', '--scalar', dest='scalar', action='extend', \
                   help = 'scalar values')
+parser.add_option('-v', '--vector',
+                  dest = 'vector',
+                  action = 'extend', metavar = '<string LIST>',
+                  help = 'vector value label(s)')
 parser.add_option('-c', '--color', dest='color', action='extend', \
                   help = 'RGB color tuples')
 
-parser.set_defaults(scalar = [])
-parser.set_defaults(color = [])
+parser.set_defaults(scalar = [],
+                    vector = [],
+                    color = [],
+                    inplace = False,
+                    render = False,
+)
 
 (options, filenames) = parser.parse_args()
 
-datainfo = {                                     # list of requested labels per datatype
-             'scalar':     {'len':1,
-                            'label':[]},
-             'color':      {'len':3,
-                            'label':[]},
-           }
-
-if not os.path.exists(options.vtk):
-  parser.error('VTK file does not exist'); sys.exit()
+if not options.vtk:                 parser.error('No VTK file specified.')
+if not os.path.exists(options.vtk): parser.error('VTK file does not exist.')
 
 reader = vtk.vtkXMLPolyDataReader()
 reader.SetFileName(options.vtk)
 reader.Update()
-Npoints = reader.GetNumberOfPoints()
-Ncells = reader.GetNumberOfCells()
+Npoints   = reader.GetNumberOfPoints()
+Ncells    = reader.GetNumberOfCells()
 Nvertices = reader.GetNumberOfVerts()
-Polydata = reader.GetOutput()
+Polydata  = reader.GetOutput()
 
 if Npoints != Ncells or Npoints != Nvertices:
-  parser.error('Number of points, cells, and vertices in VTK differ from each other'); sys.exit()
-if options.scalar != None: datainfo['scalar']['label'] += options.scalar
-if options.color != None: datainfo['color']['label'] += options.color
+  parser.error('Number of points, cells, and vertices in VTK differ from each other.')
 
-# ------------------------------------------ setup file handles ---------------------------------------
+damask.util.croak('{}: {} points, {} vertices, and {} cells...'.format(options.vtk,Npoints,Nvertices,Ncells))
 
-files = []
-if filenames == []:
-  files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
-else:
-  for name in filenames:
-    if os.path.exists(name):
-      files.append({'name':name, 'input':open(name), 'output':sys.stderr, 'croak':sys.stderr})
+# --- loop over input files -------------------------------------------------------------------------
 
-#--- loop over input files ------------------------------------------------------------------------
-for file in files:
-  if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
-  else:                       file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
+if filenames == []: filenames = [None]
 
-  table = damask.ASCIItable(file['input'],file['output'],False)   # make unbuffered ASCII_table
-  table.head_read()                                               # read ASCII header info
+for name in filenames:
+  try:    table = damask.ASCIItable(name = name,
+                                    buffered = False,
+                                    readonly = True)
+  except: continue
+  damask.util.report(scriptName, name)
 
-# --------------- figure out columns to process
-  active = {}
-  column = {}
+# --- interpret header ----------------------------------------------------------------------------
 
-  array = {}
+  table.head_read()
+
+  remarks = []
+  errors  = []
+  VTKarray = {}
+  active = defaultdict(list)
 
-  for datatype,info in datainfo.items():
-    for label in info['label']:
-      foundIt = False
-      for key in ['1_'+label,label]:
-        if key in table.labels:
-          foundIt = True
-          if datatype not in active: active[datatype] = []
-          if datatype not in column: column[datatype] = {}
-          if datatype not in array:  array[datatype] = {}
-          active[datatype].append(label)
-          column[datatype][label] = table.labels.index(key)       # remember columns of requested data
-          if datatype == 'scalar':
-            array[datatype][label] = vtk.vtkDoubleArray()
-            array[datatype][label].SetNumberOfComponents(1)
-            array[datatype][label].SetName(label)
-          elif datatype == 'color':
-            array[datatype][label] = vtk.vtkUnsignedCharArray()
-            array[datatype][label].SetNumberOfComponents(3)
-            array[datatype][label].SetName(label)
-      if not foundIt:
-        file['croak'].write('column %s not found...\n'%label)
-
+  for datatype,dimension,label in [['scalar',1,options.scalar],
+                                   ['vector',3,options.vector],
+                                   ['color',3,options.color],
+                                   ]:
+    for i,dim in enumerate(table.label_dimension(label)):
+      me = label[i]
+      if dim == -1:         remarks.append('{} "{}" not found...'.format(datatype,me))
+      elif dim > dimension: remarks.append('"{}" not of dimension {}...'.format(me,dimension))
+      else:
+        remarks.append('adding {} "{}"...'.format(datatype,me))
+        active[datatype].append(me)
+
+        if datatype in ['scalar','vector']: VTKarray[me] = vtk.vtkDoubleArray()
+        elif datatype == 'color':           VTKarray[me] = vtk.vtkUnsignedCharArray()
+
+        VTKarray[me].SetNumberOfComponents(dimension)
+        VTKarray[me].SetName(label[i])
+
+  if remarks != []: damask.util.croak(remarks)
+  if errors  != []:
+    damask.util.croak(errors)
+    table.close(dismiss = True)
+    continue
+
 # ------------------------------------------ process data ---------------------------------------
 
-  while table.data_read():                       # read next data line of ASCII table
+  while table.data_read():                       # read next data line of ASCII table
 
-    for datatype,labels in active.items():       # loop over scalar,color
-      for label in labels:                       # loop over all requested items
-        theData = table.data[column[datatype][label]:\
-                             column[datatype][label]+datainfo[datatype]['len']]   # read strings
-        if datatype == 'color':
-          theData = map(lambda x: int(255.*float(x)),theData)
-          array[datatype][label].InsertNextTuple3(theData[0],theData[1],theData[2],)
-        elif datatype == 'scalar':
-          array[datatype][label].InsertNextValue(float(theData[0]))
+    for datatype,labels in active.items():       # loop over scalar,color
+      for me in labels:                          # loop over all requested items
+        theData = [table.data[i] for i in table.label_indexrange(me)]       # read strings
+        if datatype == 'color':    VTKarray[me].InsertNextTuple3(*map(lambda x: int(255.*float(x)),theData))
+        elif datatype == 'vector': VTKarray[me].InsertNextTuple3(*map(float,theData))
+        elif datatype == 'scalar': VTKarray[me].InsertNextValue(float(theData[0]))
 
   table.input_close()                            # close input ASCII table
 
 # ------------------------------------------ add data ---------------------------------------
 
-  for datatype,labels in active.items():         # loop over scalar,color
+  for datatype,labels in active.items():         # loop over scalar,color
     if datatype == 'color':
-      Polydata.GetPointData().SetScalars(array[datatype][labels[0]])
-      Polydata.GetCellData().SetScalars(array[datatype][labels[0]])
-    for label in labels:                         # loop over all requested items
-      Polydata.GetPointData().AddArray(array[datatype][label])
-      Polydata.GetCellData().AddArray(array[datatype][label])
+      Polydata.GetPointData().SetScalars(VTKarray[active['color'][0]])
+      Polydata.GetCellData().SetScalars(VTKarray[active['color'][0]])
+    for me in labels:                            # loop over all requested items
+      Polydata.GetPointData().AddArray(VTKarray[me])
+      Polydata.GetCellData().AddArray(VTKarray[me])
 
   Polydata.Modified()
-  if vtk.VTK_MAJOR_VERSION <= 5:
-    Polydata.Update()
+  if vtk.VTK_MAJOR_VERSION <= 5: Polydata.Update()
 
 # ------------------------------------------ output result ---------------------------------------
 
-writer = vtk.vtkXMLPolyDataWriter()
-writer.SetDataModeToBinary()
-writer.SetCompressorTypeToZLib()
-writer.SetFileName(os.path.splitext(options.vtk)[0]+'_added.vtp')
-if vtk.VTK_MAJOR_VERSION <= 5:
-  writer.SetInput(Polydata)
-else:
-  writer.SetInputData(Polydata)
-writer.Write()
+  writer = vtk.vtkXMLPolyDataWriter()
+  writer.SetDataModeToBinary()
+  writer.SetCompressorTypeToZLib()
+  writer.SetFileName(os.path.splitext(options.vtk)[0]+('.vtp' if options.inplace else '_added.vtp'))
+  if vtk.VTK_MAJOR_VERSION <= 5: writer.SetInput(Polydata)
+  else:                          writer.SetInputData(Polydata)
+  writer.Write()
+
+# ------------------------------------------ render result ---------------------------------------
+
+if options.render:
+  mapper = vtk.vtkDataSetMapper()
+  mapper.SetInputData(Polydata)
+  actor = vtk.vtkActor()
+  actor.SetMapper(mapper)
+
+# Create the graphics structure. The renderer renders into the
+# render window. The render window interactor captures mouse events
+# and will perform appropriate camera or actor manipulation
+# depending on the nature of the events.
+
+  ren = vtk.vtkRenderer()
+
+  renWin = vtk.vtkRenderWindow()
+  renWin.AddRenderer(ren)
+
+  ren.AddActor(actor)
+  ren.SetBackground(1, 1, 1)
+  renWin.SetSize(200, 200)
+
+  iren = vtk.vtkRenderWindowInteractor()
+  iren.SetRenderWindow(renWin)
+
+  iren.Initialize()
+  renWin.Render()
+  iren.Start()
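The rewritten header interpretation above relies on defaultdict, so per-datatype lists no longer need to be pre-created; in isolation:

    from collections import defaultdict
    active = defaultdict(list)
    active['scalar'].append('phase')              # the key springs into existence
    print dict(active)                            # {'scalar': ['phase']}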
diff --git a/processing/post/vtk_addRectilinearGridData.py b/processing/post/vtk_addRectilinearGridData.py
index b1e35d2cd..d54bb4cf4 100755
--- a/processing/post/vtk_addRectilinearGridData.py
+++ b/processing/post/vtk_addRectilinearGridData.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: UTF-8 no BOM -*-
 
-import os,sys,string,vtk
+import os,vtk
 import damask
 from collections import defaultdict
 from optparse import OptionParser
@@ -30,10 +30,6 @@ parser.add_option('-r', '--render',
                   dest = 'render',
                   action = 'store_true',
                   help = 'open output in VTK render window')
-parser.add_option('-m', '--mode',
-                  dest = 'mode',
-                  type = 'choice', metavar = 'string', choices = ['cell', 'point'],
-                  help = 'cell-centered or point-centered data')
 parser.add_option('-s', '--scalar',
                   dest = 'scalar',
                   action = 'extend', metavar = '<string LIST>',
@@ -56,7 +52,6 @@ parser.set_defaults(scalar = [],
 
 (options, filenames) = parser.parse_args()
 
-if not options.mode:                parser.error('No data mode specified.')
 if not options.vtk:                 parser.error('No VTK file specified.')
 if not os.path.exists(options.vtk): parser.error('VTK file does not exist.')
 
@@ -83,9 +78,9 @@ damask.util.croak('{}: {} points and {} cells...'.format(options.vtk,Npoints,Ncells))
 if filenames == []: filenames = [None]
 
 for name in filenames:
-  try:
-    table = damask.ASCIItable(name = name,
-                              buffered = False, readonly = True)
+  try:    table = damask.ASCIItable(name = name,
+                                    buffered = False,
+                                    readonly = True)
   except: continue
   damask.util.report(scriptName, name)
 
@@ -124,8 +119,11 @@ for name in filenames:
 
 # ------------------------------------------ process data ---------------------------------------
 
+  datacount = 0
+
   while table.data_read():                       # read next data line of ASCII table
-
+
+    datacount += 1                               # count data lines
     for datatype,labels in active.items():       # loop over scalar,color
       for me in labels:                          # loop over all requested items
         theData = [table.data[i] for i in table.label_indexrange(me)]       # read strings
@@ -133,15 +131,25 @@ for name in filenames:
         elif datatype == 'vector': VTKarray[me].InsertNextTuple3(*map(float,theData))
         elif datatype == 'scalar': VTKarray[me].InsertNextValue(float(theData[0]))
 
+  table.close()                                  # close input ASCII table
+
 # ------------------------------------------ add data ---------------------------------------
 
+  if   datacount == Npoints: mode = 'point'
+  elif datacount == Ncells:  mode = 'cell'
+  else:
+    damask.util.croak('Data count is incompatible with grid...')
+    continue
+
+  damask.util.croak('{} mode...'.format(mode))
+
   for datatype,labels in active.items():         # loop over scalar,color
     if datatype == 'color':
-      if   options.mode == 'cell':  rGrid.GetCellData().SetScalars(VTKarray[active['color'][0]])
-      elif options.mode == 'point': rGrid.GetPointData().SetScalars(VTKarray[active['color'][0]])
+      if   mode == 'cell':  rGrid.GetCellData().SetScalars(VTKarray[active['color'][0]])
+      elif mode == 'point': rGrid.GetPointData().SetScalars(VTKarray[active['color'][0]])
     for me in labels:                            # loop over all requested items
-      if   options.mode == 'cell':  rGrid.GetCellData().AddArray(VTKarray[me])
-      elif options.mode == 'point': rGrid.GetPointData().AddArray(VTKarray[me])
+      if   mode == 'cell':  rGrid.GetCellData().AddArray(VTKarray[me])
+      elif mode == 'point': rGrid.GetPointData().AddArray(VTKarray[me])
 
   rGrid.Modified()
   if vtk.VTK_MAJOR_VERSION <= 5: rGrid.Update()
 
@@ -151,7 +159,7 @@ for name in filenames:
     writer = vtk.vtkXMLRectilinearGridWriter()
     writer.SetDataModeToBinary()
     writer.SetCompressorTypeToZLib()
-    writer.SetFileName(os.path.splitext(options.vtk)[0]+('' if options.inplace else '_added.vtr'))
+    writer.SetFileName(os.path.splitext(options.vtk)[0]+('.vtr' if options.inplace else '_added.vtr'))
     if vtk.VTK_MAJOR_VERSION <= 5: writer.SetInput(rGrid)
     else:                          writer.SetInputData(rGrid)
     writer.Write()
@@ -164,10 +172,10 @@ if options.render:
   actor = vtk.vtkActor()
   actor.SetMapper(mapper)
 
-  # Create the graphics structure. The renderer renders into the
-  # render window. The render window interactor captures mouse events
-  # and will perform appropriate camera or actor manipulation
-  # depending on the nature of the events.
+# Create the graphics structure. The renderer renders into the
+# render window. The render window interactor captures mouse events
+# and will perform appropriate camera or actor manipulation
+# depending on the nature of the events.
 
   ren = vtk.vtkRenderer()
 
@@ -181,9 +189,6 @@ if options.render:
   iren = vtk.vtkRenderWindowInteractor()
   iren.SetRenderWindow(renWin)
 
-  #ren.ResetCamera()
-  #ren.GetActiveCamera().Zoom(1.5)
-
   iren.Initialize()
  renWin.Render()
  iren.Start()
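The removed --mode option is compensated by inferring the data layout from the row count; with made-up numbers, a 2x2x2-cell grid has 3x3x3 points, so 8 rows mean cell data and 27 mean point data:

    Npoints, Ncells = 27, 8                       # hypothetical grid
    for datacount in (27, 8, 10):
      print 'point' if datacount == Npoints else 'cell' if datacount == Ncells else 'mismatch'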
""", version = scriptID) -parser.add_option('-d', '--deformed', - dest = 'deformed', +parser.add_option('-c', '--coordinates', + dest = 'pos', type = 'string', metavar = 'string', - help = 'deformed coordinate label [%default]') + help = 'coordinate label [%default]') -parser.set_defaults(deformed = 'ipdeformedcoord' +parser.set_defaults(pos = 'pos' ) (options, filenames) = parser.parse_args() @@ -46,9 +46,9 @@ for name in filenames: errors = [] remarks = [] - coordDim = table.label_dimension(options.deformed) - if not 3 >= coordDim >= 1: errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.deformed)) - elif coordDim < 3: remarks.append('appending {} dimensions to coordinates "{}"...'.format(3-coordDim,options.deformed)) + coordDim = table.label_dimension(options.pos) + if not 3 >= coordDim >= 1: errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.pos)) + elif coordDim < 3: remarks.append('appending {} dimensions to coordinates "{}"...'.format(3-coordDim,options.pos)) if remarks != []: damask.util.croak(remarks) if errors != []: @@ -58,7 +58,7 @@ for name in filenames: # ------------------------------------------ process data --------------------------------------- - table.data_readArray(options.deformed) + table.data_readArray(options.pos) if len(table.data.shape) < 2: table.data.shape += (1,) # expand to 2D shape if table.data.shape[1] < 3: table.data = np.hstack((table.data, @@ -86,8 +86,8 @@ for name in filenames: (directory,filename) = os.path.split(name) writer.SetDataModeToBinary() writer.SetCompressorTypeToZLib() - writer.SetFileName(os.path.join(directory,os.path.splitext(filename)[0] - +'.'+writer.GetDefaultFileExtension())) + writer.SetFileName(os.path.join(directory,os.path.splitext(filename)[0]\ + +'.'+writer.GetDefaultFileExtension())) else: writer = vtk.vtkDataSetWriter() writer.WriteToOutputStringOn() @@ -96,6 +96,6 @@ for name in filenames: if vtk.VTK_MAJOR_VERSION <= 5: writer.SetInput(Polydata) else: writer.SetInputData(Polydata) writer.Write() - if name == None: sys.stdout.write(writer.GetOutputString()[0:writer.GetOutputStringLength()]) + if name is None: sys.stdout.write(writer.GetOutputString()[0:writer.GetOutputStringLength()]) table.close() diff --git a/processing/post/vtk_rectilinearGrid.py b/processing/post/vtk_rectilinearGrid.py index 758e4df22..6f1228ad8 100755 --- a/processing/post/vtk_rectilinearGrid.py +++ b/processing/post/vtk_rectilinearGrid.py @@ -24,11 +24,11 @@ parser.add_option('-m', '--mode', type = 'choice', choices = ['cell','point'], help = 'cell-centered or point-centered coordinates ') parser.add_option('-c', '--coordinates', - dest = 'position', + dest = 'coords', type = 'string', metavar = 'string', help = 'coordinate label [%default]') -parser.set_defaults(position ='ipinitialcoord', - mode ='cell' +parser.set_defaults(coords = 'pos', + mode = 'cell' ) (options, filenames) = parser.parse_args() @@ -38,9 +38,9 @@ parser.set_defaults(position ='ipinitialcoord', if filenames == []: filenames = [None] for name in filenames: - try: - table = damask.ASCIItable(name = name, - buffered = False, readonly = True) + try: table = damask.ASCIItable(name = name, + buffered = False, + readonly = True) except: continue damask.util.report(scriptName,name) @@ -48,10 +48,13 @@ for name in filenames: table.head_read() - errors = [] - if table.label_dimension(options.position) != 3: - errors.append('coordinates {} are not a vector.'.format(options.position)) + remarks = [] + errors = [] 
+ coordDim = table.label_dimension(options.coords) + if not 3 >= coordDim >= 1: errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.coords)) + elif coordDim < 3: remarks.append('appending {} dimensions to coordinates "{}"...'.format(3-coordDim,options.coords)) + if remarks != []: damask.util.croak(remarks) if errors != []: damask.util.croak(errors) table.close(dismiss=True) @@ -59,7 +62,12 @@ for name in filenames: # --------------- figure out size and grid --------------------------------------------------------- - table.data_readArray(options.position) + table.data_readArray(options.coords) + if len(table.data.shape) < 2: table.data.shape += (1,) # expand to 2D shape + if table.data.shape[1] < 3: + table.data = np.hstack((table.data, + np.zeros((table.data.shape[0], + 3-table.data.shape[1]),dtype='f'))) # fill coords up to 3D with zeros coords = [np.unique(table.data[:,i]) for i in xrange(3)] if options.mode == 'cell': @@ -101,7 +109,7 @@ for name in filenames: writer.SetDataModeToBinary() writer.SetCompressorTypeToZLib() writer.SetFileName(os.path.join(directory,os.path.splitext(filename)[0] \ - +'_{}({})'.format(options.position, options.mode) \ + +'_{}({})'.format(options.coords, options.mode) \ +'.'+writer.GetDefaultFileExtension())) else: writer = vtk.vtkDataSetWriter() @@ -111,6 +119,6 @@ for name in filenames: if vtk.VTK_MAJOR_VERSION <= 5: writer.SetInput(rGrid) else: writer.SetInputData(rGrid) writer.Write() - if name == None: sys.stdout.write(writer.GetOutputString()[0:writer.GetOutputStringLength()]) + if name is None: sys.stdout.write(writer.GetOutputString()[0:writer.GetOutputStringLength()]) table.close() diff --git a/processing/post/vtk_scalars2vectors.py b/processing/post/vtk_scalars2vectors.py index 72b59588c..4aca33236 100755 --- a/processing/post/vtk_scalars2vectors.py +++ b/processing/post/vtk_scalars2vectors.py @@ -1,10 +1,10 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string,shutil +import os,sys,shutil import damask from optparse import OptionParser -from vtk import * +import vtk scriptName = os.path.splitext(os.path.basename(__file__))[0] scriptID = ' '.join([scriptName,damask.version]) @@ -44,18 +44,16 @@ for filename in filenames: for filename in filenames: - # Read the source file - sys.stdout.write('read file "%s" ...'%filename) sys.stdout.flush() suffix = os.path.splitext(filename)[1] if suffix == '.vtk': - reader = vtkUnstructuredGridReader() + reader = vtk.vtkUnstructuredGridReader() reader.ReadAllScalarsOn() reader.ReadAllVectorsOn() reader.ReadAllTensorsOn() elif suffix == '.vtu': - reader = vtkXMLUnstructuredGridReader() + reader = vtk.vtkXMLUnstructuredGridReader() else: parser.error('filetype "%s" not supported'%suffix) reader.SetFileName(filename) @@ -65,7 +63,7 @@ for filename in filenames: sys.stdout.flush() - # Read the scalar data +# Read the scalar data scalarData = {} scalarsToBeRemoved = [] @@ -83,19 +81,18 @@ for filename in filenames: scalarsToBeRemoved.append(scalarName) for scalarName in scalarsToBeRemoved: uGrid.GetCellData().RemoveArray(scalarName) - # uGrid.UpdateData() sys.stdout.write('\rread scalar data done\n') sys.stdout.flush() - # Convert the scalar data to vector data +# Convert the scalar data to vector data NscalarData = len(scalarData) for n,label in enumerate(scalarData): sys.stdout.write("\rconvert to vector data %d%%" %(100*n/NscalarData)) sys.stdout.flush() Nvalues = scalarData[label][0].GetNumberOfTuples() - vectorData = vtkDoubleArray() + vectorData = 
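Both vtk_pointcloud.py and the rewritten vtk_rectilinearGrid.py pad lower-dimensional coordinates up to 3D with zeros; the idiom in isolation, with made-up data:

    import numpy as np
    data = np.ones((4,2))                         # hypothetical 2D coordinates
    data = np.hstack((data,np.zeros((data.shape[0],3-data.shape[1]),dtype='f')))
    print data.shape                              # (4, 3)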
diff --git a/processing/post/vtk_scalars2vectors.py b/processing/post/vtk_scalars2vectors.py
index 72b59588c..4aca33236 100755
--- a/processing/post/vtk_scalars2vectors.py
+++ b/processing/post/vtk_scalars2vectors.py
@@ -1,10 +1,10 @@
 #!/usr/bin/env python
 # -*- coding: UTF-8 no BOM -*-
 
-import os,sys,string,shutil
+import os,sys,shutil
 import damask
 from optparse import OptionParser
-from vtk import *
+import vtk
 
 scriptName = os.path.splitext(os.path.basename(__file__))[0]
 scriptID   = ' '.join([scriptName,damask.version])
@@ -44,18 +44,16 @@ for filename in filenames:
 
 for filename in filenames:
 
-  # Read the source file
-
   sys.stdout.write('read file "%s" ...'%filename)
   sys.stdout.flush()
   suffix = os.path.splitext(filename)[1]
   if suffix == '.vtk':
-    reader = vtkUnstructuredGridReader()
+    reader = vtk.vtkUnstructuredGridReader()
     reader.ReadAllScalarsOn()
     reader.ReadAllVectorsOn()
     reader.ReadAllTensorsOn()
   elif suffix == '.vtu':
-    reader = vtkXMLUnstructuredGridReader()
+    reader = vtk.vtkXMLUnstructuredGridReader()
   else:
     parser.error('filetype "%s" not supported'%suffix)
   reader.SetFileName(filename)
@@ -65,7 +63,7 @@ for filename in filenames:
     sys.stdout.flush()
 
 
-  # Read the scalar data
+# Read the scalar data
 
   scalarData = {}
   scalarsToBeRemoved = []
@@ -83,19 +81,18 @@ for filename in filenames:
         scalarsToBeRemoved.append(scalarName)
   for scalarName in scalarsToBeRemoved:
     uGrid.GetCellData().RemoveArray(scalarName)
-  # uGrid.UpdateData()
   sys.stdout.write('\rread scalar data done\n')
   sys.stdout.flush()
 
 
-  # Convert the scalar data to vector data
+# Convert the scalar data to vector data
 
   NscalarData = len(scalarData)
   for n,label in enumerate(scalarData):
     sys.stdout.write("\rconvert to vector data %d%%" %(100*n/NscalarData))
     sys.stdout.flush()
     Nvalues = scalarData[label][0].GetNumberOfTuples()
-    vectorData = vtkDoubleArray()
+    vectorData = vtk.vtkDoubleArray()
     vectorData.SetName(label)
     vectorData.SetNumberOfComponents(3)          # set this before NumberOfTuples !!!
     vectorData.SetNumberOfTuples(Nvalues)
@@ -103,16 +100,15 @@ for filename in filenames:
       for j in range(3):
         vectorData.SetComponent(i,j,scalarData[label][j].GetValue(i))
     uGrid.GetCellData().AddArray(vectorData)
-    # uGrid.GetCellData().SetActiveVectors(label)
 
   sys.stdout.write('\rconvert to vector data done\n')
 
 
-  # Write to new vtk file
+# Write to new vtk file
 
   outfilename = os.path.splitext(filename)[0]+'.vtu'
   sys.stdout.write('write to file "%s" ...'%outfilename)
   sys.stdout.flush()
-  writer = vtkXMLUnstructuredGridWriter()
+  writer = vtk.vtkXMLUnstructuredGridWriter()
   writer.SetFileName(outfilename+'_tmp')
   writer.SetDataModeToAscii()
   writer.SetInput(uGrid)
diff --git a/processing/post/vtk_voxelcloud.py b/processing/post/vtk_voxelcloud.py
index 47eb4df1a..239771c74 100755
--- a/processing/post/vtk_voxelcloud.py
+++ b/processing/post/vtk_voxelcloud.py
@@ -48,9 +48,10 @@ for name in filenames:
 # --------------- interprete header -----------------------------------------------------------------
   table.head_read()
   errors=[]
-  if table.label_dimension(options.deformed) != 3: errors.append('columns "{}" have dimension {}'.format(options.deformed,
-                                                                                     table.label_dimension(options.deformed)))
-  if table.label_dimension(options.coords) != 3: errors.append('coordinates {} are not a vector.'.format(options.coords))
+  if table.label_dimension(options.deformed) != 3:
+    errors.append('columns "{}" have dimension {}'.format(options.deformed,table.label_dimension(options.deformed)))
+  if table.label_dimension(options.coords) != 3:
+    errors.append('coordinates {} are not a vector.'.format(options.coords))
 
   table.data_readArray([options.coords,options.deformed])
 
@@ -66,7 +67,7 @@ for name in filenames:
                      max(map(float,coords[2].keys()))-min(map(float,coords[2].keys())),\
                     ],'d')                       # size from bounding box, corrected for cell-centeredness
 
-  size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1]))   # spacing for grid==1 equal to smallest among other spacings
+  size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1]))   # spacing for grid==1 set to smallest among other spacings
 
 # ------------------------------------------ process data ---------------------------------------
   hexPoints = np.array([[-1,-1,-1],
@@ -101,8 +102,8 @@ for name in filenames:
     (directory,filename) = os.path.split(name)
     writer.SetDataModeToBinary()
     writer.SetCompressorTypeToZLib()
-    writer.SetFileName(os.path.join(directory,os.path.splitext(filename)[0]
-                                    +'.'+writer.GetDefaultFileExtension()))
+    writer.SetFileName(os.path.join(directory,os.path.splitext(filename)[0]\
+                                    +'.'+writer.GetDefaultFileExtension()))
   else:
     writer = vtk.vtkDataSetWriter()
     writer.WriteToOutputStringOn()
@@ -111,7 +112,7 @@ for name in filenames:
   if vtk.VTK_MAJOR_VERSION <= 5: writer.SetInput(uGrid)
   else:                          writer.SetInputData(uGrid)
   writer.Write()
-  if name == None: sys.stdout.write(writer.GetOutputString()[0:writer.GetOutputStringLength()])
+  if name is None: sys.stdout.write(writer.GetOutputString()[0:writer.GetOutputStringLength()])
 
   table.close()                                  # close input ASCII table
 
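The reworded comment in vtk_voxelcloud.py describes this fallback: degenerate grid directions (a single voxel) inherit the smallest spacing of the remaining directions. With made-up numbers:

    import numpy as np
    grid = np.array([4,2,1]); size = np.array([1.,1.,1.])
    print np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1]))   # [ 1.    1.    0.25]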
diff --git a/processing/pre/OIMlinear2linearODF.py b/processing/pre/OIMlinear2linearODF.py
index 980f861af..280457be4 100755
--- a/processing/pre/OIMlinear2linearODF.py
+++ b/processing/pre/OIMlinear2linearODF.py
@@ -1,7 +1,7 @@
 #!/usr/bin/python
 # -*- coding: UTF-8 no BOM -*-
 
-import os,string,sys,re
+import os,sys
 from optparse import OptionParser
 import numpy as np
 import damask
 
@@ -73,7 +73,7 @@ for file in files:
   for iPhi1 in range(nPhi1):
     for iPHI in range(nPHI):
       for iPhi2 in range(nPhi2):
-        ODF[iPhi1,iPHI,iPhi2] = float(line.split()[3])*0.125   # extract intensity (in column 4) and weight by 1/8 (since we convert from the 8 corners to the center later on)
+        ODF[iPhi1,iPHI,iPhi2] = float(line.split()[3])*0.125   # extract intensity (in column 4) and weight by 1/8
         line = file['input'].readline()
 
   for iPhi1 in range(nPhi1-1):
diff --git a/processing/pre/abq_addUserOutput.py b/processing/pre/abq_addUserOutput.py
index b14399509..1e0614c21 100755
--- a/processing/pre/abq_addUserOutput.py
+++ b/processing/pre/abq_addUserOutput.py
@@ -1,16 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: UTF-8 no BOM -*-
 
-'''
-Writes meaningful labels to the Abaqus input file (*.inp)
-based on the files
-<modelname_jobname>.output<what>
-that are written during the first run of the model.
-See Abaqus Keyword Reference Manual (AKRM) *DEPVAR for details.
-Original script: marc_addUserOutput.py modified by Benjamin Bode
-'''
-
-import sys,os,re,string
+import sys,os,re
 from optparse import OptionParser
 import damask
 
@@ -19,7 +10,6 @@ scriptID = ' '.join([scriptName,damask.version])
 
 # -----------------------------
 def ParseOutputFormat(filename,what,me):
-# -----------------------------
   format = {'outputs':{},'specials':{'brothers':[]}}
 
   outputmetafile = filename+'.output'+what
@@ -120,7 +110,7 @@ for file in files:
   for what in me:
     outputFormat[what] = ParseOutputFormat(formatFile,what,me[what])
-    if not '_id' in outputFormat[what]['specials']:
+    if '_id' not in outputFormat[what]['specials']:
       print "'%s' not found in <%s>"%(me[what],what)
       print '\n'.join(map(lambda x:'  '+x,outputFormat[what]['specials']['brothers']))
       sys.exit(1)
@@ -164,19 +154,14 @@ for file in files:
     if m:
       lastSection = thisSection
       thisSection = m.group(1)
-      #Abaqus keyword can be upper or lower case
-      if (lastSection.upper() == '*DEPVAR' and thisSection.upper() == '*USER'):
-        #Abaqus SDVs are named SDV1...SDVn if no specific name is given
-        #Abaqus needs total number of SDVs in the line after *Depvar keyword
+      if (lastSection.upper() == '*DEPVAR' and thisSection.upper() == '*USER'):   #Abaqus keyword can be upper or lower case
         if options.number > 0:
-          #number of SDVs
-          output.write('%i\n'%options.number)
+          output.write('%i\n'%options.number)    #Abaqus needs total number of SDVs in the line after *Depvar keyword
         else:
-          #number of SDVs
           output.write('%i\n'%len(UserVars))
-          #index,output variable key,output variable description
+
         for i in range(len(UserVars)):
-          output.write('%i,"%i%s","%i%s"\n'%(i+1,0,UserVars[i],0,UserVars[i]))
+          output.write('%i,"%i%s","%i%s"\n'%(i+1,0,UserVars[i],0,UserVars[i]))    #index,output variable key,output variable description
     if (thisSection.upper() != '*DEPVAR' or not re.match('\s*\d',line)):
       output.write(line)
   output.close()
diff --git a/processing/pre/geom_addPrimitive.py b/processing/pre/geom_addPrimitive.py
index 9200f2df7..3dd60bc24 100755
--- a/processing/pre/geom_addPrimitive.py
+++ b/processing/pre/geom_addPrimitive.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: UTF-8 no BOM -*-
 
-import os,sys,math,string
+import os,sys,math
 import numpy as np
 from optparse import OptionParser
 import damask
@@ -114,7 +114,7 @@ for name in filenames:
 
   microstructure = microstructure.reshape(info['grid'],order='F')
 
-  if options.dimension != None:
+  if options.dimension is not None:
     mask = (np.array(options.dimension) < 0).astype(float)   # zero where positive dimension, otherwise one
     dim = abs(np.array(options.dimension))                   # dimensions of primitive body
     pos = np.zeros(3,dtype='float')
@@ -134,10 +134,9 @@ for name in filenames:
 
 # --- report ---------------------------------------------------------------------------------------
 
+  if ( newInfo['microstructures'] != info['microstructures']):
+    damask.util.croak('--> microstructures: %i'%newInfo['microstructures'])
 
-  remarks = []
-  if ( newInfo['microstructures'] != info['microstructures']): remarks.append('--> microstructures: %i'%newInfo['microstructures'])
-  if remarks != []: damask.util.croak(remarks)
 
 #--- write header ---------------------------------------------------------------------------------
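geom_addPrimitive encodes "negative dimension" flags in a float mask while keeping the magnitudes separate; with a made-up primitive:

    import numpy as np
    dimension = np.array([8,-4,2])                # hypothetical a,b,c of the primitive
    mask = (dimension < 0).astype(float)          # one where the dimension is negative
    print abs(dimension), mask                    # [8 4 2] [ 0.  1.  0.]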
{origin[2]}".format(origin=newInfo['origin']), "homogenization\t{homog}".format(homog=info['homogenization']), "microstructures\t{microstructures}".format(microstructures=newInfo['microstructures']), - extra_header + extra_header ]) table.labels_clear() table.head_write() @@ -151,9 +160,9 @@ for name in filenames: # --- write microstructure information ------------------------------------------------------------ - formatwidth = int(math.floor(math.log10(microstructure_cropped.max())+1)) + format = '%g' if options.real else '%{}i'.format(int(math.floor(math.log10(microstructure_cropped.max())+1))) table.data = microstructure_cropped.reshape((newInfo['grid'][0],newInfo['grid'][1]*newInfo['grid'][2]),order='F').transpose() - table.data_writeArray('%%%ii'%(formatwidth),delimiter=' ') + table.data_writeArray(format,delimiter=' ') # --- output finalization -------------------------------------------------------------------------- diff --git a/processing/pre/geom_check.py b/processing/pre/geom_check.py index 3f9e50864..518e8c3b0 100755 --- a/processing/pre/geom_check.py +++ b/processing/pre/geom_check.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string,vtk +import os,sys,vtk import numpy as np from optparse import OptionParser import damask @@ -54,12 +54,25 @@ for name in filenames: errors = [] if np.any(info['grid'] < 1): errors.append('invalid grid a b c.') if np.any(info['size'] <= 0.0): errors.append('invalid size x y z.') + +#--- read microstructure information -------------------------------------------------------------- + + if options.data: + microstructure,ok = table.microstructure_read(info['grid'],strict = True) # read microstructure + + if ok: + structure = vtk.vtkIntArray() + structure.SetName('Microstructures') + for idx in microstructure: structure.InsertNextValue(idx) + + else: errors.append('mismatch between data and grid dimension.') + if errors != []: damask.util.croak(errors) table.close(dismiss = True) continue -# --- generate VTK rectilinear grid -------------------------------------------------------------------------------- +# --- generate VTK rectilinear grid --------------------------------------------------------------- grid = vtk.vtkRectilinearGrid() grid.SetDimensions([x+1 for x in info['grid']]) @@ -72,18 +85,8 @@ for name in filenames: elif i == 1: grid.SetYCoordinates(temp) elif i == 2: grid.SetZCoordinates(temp) -#--- read microstructure information -------------------------------------------------------------- - if options.data: - microstructure = table.microstructure_read(info['grid']) # read microstructure - - structure = vtk.vtkIntArray() - structure.SetName('Microstructures') - - for idx in microstructure: - structure.InsertNextValue(idx) - - grid.GetCellData().AddArray(structure) + if options.data: grid.GetCellData().AddArray(structure) # --- write data ----------------------------------------------------------------------------------- if name: @@ -91,8 +94,7 @@ for name in filenames: (directory,filename) = os.path.split(name) writer.SetDataModeToBinary() writer.SetCompressorTypeToZLib() - writer.SetFileName(os.path.join(directory,os.path.splitext(filename)[0] - +'.'+writer.GetDefaultFileExtension())) + writer.SetFileName(os.path.join(directory,os.path.splitext(filename)[0]+'.'+writer.GetDefaultFileExtension())) else: writer = vtk.vtkDataSetWriter() writer.WriteToOutputStringOn() @@ -101,6 +103,6 @@ for name in filenames: if vtk.VTK_MAJOR_VERSION <= 5: writer.SetInput(grid) else: writer.SetInputData(grid) 
writer.Write() - if name == None: sys.stdout.write(writer.GetOutputString()[0:writer.GetOutputStringLength()]) + if name is None: sys.stdout.write(writer.GetOutputString()[0:writer.GetOutputStringLength()]) table.close() diff --git a/processing/pre/geom_clean.py b/processing/pre/geom_clean.py index b55149a61..2bef9dc08 100755 --- a/processing/pre/geom_clean.py +++ b/processing/pre/geom_clean.py @@ -1,20 +1,17 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string,math +import os,sys,math import numpy as np import damask from scipy import ndimage from optparse import OptionParser -from collections import defaultdict scriptName = os.path.splitext(os.path.basename(__file__))[0] scriptID = ' '.join([scriptName,damask.version]) def mostFrequent(arr): - d = defaultdict(int) - for i in arr: d[i] += 1 - return sorted(d.iteritems(), key=lambda x: x[1], reverse=True)[0][0] # return value of most frequent microstructure + return np.argmax(np.bincount(arr)) #-------------------------------------------------------------------------------------------------- @@ -43,10 +40,9 @@ parser.set_defaults(stencil = 3, if filenames == []: filenames = [None] for name in filenames: - try: - table = damask.ASCIItable(name = name, - buffered = False, - labeled = False) + try: table = damask.ASCIItable(name = name, + buffered = False, + labeled = False) except: continue damask.util.report(scriptName,name) @@ -72,7 +68,7 @@ for name in filenames: # --- read data ------------------------------------------------------------------------------------ - microstructure = table.microstructure_read(info['grid']).reshape(info['grid'],order='F') # read microstructure + microstructure = table.microstructure_read(info['grid']).reshape(info['grid'],order='F') # read microstructure # --- do work ------------------------------------------------------------------------------------ diff --git a/processing/pre/geom_fromAng.py b/processing/pre/geom_fromAng.py index 3cf031db8..987ff8ac1 100755 --- a/processing/pre/geom_fromAng.py +++ b/processing/pre/geom_fromAng.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,math,string +import os,sys,math import numpy as np from optparse import OptionParser import damask @@ -124,16 +124,22 @@ for name in filenames: continue eulerangles=np.around(eulerangles,int(options.precision)) # round to desired precision +# ensure that rounded Euler angles are not out of bounds (modulo by limits) for i,angle in enumerate(['phi1','PHI','phi2']): - eulerangles[:,i]%=limits[i] # ensure, that rounded euler angles are not out of bounds (modulo by limits) + eulerangles[:,i]%=limits[i] +# scale angles by desired precision and convert to int: create a unique integer key from the three Euler angles by +# concatenating their string representations with leading zeros, then search for unique euler angle keys. +# Texture IDs are the indices of the first occurrence; the inverse is used to construct the microstructure. +# create a microstructure (texture/phase pair) for each point using unique texture IDs.
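Regarding the mostFrequent() rewrite in geom_clean.py above: np.bincount tallies how often each non-negative integer occurs and np.argmax picks the value with the highest count, which is exactly the mode of the stencil neighborhood (ties resolve to the smallest value, and microstructure indices are non-negative, so the replacement is safe). A toy illustration with made-up neighborhood contents:

    import numpy as np
    neighborhood = np.array([3, 1, 3, 2, 3, 1])   # hypothetical stencil contents
    counts = np.bincount(neighborhood)            # counts[v] = number of occurrences of v
    print(counts)                                 # [0 2 1 3]
    print(np.argmax(counts))                      # 3 -> most frequent microstructure index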
+# Use longInt (64bit, i8) because the keys might be long if options.compress: formatString='{0:0>'+str(int(options.precision)+3)+'}' - euleranglesRadInt = (eulerangles*10**int(options.precision)).astype('int') # scale by desired precision and convert to int + euleranglesRadInt = (eulerangles*10**int(options.precision)).astype('int') eulerKeys = np.array([int(''.join(map(formatString.format,euleranglesRadInt[i,:]))) \ - for i in xrange(info['grid'].prod())]) # create unique integer key from three euler angles by concatenating the string representation with leading zeros and store as integer - devNull, texture, eulerKeys_idx = np.unique(eulerKeys, return_index = True, return_inverse=True)# search unique euler angle keys. Texture IDs are the indices of the first occurrence, the inverse is used to construct the microstructure - msFull = np.array([[eulerKeys_idx[i],phase[i]] for i in xrange(info['grid'].prod())],'i8') # create a microstructure (texture/phase pair) for each point using unique texture IDs. Use longInt (64bit, i8) because the keys might be long + for i in xrange(info['grid'].prod())]) + devNull, texture, eulerKeys_idx = np.unique(eulerKeys, return_index = True, return_inverse=True) + msFull = np.array([[eulerKeys_idx[i],phase[i]] for i in xrange(info['grid'].prod())],'i8') devNull,msUnique,matPoints = np.unique(msFull.view('c16'),True,True) matPoints+=1 microstructure = np.array([msFull[i] for i in msUnique]) # pick only unique microstructures diff --git a/processing/pre/geom_fromBarycentric.py b/processing/pre/geom_fromBarycentric.py index 383681ef9..fe687fd2a 100755 --- a/processing/pre/geom_fromBarycentric.py +++ b/processing/pre/geom_fromBarycentric.py @@ -10,10 +10,11 @@ # maintain meaningful microstructure(reduce artifacts). -import sys, os, string +import os import numpy as np import argparse from scipy.spatial import Delaunay +import damask scriptName = os.path.splitext(os.path.basename(__file__))[0] scriptID = ' '.join([scriptName,damask.version]) @@ -23,16 +24,14 @@ OFFSET = 0.1 #resize the seeded volume to give space for rim/pan PHANTOM_ID = -1 #grain ID for phantom seeds def d_print(info, data, separator=False): - '''quickly print debug information''' - if(separator): print "*"*80 - print info - print data + """quickly print debug information""" + if(separator): print "*"*80 + print info + print data def meshgrid2(*arrs): - ''' - code inspired by http://stackoverflow.com/questions/1827489/numpy-meshgrid-in-3d - ''' + """code inspired by http://stackoverflow.com/questions/1827489/numpy-meshgrid-in-3d""" arrs = tuple(reversed(arrs)) arrs = tuple(arrs) lens = np.array(map(len, arrs)) @@ -121,9 +120,9 @@ args = parser.parse_args() # get all the arguments right after #quick help to user print "*"*80 parser.print_help() -print '''Sample usage: +print """Sample usage: ./geoFromBarycentic.py 20grains.seeds -g 128 128 128 -S -r; geom_check seeds.geom; seeds_check new_seed.seeds. 
-''' +""" print "*"*80 if (args.debug): d_print("args are:", parser.parse_args(),separator=True) diff --git a/processing/pre/geom_fromEuclideanDistance.py b/processing/pre/geom_fromEuclideanDistance.py index 405fca630..a932583c2 100755 --- a/processing/pre/geom_fromEuclideanDistance.py +++ b/processing/pre/geom_fromEuclideanDistance.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string,math,itertools +import os,sys,math,itertools import numpy as np from scipy import ndimage from optparse import OptionParser diff --git a/processing/pre/geom_fromImage.py b/processing/pre/geom_fromImage.py index 5ef524dcb..0977c5e7e 100755 --- a/processing/pre/geom_fromImage.py +++ b/processing/pre/geom_fromImage.py @@ -1,10 +1,10 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,math,string +import os,sys,math import numpy as np from optparse import OptionParser -from PIL import Image,ImageOps +from PIL import Image import damask scriptName = os.path.splitext(os.path.basename(__file__))[0] @@ -51,7 +51,7 @@ for name in filenames: try: img.seek(slice) # advance to slice layer = np.expand_dims(1+np.array(img,dtype = 'uint16'),axis = 0) # read image layer - microstructure = layer if slice == 0 else np.vstack((microstructure,layer)) # add to microstructure data + microstructure = layer if slice == 0 else np.vstack((microstructure,layer)) # noqa slice += 1 # advance to next slice except EOFError: break diff --git a/processing/pre/geom_fromMinimalSurface.py b/processing/pre/geom_fromMinimalSurface.py index 915f1f9f3..a3729dc30 100755 --- a/processing/pre/geom_fromMinimalSurface.py +++ b/processing/pre/geom_fromMinimalSurface.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string,math +import os,sys,math import numpy as np from optparse import OptionParser import damask diff --git a/processing/pre/geom_fromOsteonGeometry.py b/processing/pre/geom_fromOsteonGeometry.py index 9e8280a72..bc49b9dca 100755 --- a/processing/pre/geom_fromOsteonGeometry.py +++ b/processing/pre/geom_fromOsteonGeometry.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string,math +import os,sys,math import numpy as np from optparse import OptionParser import damask diff --git a/processing/pre/geom_fromTable.py b/processing/pre/geom_fromTable.py index 68b8aa12d..53281c5df 100755 --- a/processing/pre/geom_fromTable.py +++ b/processing/pre/geom_fromTable.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string,math,types,time +import os,sys,math,types,time import scipy.spatial, numpy as np from optparse import OptionParser import damask @@ -94,18 +94,18 @@ parser.set_defaults(symmetry = [damask.Symmetry.lattices[-1]], (options,filenames) = parser.parse_args() -input = [options.eulers != None, - options.a != None and \ - options.b != None and \ - options.c != None, - options.matrix != None, - options.quaternion != None, - options.microstructure != None, +input = [options.eulers is not None, + options.a is not None and \ + options.b is not None and \ + options.c is not None, + options.matrix is not None, + options.quaternion is not None, + options.microstructure is not None, ] if np.sum(input) != 1: parser.error('need either microstructure label or exactly one orientation input format.') -if options.axes != None and not set(options.axes).issubset(set(['x','+x','-x','y','+y','-y','z','+z','-z'])): +if options.axes is not None and not 
set(options.axes).issubset(set(['x','+x','-x','y','+y','-y','z','+z','-z'])): parser.error('invalid axes {} {} {}.'.format(*options.axes)) (label,dim,inputtype) = [(options.eulers,3,'eulers'), @@ -157,7 +157,7 @@ for name in filenames: if coordDim == 2: table.data = np.insert(table.data,2,np.zeros(len(table.data)),axis=1) # add zero z coordinate for two-dimensional input if options.verbose: damask.util.croak('extending to 3D...') - if options.phase == None: + if options.phase is None: table.data = np.column_stack((table.data,np.ones(len(table.data)))) # add single phase if no phase column given if options.verbose: damask.util.croak('adding dummy phase info...') @@ -168,7 +168,7 @@ for name in filenames: maxcorner = np.array(map(max,coords)) grid = np.array(map(len,coords),'i') size = grid/np.maximum(np.ones(3,'d'), grid-1.0) * (maxcorner-mincorner) # size from edge to edge = dim * n/(n-1) - size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 equal to smallest among other spacings + size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 set to smallest among other spacings delta = size/np.maximum(np.ones(3,'d'), grid) origin = mincorner - 0.5*delta # shift from cell center to corner @@ -188,7 +188,7 @@ for name in filenames: # ------------------------------------------ process data ------------------------------------------ - colOri = table.label_index(label)+(3-coordDim) # column(s) of orientation data (following 3 or 2 coordinates that were expanded to 3!) + colOri = table.label_index(label)+(3-coordDim) # column(s) of orientation data followed by 3 coordinates if inputtype == 'microstructure': @@ -207,9 +207,9 @@ for name in filenames: statistics = {'global': 0, 'local': 0} grain = -np.ones(N,dtype = 'int32') # initialize empty microstructure - orientations = [] # empty list of orientations - multiplicity = [] # empty list of orientation multiplicity (number of group members) - phases = [] # empty list of phase info + orientations = [] # orientations + multiplicity = [] # orientation multiplicity (number of group members) + phases = [] # phase info nGrains = 0 # counter for detected grains existingGrains = np.arange(nGrains) myPos = 0 # position (in list) of current grid point @@ -227,7 +227,7 @@ for name in filenames: myData = table.data[index[myPos]] # read data for current grid point myPhase = int(myData[colPhase]) - mySym = options.symmetry[min(myPhase,len(options.symmetry))-1] # select symmetry from option (take last specified option for all with higher index) + mySym = options.symmetry[min(myPhase,len(options.symmetry))-1] # take last specified option for all with higher index if inputtype == 'eulers': o = damask.Orientation(Eulers = myData[colOri:colOri+3]*toRadians, @@ -250,26 +250,27 @@ for name in filenames: if options.tolerance > 0.0: # only try to compress orientations if asked to neighbors = np.array(KDTree.query_ball_point([x,y,z], 3)) # point indices within radius +# filter neighbors: skip myself, anyone further ahead (cannot yet have a grain ID), and other phases neighbors = neighbors[(neighbors < myPos) & \ - (table.data[index[neighbors],colPhase] == myPhase)] # filter neighbors: skip myself, anyone further ahead (cannot yet have a grain ID), and other phases + (table.data[index[neighbors],colPhase] == myPhase)] grains = np.unique(grain[neighbors]) # unique grain IDs among valid neighbors if len(grains) > 0: # check immediate neighborhood first cos_disorientations = 
np.array([o.disorientation(orientations[grainID], SST = False)[0].quaternion.w \ for grainID in grains]) # store disorientation per grainID - closest_grain = np.argmax(cos_disorientations) # find grain among grains that has closest orientation to myself + closest_grain = np.argmax(cos_disorientations) # grain among grains with closest orientation to myself match = 'local' if cos_disorientations[closest_grain] < threshold: # orientation not close enough? - grains = existingGrains[np.atleast_1d( ( np.array(phases) == myPhase ) & \ - ( np.in1d(existingGrains,grains,invert=True) ) )] # check every other already identified grain (of my phase) + grains = existingGrains[np.atleast_1d( (np.array(phases) == myPhase ) & \ + (np.in1d(existingGrains,grains,invert=True)))] # other already identified grains (of my phase) if len(grains) > 0: cos_disorientations = np.array([o.disorientation(orientations[grainID], SST = False)[0].quaternion.w \ for grainID in grains]) # store disorientation per grainID - closest_grain = np.argmax(cos_disorientations) # find grain among grains that has closest orientation to myself + closest_grain = np.argmax(cos_disorientations) # grain among grains with closest orientation to myself match = 'global' if cos_disorientations[closest_grain] >= threshold: # orientation now close enough? @@ -331,7 +332,7 @@ for name in filenames: config_header += [''] for i,orientation in enumerate(orientations): config_header += ['[Grain%s]'%(str(i+1).zfill(formatwidth)), - 'axes\t%s %s %s'%tuple(options.axes) if options.axes != None else '', + 'axes\t%s %s %s'%tuple(options.axes) if options.axes is not None else '', '(gauss)\tphi1 %g\tPhi %g\tphi2 %g\tscatter 0.0\tfraction 1.0'%tuple(orientation.asEulers(degrees = True)), ] diff --git a/processing/pre/geom_fromVPSC.py b/processing/pre/geom_fromVPSC.py index 99762fd5e..a555f6d07 100755 --- a/processing/pre/geom_fromVPSC.py +++ b/processing/pre/geom_fromVPSC.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,math,string +import os,sys,math import numpy as np from optparse import OptionParser import damask @@ -101,16 +101,22 @@ for name in filenames: %(angle,n,eulerangles[n,0],eulerangles[n,1],eulerangles[n,2])) continue eulerangles=np.around(eulerangles,int(options.precision)) # round to desired precision +# ensure that rounded Euler angles are not out of bounds (modulo by limits) for i,angle in enumerate(['phi1','PHI','phi2']): - eulerangles[:,i]%=limits[i] # ensure, that rounded euler angles are not out of bounds (modulo by limits) + eulerangles[:,i]%=limits[i] +# scale angles by desired precision and convert to int: create a unique integer key from the three Euler angles by +# concatenating their string representations with leading zeros, then search for unique euler angle keys. +# Texture IDs are the indices of the first occurrence; the inverse is used to construct the microstructure. +# create a microstructure (texture/phase pair) for each point using unique texture IDs.
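The comment block introduced here (and identically in geom_fromAng.py) documents the compression scheme: each rounded Euler triplet is packed into a single zero-padded integer key, so one np.unique call can find repeated orientations and hand back both the texture IDs and the inverse mapping. A toy version of the packing with made-up angles and precision:

    import numpy as np
    precision = 1                                            # hypothetical rounding precision
    eulerangles = np.array([[12.3, 45.6,  7.8],
                            [12.3, 45.6,  7.8],
                            [90.0,  0.0,  0.0]])             # hypothetical, already rounded
    formatString = '{0:0>'+str(precision+3)+'}'              # zero-pad each angle to fixed width
    scaled = (eulerangles*10**precision).astype('int')       # 12.3 -> 123, etc.
    keys = np.array([int(''.join(map(formatString.format,row))) for row in scaled])
    unique,texture,inverse = np.unique(keys,return_index=True,return_inverse=True)
    print(inverse)                                           # [0 0 1] -> points 0 and 1 share a texture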
+# Use longInt (64bit, i8) because the keys might be long if options.compress: formatString='{0:0>'+str(int(options.precision)+3)+'}' - euleranglesRadInt = (eulerangles*10**int(options.precision)).astype('int') # scale by desired precision and convert to int + euleranglesRadInt = (eulerangles*10**int(options.precision)).astype('int') eulerKeys = np.array([int(''.join(map(formatString.format,euleranglesRadInt[i,:]))) \ - for i in xrange(info['grid'].prod())]) # create unique integer key from three euler angles by concatenating the string representation with leading zeros and store as integer - devNull, texture, eulerKeys_idx = np.unique(eulerKeys, return_index = True, return_inverse=True)# search unique euler angle keys. Texture IDs are the indices of the first occurence, the inverse is used to construct the microstructure - msFull = np.array([[eulerKeys_idx[i],phase[i]] for i in xrange(info['grid'].prod())],'i8') # create a microstructure (texture/phase pair) for each point using unique texture IDs. Use longInt (64bit, i8) because the keys might be long + for i in xrange(info['grid'].prod())]) + devNull, texture, eulerKeys_idx = np.unique(eulerKeys, return_index = True, return_inverse=True) + msFull = np.array([[eulerKeys_idx[i],phase[i]] for i in xrange(info['grid'].prod())],'i8') devNull,msUnique,matPoints = np.unique(msFull.view('c16'),True,True) matPoints+=1 microstructure = np.array([msFull[i] for i in msUnique]) # pick only unique microstructures diff --git a/processing/pre/geom_fromVoronoiTessellation.py b/processing/pre/geom_fromVoronoiTessellation.py index 0a6e186fe..569c181c5 100755 --- a/processing/pre/geom_fromVoronoiTessellation.py +++ b/processing/pre/geom_fromVoronoiTessellation.py @@ -1,10 +1,10 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,math,string +import os,sys,math import numpy as np import multiprocessing -from optparse import OptionParser +from optparse import OptionParser,OptionGroup from scipy import spatial import damask @@ -13,9 +13,7 @@ scriptID = ' '.join([scriptName,damask.version]) def meshgrid2(*arrs): - ''' - code inspired by http://stackoverflow.com/questions/1827489/numpy-meshgrid-in-3d - ''' + """code inspired by http://stackoverflow.com/questions/1827489/numpy-meshgrid-in-3d""" arrs = tuple(reversed(arrs)) arrs = tuple(arrs) lens = np.array(map(len, arrs)) @@ -45,7 +43,7 @@ def laguerreTessellation(undeformed, coords, weights, grains, nonperiodic = Fals np.array([ [ 0, 0, 0 ], ]).astype(float) if nonperiodic else \ - np.array([ + np.array([ [ -1,-1,-1 ], [ 0,-1,-1 ], [ 1,-1,-1 ], @@ -99,7 +97,8 @@ def laguerreTessellation(undeformed, coords, weights, grains, nonperiodic = Fals for i,arg in enumerate(arguments): closestSeeds[i] = findClosestSeed(arg) - return grains[closestSeeds%coords.shape[0]] # closestSeed is modulo number of original seed points (i.e. excluding periodic copies) +# closestSeed is modulo number of original seed points (i.e. 
excluding periodic copies) + return grains[closestSeeds%coords.shape[0]] # -------------------------------------------------------------------- # MAIN @@ -110,71 +109,83 @@ Generate geometry description and material configuration by standard Voronoi tes """, version = scriptID) -parser.add_option('-g', '--grid', - dest = 'grid', - type = 'int', nargs = 3, metavar = ' '.join(['int']*3), - help = 'a,b,c grid of hexahedral box [auto]') -parser.add_option('-s', '--size', - dest = 'size', - type = 'float', nargs = 3, metavar=' '.join(['float']*3), - help = 'x,y,z size of hexahedral box [auto]') -parser.add_option('-o', '--origin', - dest = 'origin', - type = 'float', nargs = 3, metavar=' '.join(['float']*3), - help = 'offset from old to new origin of grid') -parser.add_option('-p', '--position', - dest = 'position', - type = 'string', metavar = 'string', - help = 'column label for seed positions [%default]') -parser.add_option('-w', '--weight', - dest = 'weight', - type = 'string', metavar = 'string', - help = 'column label for seed weights [%default]') -parser.add_option('-m', '--microstructure', - dest = 'microstructure', - type = 'string', metavar = 'string', - help = 'column label for seed microstructures [%default]') -parser.add_option('-e', '--eulers', - dest = 'eulers', - type = 'string', metavar = 'string', - help = 'column label for seed Euler angles [%default]') -parser.add_option('--axes', - dest = 'axes', - type = 'string', nargs = 3, metavar = ' '.join(['string']*3), - help = 'orientation coordinate frame in terms of position coordinate frame') -parser.add_option('--homogenization', - dest = 'homogenization', - type = 'int', metavar = 'int', - help = 'homogenization index to be used [%default]') -parser.add_option('--crystallite', - dest = 'crystallite', - type = 'int', metavar = 'int', - help = 'crystallite index to be used [%default]') -parser.add_option('--phase', - dest = 'phase', - type = 'int', metavar = 'int', - help = 'phase index to be used [%default]') -parser.add_option('-r', '--rnd', - dest = 'randomSeed', - type = 'int', metavar='int', - help = 'seed of random number generator for second phase distribution [%default]') -parser.add_option('--secondphase', - dest = 'secondphase', - type = 'float', metavar= 'float', - help = 'volume fraction of randomly distribute second phase [%default]') -parser.add_option('-l', '--laguerre', + +group = OptionGroup(parser, "Tessellation","") + +group.add_option('-l', '--laguerre', dest = 'laguerre', action = 'store_true', help = 'use Laguerre (weighted Voronoi) tessellation') -parser.add_option('--cpus', +group.add_option('--cpus', dest = 'cpus', type = 'int', metavar = 'int', help = 'number of parallel processes to use for Laguerre tessellation [%default]') -parser.add_option('--nonperiodic', +group.add_option('--nonperiodic', dest = 'nonperiodic', action = 'store_true', help = 'use nonperiodic tessellation') +parser.add_option_group(group) + +group = OptionGroup(parser, "Geometry","") + +group.add_option('-g', '--grid', + dest = 'grid', + type = 'int', nargs = 3, metavar = ' '.join(['int']*3), + help = 'a,b,c grid of hexahedral box [auto]') +group.add_option('-s', '--size', + dest = 'size', + type = 'float', nargs = 3, metavar=' '.join(['float']*3), + help = 'x,y,z size of hexahedral box [auto]') +group.add_option('-o', '--origin', + dest = 'origin', + type = 'float', nargs = 3, metavar=' '.join(['float']*3), + help = 'origin of grid') + +parser.add_option_group(group) + +group = OptionGroup(parser, "Seeds","") + 
+group.add_option('-p', '--position', + dest = 'position', + type = 'string', metavar = 'string', + help = 'column label for seed positions [%default]') +group.add_option('-w', '--weight', + dest = 'weight', + type = 'string', metavar = 'string', + help = 'column label for seed weights [%default]') +group.add_option('-m', '--microstructure', + dest = 'microstructure', + type = 'string', metavar = 'string', + help = 'column label for seed microstructures [%default]') +group.add_option('-e', '--eulers', + dest = 'eulers', + type = 'string', metavar = 'string', + help = 'column label for seed Euler angles [%default]') +group.add_option('--axes', + dest = 'axes', + type = 'string', nargs = 3, metavar = ' '.join(['string']*3), + help = 'orientation coordinate frame in terms of position coordinate frame') + +parser.add_option_group(group) + +group = OptionGroup(parser, "Configuration","") + +group.add_option('--homogenization', + dest = 'homogenization', + type = 'int', metavar = 'int', + help = 'homogenization index to be used [%default]') +group.add_option('--crystallite', + dest = 'crystallite', + type = 'int', metavar = 'int', + help = 'crystallite index to be used [%default]') +group.add_option('--phase', + dest = 'phase', + type = 'int', metavar = 'int', + help = 'phase index to be used [%default]') + +parser.add_option_group(group) + parser.set_defaults(position = 'pos', weight = 'weight', microstructure = 'microstructure', @@ -182,26 +193,20 @@ parser.set_defaults(position = 'pos', homogenization = 1, crystallite = 1, phase = 1, - secondphase = 0.0, cpus = 2, laguerre = False, nonperiodic = False, - randomSeed = None, ) (options,filenames) = parser.parse_args() -if options.secondphase > 1.0 or options.secondphase < 0.0: - parser.error('volume fraction of second phase ({}) out of bounds.'.format(options.secondphase)) - # --- loop over input files ------------------------------------------------------------------------- if filenames == []: filenames = [None] for name in filenames: - try: - table = damask.ASCIItable(name = name, - outname = os.path.splitext(name)[-2]+'.geom' if name else name, - buffered = False) + try: table = damask.ASCIItable(name = name, + outname = os.path.splitext(name)[-2]+'.geom' if name else name, + buffered = False) except: continue damask.util.report(scriptName,name) @@ -210,9 +215,9 @@ for name in filenames: table.head_read() info,extra_header = table.head_getGeom() - if options.grid != None: info['grid'] = options.grid - if options.size != None: info['size'] = options.size - if options.origin != None: info['origin'] = options.origin + if options.grid is not None: info['grid'] = options.grid + if options.size is not None: info['size'] = options.size + if options.origin is not None: info['origin'] = options.origin # ------------------------------------------ sanity checks --------------------------------------- @@ -277,47 +282,38 @@ for name in filenames: grid = np.vstack(meshgrid2(x, y, z)).reshape(3,-1).T indices = laguerreTessellation(grid, coords, weights, grains, options.nonperiodic, options.cpus) -# --- write header --------------------------------------------------------------------------------- +# --- write header ------------------------------------------------------------------------ - grainIDs = np.intersect1d(grainIDs,indices) - info['microstructures'] = len(grainIDs) + usedGrainIDs = np.intersect1d(grainIDs,indices) + info['microstructures'] = len(usedGrainIDs) if info['homogenization'] == 0: info['homogenization'] = options.homogenization 
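The long option list of geom_fromVoronoiTessellation.py is split into optparse OptionGroups above (Tessellation, Geometry, Seeds, Configuration); parsing behavior is unchanged, but --help now sections the flags by topic. The pattern in isolation, with a made-up group:

    from optparse import OptionParser, OptionGroup

    parser = OptionParser(usage='%prog options [file[s]]')
    group = OptionGroup(parser, "Geometry", "")
    group.add_option('-g', '--grid',
                     dest = 'grid',
                     type = 'int', nargs = 3, metavar = 'int int int',
                     help = 'a,b,c grid of hexahedral box')
    parser.add_option_group(group)                    # attach the group to the parser
    (options,args) = parser.parse_args(['-g','16','16','16'])
    print(options.grid)                               # (16, 16, 16)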
damask.util.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))), - 'size x y z: %s'%(' x '.join(map(str,info['size']))), - 'origin x y z: %s'%(' : '.join(map(str,info['origin']))), - 'homogenization: %i'%info['homogenization'], - 'microstructures: %i%s'%(info['microstructures'], + 'size x y z: %s'%(' x '.join(map(str,info['size']))), + 'origin x y z: %s'%(' : '.join(map(str,info['origin']))), + 'homogenization: %i'%info['homogenization'], + 'microstructures: %i%s'%(info['microstructures'], (' out of %i'%NgrainIDs if NgrainIDs != info['microstructures'] else '')), - ]) + ]) config_header = [] - formatwidth = 1+int(math.log10(info['microstructures'])) - - phase = options.phase * np.ones(info['microstructures'],'i') - if int(options.secondphase*info['microstructures']) > 0: - phase[0:int(options.secondphase*info['microstructures'])] += 1 - randomSeed = int(os.urandom(4).encode('hex'), 16) if options.randomSeed == None \ - else options.randomSeed # random seed for second phase - np.random.seed(randomSeed) - np.random.shuffle(phase) - config_header += ['# random seed (phase shuffling): {}'.format(randomSeed)] + formatwidth = 1+int(math.log10(NgrainIDs)) config_header += [''] for i,ID in enumerate(grainIDs): config_header += ['[Grain%s]'%(str(ID).zfill(formatwidth)), 'crystallite %i'%options.crystallite, - '(constituent)\tphase %i\ttexture %s\tfraction 1.0'%(phase[i],str(ID).rjust(formatwidth)), + '(constituent)\tphase %i\ttexture %s\tfraction 1.0'%(options.phase,str(ID).rjust(formatwidth)), ] if hasEulers: config_header += [''] for ID in grainIDs: - eulerID = np.nonzero(grains == ID)[0][0] # find first occurrence of this grain id + eulerID = np.nonzero(grains == ID)[0][0] # find first occurrence of this grain id config_header += ['[Grain%s]'%(str(ID).zfill(formatwidth)), '(gauss)\tphi1 %g\tPhi %g\tphi2 %g\tscatter 0.0\tfraction 1.0'%tuple(eulers[eulerID]) ] - if options.axes != None: config_header.append('axes\t%s %s %s'%tuple(options.axes)) + if options.axes is not None: config_header.append('axes\t%s %s %s'%tuple(options.axes)) table.labels_clear() table.info_clear() diff --git a/processing/pre/geom_grainGrowth.py b/processing/pre/geom_grainGrowth.py index 70fdc095f..0f72a3589 100755 --- a/processing/pre/geom_grainGrowth.py +++ b/processing/pre/geom_grainGrowth.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string,math +import os,sys,math import numpy as np from optparse import OptionParser from scipy import ndimage @@ -73,15 +73,16 @@ for name in filenames: # --- read data ------------------------------------------------------------------------------------ microstructure = np.tile(np.array(table.microstructure_read(info['grid']),'i').reshape(info['grid'],order='F'), - np.where(info['grid'] == 1, 2,1)) # make one copy along dimensions with grid == 1 + np.where(info['grid'] == 1, 2,1)) # make one copy along dimensions with grid == 1 grid = np.array(microstructure.shape) #--- initialize support data ----------------------------------------------------------------------- periodic_microstructure = np.tile(microstructure,(3,3,3))[grid[0]/2:-grid[0]/2, grid[1]/2:-grid[1]/2, - grid[2]/2:-grid[2]/2] # periodically extend the microstructure - microstructure_original = np.copy(microstructure) # store a copy the initial microstructure to find locations of immutable indices + grid[2]/2:-grid[2]/2] # periodically extend the microstructure +# store a copy of the initial microstructure to find locations of immutable indices + microstructure_original =
np.copy(microstructure) X,Y,Z = np.mgrid[0:grid[0],0:grid[1],0:grid[2]] gauss = np.exp(-(X*X + Y*Y + Z*Z)/(2.0*options.d*options.d))/math.pow(2.0*np.pi*options.d*options.d,1.5) @@ -99,44 +100,50 @@ for name in filenames: for i in (-1,0,1): for j in (-1,0,1): for k in (-1,0,1): + # assign interfacial energy to all voxels that have a differing neighbor (in Moore neighborhood) interfaceEnergy = np.maximum(boundary, interfacialEnergy(microstructure,np.roll(np.roll(np.roll( - microstructure,i,axis=0), j,axis=1), k,axis=2))) # assign interfacial energy to all voxels that have a differing neighbor (in Moore neighborhood) + microstructure,i,axis=0), j,axis=1), k,axis=2))) + # periodically extend interfacial energy array by half a grid size in positive and negative directions periodic_interfaceEnergy = np.tile(interfaceEnergy,(3,3,3))[grid[0]/2:-grid[0]/2, grid[1]/2:-grid[1]/2, - grid[2]/2:-grid[2]/2] # periodically extend interfacial energy array by half a grid size in positive and negative directions - index = ndimage.morphology.distance_transform_edt(periodic_interfaceEnergy == 0., # transform bulk volume (i.e. where interfacial energy is zero) + grid[2]/2:-grid[2]/2] + # transform bulk volume (i.e. where interfacial energy is zero) + index = ndimage.morphology.distance_transform_edt(periodic_interfaceEnergy == 0., return_distances = False, - return_indices = True) # want array index of nearest voxel on periodically extended boundary -# boundaryExt = boundaryExt[index[0].flatten(),index[1].flatten(),index[2].flatten()].reshape(boundaryExt.shape) # fill bulk with energy of nearest interface | question PE: what "flatten" for? + return_indices = True) + # want array index of nearest voxel on periodically extended boundary periodic_bulkEnergy = periodic_interfaceEnergy[index[0], index[1], - index[2]].reshape(2*grid) # fill bulk with energy of nearest interface - diffusedEnergy = np.fft.irfftn(np.fft.rfftn(np.where(ndimage.morphology.binary_dilation(interfaceEnergy > 0., - structure = struc, - iterations = options.d/2 + 1), # fat boundary | question PE: why 2d - 1? I would argue for d/2 + 1 !! - periodic_bulkEnergy[grid[0]/2:-grid[0]/2, # retain filled energy on fat boundary... - grid[1]/2:-grid[1]/2, - grid[2]/2:-grid[2]/2], # ...and zero everywhere else - 0.)\ - )*gauss) + index[2]].reshape(2*grid) # fill bulk with energy of nearest interface + diffusedEnergy = np.fft.irfftn(np.fft.rfftn( + np.where( + ndimage.morphology.binary_dilation(interfaceEnergy > 0., + structure = struc, + iterations = options.d/2 + 1), # fat boundary | PE: why 2d-1? I would argue for d/2 + 1 + periodic_bulkEnergy[grid[0]/2:-grid[0]/2, # retain filled energy on fat boundary... + grid[1]/2:-grid[1]/2, + grid[2]/2:-grid[2]/2], # ...and zero everywhere else + 0.))*gauss) periodic_diffusedEnergy = np.tile(diffusedEnergy,(3,3,3))[grid[0]/2:-grid[0]/2, grid[1]/2:-grid[1]/2, - grid[2]/2:-grid[2]/2] # periodically extend the smoothed bulk energy - index = ndimage.morphology.distance_transform_edt(periodic_diffusedEnergy >= 0.5, # transform voxels close to interface region | question PE: what motivates 1/2 (could be any small number, or)? + grid[2]/2:-grid[2]/2] # periodically extend the smoothed bulk energy + # transform voxels close to interface region | question PE: what motivates 1/2 (could be any small number, or)? 
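The diffusedEnergy expression re-wrapped above is FFT-based convolution of the interface-energy field with a Gaussian (the script's gauss array has been transformed to Fourier space beforehand). The technique in miniature, on a toy periodic field; the kernel construction here is a self-contained assumption, not the script's exact layout:

    import numpy as np
    from scipy import ndimage

    d = 2.0                                               # hypothetical smoothing radius (voxels)
    field = np.zeros((8,8,8)); field[4,4,4] = 1.0         # toy interface-energy field
    X,Y,Z = np.mgrid[0:8,0:8,0:8]
    X,Y,Z = [np.minimum(A,8-A) for A in (X,Y,Z)]          # periodic distance to voxel (0,0,0)
    kernel = np.exp(-(X*X + Y*Y + Z*Z)/(2.0*d*d))
    kernel /= kernel.sum()                                # normalize to preserve the field's sum
    smoothed = np.fft.irfftn(np.fft.rfftn(field)*np.fft.rfftn(kernel), s=field.shape)
    reference = ndimage.gaussian_filter(field, sigma=d, mode='wrap')  # comparable result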
+ index = ndimage.morphology.distance_transform_edt(periodic_diffusedEnergy >= 0.5, return_distances = False, - return_indices = True) # want index of closest bulk grain + return_indices = True) # want index of closest bulk grain microstructure = periodic_microstructure[index[0], index[1], index[2]].reshape(2*grid)[grid[0]/2:-grid[0]/2, grid[1]/2:-grid[1]/2, - grid[2]/2:-grid[2]/2] # extent grains into interface region + grid[2]/2:-grid[2]/2] # extend grains into interface region immutable = np.zeros(microstructure.shape, dtype=bool) + # find locations where immutable microstructures have been or are now for micro in options.immutable: - immutable += np.logical_or(microstructure == micro, microstructure_original == micro) # find locations where immutable microstructures have been or are now - - microstructure = np.where(immutable, microstructure_original,microstructure) # undo any changes involving immutable microstructures + immutable += np.logical_or(microstructure == micro, microstructure_original == micro) + # undo any changes involving immutable microstructures + microstructure = np.where(immutable, microstructure_original,microstructure) # --- renumber to sequence 1...Ngrains if requested ------------------------------------------------ # http://stackoverflow.com/questions/10741346/np-frequency-counts-for-unique-values-in-an-array diff --git a/processing/pre/geom_pack.py b/processing/pre/geom_pack.py index 9edb8c3dc..17d1cb63c 100755 --- a/processing/pre/geom_pack.py +++ b/processing/pre/geom_pack.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string +import os,sys import numpy as np from optparse import OptionParser import damask diff --git a/processing/pre/geom_rescale.py b/processing/pre/geom_rescale.py index b0d27f474..e005e41a9 100755 --- a/processing/pre/geom_rescale.py +++ b/processing/pre/geom_rescale.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string,math +import os,sys,math import numpy as np from optparse import OptionParser import damask @@ -82,8 +82,12 @@ for name in filenames: 'microstructures': 0, } - newInfo['grid'] = np.array([{True:round(o*float(n.translate(None,'xX'))), False: round(float(n.translate(None,'xX')))}[n[-1].lower() == 'x'] for o,n in zip(info['grid'],options.grid)],'i') - newInfo['size'] = np.array([{True: o*float(n.translate(None,'xX')) , False: float(n.translate(None,'xX')) }[n[-1].lower() == 'x'] for o,n in zip(info['size'],options.size)],'d') + newInfo['grid'] = np.array([{True:round(o*float(n.translate(None,'xX'))), + False: round(float(n.translate(None,'xX')))}[n[-1].lower() == 'x'] + for o,n in zip(info['grid'],options.grid)],'i') + newInfo['size'] = np.array([{True: o*float(n.translate(None,'xX')) , + False: float(n.translate(None,'xX')) }[n[-1].lower() == 'x'] + for o,n in zip(info['size'],options.size)],'d') newInfo['grid'] = np.where(newInfo['grid'] <= 0 , info['grid'],newInfo['grid']) newInfo['size'] = np.where(newInfo['size'] <= 0.0, info['size'],newInfo['size']) @@ -97,11 +101,8 @@ for name in filenames: last = this microstructure = microstructure.reshape(info['grid'],order='F') - microstructure = np.repeat( - np.repeat( - np.repeat(microstructure,multiplicity[0], axis=0), - multiplicity[1], axis=1), - multiplicity[2], axis=2) + microstructure = np.repeat(np.repeat(np.repeat(microstructure, + multiplicity[0], axis=0),multiplicity[1], axis=1),multiplicity[2], axis=2) # --- renumber to sequence 1...Ngrains if requested
------------------------------------------------ # http://stackoverflow.com/questions/10741346/np-frequency-counts-for-unique-values-in-an-array @@ -119,9 +120,12 @@ for name in filenames: remarks = [] errors = [] - if (any(newInfo['grid'] != info['grid'])): remarks.append('--> grid a b c: %s'%(' x '.join(map(str,newInfo['grid'])))) - if (any(newInfo['size'] != info['size'])): remarks.append('--> size x y z: %s'%(' x '.join(map(str,newInfo['size'])))) - if ( newInfo['microstructures'] != info['microstructures']): remarks.append('--> microstructures: %i'%newInfo['microstructures']) + if (any(newInfo['grid'] != info['grid'])): + remarks.append('--> grid a b c: %s'%(' x '.join(map(str,newInfo['grid'])))) + if (any(newInfo['size'] != info['size'])): + remarks.append('--> size x y z: %s'%(' x '.join(map(str,newInfo['size'])))) + if ( newInfo['microstructures'] != info['microstructures']): + remarks.append('--> microstructures: %i'%newInfo['microstructures']) if np.any(newInfo['grid'] < 1): errors.append('invalid new grid a b c.') if np.any(newInfo['size'] <= 0.0): errors.append('invalid new size x y z.') diff --git a/processing/pre/geom_rotate.py b/processing/pre/geom_rotate.py index ecd0da09c..7ae47f894 100755 --- a/processing/pre/geom_rotate.py +++ b/processing/pre/geom_rotate.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string,math +import os,sys,math import numpy as np import damask from scipy import ndimage @@ -49,7 +49,7 @@ parser.set_defaults(degrees = False, (options, filenames) = parser.parse_args() -if sum(x != None for x in [options.rotation,options.eulers,options.matrix,options.quaternion]) !=1: +if sum(x is not None for x in [options.rotation,options.eulers,options.matrix,options.quaternion]) !=1: parser.error('not exactly one rotation specified...') toRadian = math.pi/180. 
if options.degrees else 1.0 diff --git a/processing/pre/geom_toTable.py b/processing/pre/geom_toTable.py index 79ace7d24..dff0c8150 100755 --- a/processing/pre/geom_toTable.py +++ b/processing/pre/geom_toTable.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string,vtk +import os,sys import numpy as np from optparse import OptionParser import damask diff --git a/processing/pre/geom_translate.py b/processing/pre/geom_translate.py index c6c93f0d3..13f4ef592 100755 --- a/processing/pre/geom_translate.py +++ b/processing/pre/geom_translate.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string,math +import os,sys,math import numpy as np import damask from optparse import OptionParser @@ -95,8 +95,10 @@ for name in filenames: # --- report --------------------------------------------------------------------------------------- remarks = [] - if (any(newInfo['origin'] != info['origin'])): remarks.append('--> origin x y z: %s'%(' : '.join(map(str,newInfo['origin'])))) - if ( newInfo['microstructures'] != info['microstructures']): remarks.append('--> microstructures: %i'%newInfo['microstructures']) + if (any(newInfo['origin'] != info['origin'])): + remarks.append('--> origin x y z: %s'%(' : '.join(map(str,newInfo['origin'])))) + if ( newInfo['microstructures'] != info['microstructures']): + remarks.append('--> microstructures: %i'%newInfo['microstructures']) if remarks != []: damask.util.croak(remarks) # --- write header --------------------------------------------------------------------------------- diff --git a/processing/pre/geom_unpack.py b/processing/pre/geom_unpack.py index 43e2764b4..85c60d6e5 100755 --- a/processing/pre/geom_unpack.py +++ b/processing/pre/geom_unpack.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string,math +import os,sys,math import numpy as np from optparse import OptionParser import damask @@ -18,6 +18,14 @@ Unpack geometry files containing ranges "a to b" and/or "n of x" multiples (excl """, version = scriptID) +parser.add_option('-1', '--onedimensional', + dest = 'oneD', + action = 'store_true', + help = 'output geom file with one-dimensional data arrangement') + +parser.set_defaults(oneD = False, + ) + (options, filenames) = parser.parse_args() # --- loop over input files ------------------------------------------------------------------------- @@ -69,7 +77,8 @@ for name in filenames: microstructure = table.microstructure_read(info['grid']) # read microstructure formatwidth = int(math.floor(math.log10(microstructure.max())+1)) # efficient number printing format - table.data = microstructure.reshape((info['grid'][0],info['grid'][1]*info['grid'][2]),order='F').transpose() + table.data = microstructure if options.oneD else \ + microstructure.reshape((info['grid'][0],info['grid'][1]*info['grid'][2]),order='F').transpose() table.data_writeArray('%%%ii'%(formatwidth),delimiter = ' ') #--- output finalization -------------------------------------------------------------------------- diff --git a/processing/pre/geom_vicinityOffset.py b/processing/pre/geom_vicinityOffset.py index c11188997..8a2a0c912 100755 --- a/processing/pre/geom_vicinityOffset.py +++ b/processing/pre/geom_vicinityOffset.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string,math +import os,sys,math import numpy as np from scipy import ndimage from optparse import OptionParser @@ -87,9 +87,8 @@ for name in filenames: # --- report 
--------------------------------------------------------------------------------------- - remarks = [] - if ( newInfo['microstructures'] != info['microstructures']): remarks.append('--> microstructures: %i'%newInfo['microstructures']) - if remarks != []: damask.util.croak(remarks) + if (newInfo['microstructures'] != info['microstructures']): + damask.util.croak('--> microstructures: %i'%newInfo['microstructures']) # --- write header --------------------------------------------------------------------------------- diff --git a/processing/pre/gmsh_identifySurfaces.py b/processing/pre/gmsh_identifySurfaces.py index f35579566..71c80a1dc 100755 --- a/processing/pre/gmsh_identifySurfaces.py +++ b/processing/pre/gmsh_identifySurfaces.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,string,re +import os,re from optparse import OptionParser import damask @@ -142,7 +142,7 @@ if (options.dimension == 3): elif (options.dimension == 2): for i,l in enumerate(line): - # for pts in line[int(abs(lines)-1)]: + # for pts in line[int(abs(lines)-1)]: for pts in l: x_coord.append(point[int(pts)-1][0]) y_coord.append(point[int(pts)-1][1]) diff --git a/processing/pre/hybridIA_linODFsampling.py b/processing/pre/hybridIA_linODFsampling.py index fee991aa2..a16c0db09 100755 --- a/processing/pre/hybridIA_linODFsampling.py +++ b/processing/pre/hybridIA_linODFsampling.py @@ -3,7 +3,7 @@ from optparse import OptionParser import damask -import os,sys,math,re,random,string +import os,sys,math,random import numpy as np scriptName = os.path.splitext(os.path.basename(__file__))[0] @@ -19,7 +19,7 @@ def integerFactorization(i): return j def binAsBins(bin,intervals): - """ explode compound bin into 3D bins list """ + """explode compound bin into 3D bins list""" bins = [0]*3 bins[0] = (bin//(intervals[1] * intervals[2])) % intervals[0] bins[1] = (bin//intervals[2]) % intervals[1] @@ -27,17 +27,17 @@ def binAsBins(bin,intervals): return bins def binsAsBin(bins,intervals): - """ implode 3D bins into compound bin """ + """implode 3D bins into compound bin""" return (bins[0]*intervals[1] + bins[1])*intervals[2] + bins[2] def EulersAsBins(Eulers,intervals,deltas,center): - """ return list of Eulers translated into 3D bins list """ + """return list of Eulers translated into 3D bins list""" return [int((euler+(0.5-center)*delta)//delta)%interval \ for euler,delta,interval in zip(Eulers,deltas,intervals) \ ] def binAsEulers(bin,intervals,deltas,center): - """ compound bin number translated into list of Eulers """ + """compound bin number translated into list of Eulers""" Eulers = [0.0]*3 Eulers[2] = (bin%intervals[2] + center)*deltas[2] Eulers[1] = (bin//intervals[2]%intervals[1] + center)*deltas[1] @@ -45,7 +45,7 @@ def binAsEulers(bin,intervals,deltas,center): return Eulers def directInvRepetitions(probability,scale): - """ calculate number of samples drawn by direct inversion """ + """calculate number of samples drawn by direct inversion""" nDirectInv = 0 for bin in range(len(probability)): # loop over bins nDirectInv += int(round(probability[bin]*scale)) # calc repetition @@ -55,15 +55,12 @@ def directInvRepetitions(probability,scale): # ---------------------- sampling methods ----------------------------------------------------------------------- # ----- efficient algorithm --------- - def directInversion (ODF,nSamples): - """ ODF contains 'dV_V' (normalized to 1), 'center', 'intervals', 'limits' (in radians) """ - + """ODF contains 'dV_V' (normalized to 1), 'center', 'intervals', 'limits' (in 
radians)""" nOptSamples = max(ODF['nNonZero'],nSamples) # random subsampling if too little samples requested nInvSamples = 0 repetition = [None]*ODF['nBins'] - probabilityScale = nOptSamples # guess scaleLower = 0.0 nInvSamplesLower = 0 @@ -96,7 +93,7 @@ def directInversion (ODF,nSamples): for bin in range(ODF['nBins']): # loop over bins repetition[bin] = int(round(ODF['dV_V'][bin]*scale)) # calc repetition - # build set +# build set set = [None]*nInvSamples i = 0 for bin in range(ODF['nBins']): @@ -110,7 +107,6 @@ def directInversion (ODF,nSamples): if (j == nInvSamples-1): ex = j else: ex = int(round(random.uniform(j+0.5,nInvSamples-0.5))) bin = set[ex] - bins = binAsBins(bin,ODF['interval']) # PE: why are we doing this?? Eulers = binAsEulers(bin,ODF['interval'],ODF['delta'],ODF['center']) orientations[j] = np.degrees(Eulers) reconstructedODF[bin] += unitInc @@ -122,8 +118,7 @@ def directInversion (ODF,nSamples): # ----- trial and error algorithms --------- def MonteCarloEulers (ODF,nSamples): - """ ODF contains 'dV_V' (normalized to 1), 'center', 'intervals', 'limits' (in radians) """ - + """ODF contains 'dV_V' (normalized to 1), 'center', 'intervals', 'limits' (in radians)""" countMC = 0 maxdV_V = max(ODF['dV_V']) orientations = np.zeros((nSamples,3),'f') @@ -146,8 +141,7 @@ def MonteCarloEulers (ODF,nSamples): def MonteCarloBins (ODF,nSamples): - """ ODF contains 'dV_V' (normalized to 1), 'center', 'intervals', 'limits' (in radians) """ - + """ODF contains 'dV_V' (normalized to 1), 'center', 'intervals', 'limits' (in radians)""" countMC = 0 maxdV_V = max(ODF['dV_V']) orientations = np.zeros((nSamples,3),'f') @@ -169,8 +163,7 @@ def MonteCarloBins (ODF,nSamples): def TothVanHoutteSTAT (ODF,nSamples): - """ ODF contains 'dV_V' (normalized to 1), 'center', 'intervals', 'limits' (in radians) """ - + """ODF contains 'dV_V' (normalized to 1), 'center', 'intervals', 'limits' (in radians)""" orientations = np.zeros((nSamples,3),'f') reconstructedODF = np.zeros(ODF['nBins'],'f') unitInc = 1.0/nSamples @@ -199,7 +192,7 @@ def TothVanHoutteSTAT (ODF,nSamples): # -------------------------------------------------------------------- # MAIN # -------------------------------------------------------------------- -parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """ +parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description =""" Transform linear binned ODF data into given number of orientations. 
IA: integral approximation, STAT: Van Houtte, MC: Monte Carlo @@ -251,7 +244,7 @@ for name in filenames: continue damask.util.report(scriptName,name) - randomSeed = int(os.urandom(4).encode('hex'), 16) if options.randomSeed == None else options.randomSeed # random seed per file for second phase + randomSeed = int(os.urandom(4).encode('hex'), 16) if options.randomSeed is None else options.randomSeed # random seed per file for second phase random.seed(randomSeed) # ------------------------------------------ read header and data --------------------------------- @@ -308,13 +301,13 @@ for name in filenames: 'Reference Integral: %12.11f\n'%(ODF['limit'][0]*ODF['limit'][2]*(1-math.cos(ODF['limit'][1]))), ]) - # call methods +# call methods Functions = {'IA': 'directInversion', 'STAT': 'TothVanHoutteSTAT', 'MC': 'MonteCarloBins'} method = Functions[options.algorithm] Orientations, ReconstructedODF = (globals()[method])(ODF,nSamples) - # calculate accuracy of sample +# calculate accuracy of sample squaredDiff = {'orig':0.0,method:0.0} squaredRelDiff = {'orig':0.0,method:0.0} mutualProd = {'orig':0.0,method:0.0} @@ -375,7 +368,7 @@ for name in filenames: '(gauss) phi1 {} Phi {} phi2 {} scatter 0.0 fraction 1.0'.format(*eulers), ] - #--- output finalization -------------------------------------------------------------------------- +#--- output finalization -------------------------------------------------------------------------- with (open(os.path.splitext(name)[0]+'_'+method+'_'+str(nSamples)+'_material.config','w')) as outfile: outfile.write('\n'.join(materialConfig)+'\n') diff --git a/processing/pre/marc_addUserOutput.py b/processing/pre/marc_addUserOutput.py index b81c0d826..c453be63f 100755 --- a/processing/pre/marc_addUserOutput.py +++ b/processing/pre/marc_addUserOutput.py @@ -1,13 +1,14 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -''' +""" Writes meaningful labels to the marc input file (*.dat) -based on the files + +output is based on the files .output that are written during the first run of the model. 
-''' -import sys,os,re,string +""" +import sys,os,re from optparse import OptionParser import damask @@ -16,7 +17,6 @@ scriptID = ' '.join([scriptName,damask.version]) # ----------------------------- def ParseOutputFormat(filename,what,me): -# ----------------------------- format = {'outputs':{},'specials':{'brothers':[]}} outputmetafile = filename+'.output'+what @@ -121,7 +121,7 @@ for file in files: for what in me: outputFormat[what] = ParseOutputFormat(formatFile,what,me[what]) - if not '_id' in outputFormat[what]['specials']: + if '_id' not in outputFormat[what]['specials']: print "'%s' not found in <%s>"%(me[what],what) print '\n'.join(map(lambda x:' '+x,outputFormat[what]['specials']['brothers'])) sys.exit(1) diff --git a/processing/pre/mentat_pbcOnBoxMesh.py b/processing/pre/mentat_pbcOnBoxMesh.py index ca18af0b5..7213bbd42 100755 --- a/processing/pre/mentat_pbcOnBoxMesh.py +++ b/processing/pre/mentat_pbcOnBoxMesh.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import sys,os,string +import sys,os import numpy as np from optparse import OptionParser import damask @@ -17,14 +17,13 @@ def outMentat(cmd,locals): exec(cmd[3:]) elif cmd[0:3] == '(?)': cmd = eval(cmd[3:]) - py_send(cmd) + py_mentat.py_send(cmd) else: - py_send(cmd) + py_mentat.py_send(cmd) return #------------------------------------------------------------------------------------------------- def outFile(cmd,locals,dest): -#------------------------------------------------------------------------------------------------- if cmd[0:3] == '(!)': exec(cmd[3:]) elif cmd[0:3] == '(?)': @@ -37,7 +36,6 @@ def outFile(cmd,locals,dest): #------------------------------------------------------------------------------------------------- def output(cmds,locals,dest): -#------------------------------------------------------------------------------------------------- for cmd in cmds: if isinstance(cmd,list): output(cmd,locals,dest) @@ -58,12 +56,12 @@ def servoLink(): 'max': np.zeros(3,dtype='d'), 'delta': np.zeros(3,dtype='d'), } - Nnodes = py_get_int("nnodes()") + Nnodes = py_mentat.py_get_int("nnodes()") NodeCoords = np.zeros((Nnodes,3),dtype='d') for node in xrange(Nnodes): - NodeCoords[node,0] = py_get_float("node_x(%i)"%(node+1)) - NodeCoords[node,1] = py_get_float("node_y(%i)"%(node+1)) - NodeCoords[node,2] = py_get_float("node_z(%i)"%(node+1)) + NodeCoords[node,0] = py_mentat.py_get_float("node_x(%i)"%(node+1)) + NodeCoords[node,1] = py_mentat.py_get_float("node_y(%i)"%(node+1)) + NodeCoords[node,2] = py_mentat.py_get_float("node_z(%i)"%(node+1)) box['min'] = NodeCoords.min(axis=0) # find the bounding box box['max'] = NodeCoords.max(axis=0) box['delta'] = box['max']-box['min'] @@ -79,7 +77,6 @@ def servoLink(): #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
# loop over all nodes for node in xrange(Nnodes): - pos = {} key = {} maxFlag = [False, False, False] Nmax = 0 @@ -97,7 +94,7 @@ def servoLink(): maxFlag[coord] = True # remember face membership (for linked nodes) if Nmin > 0: # node is on a back face - # prepare for any non-existing entries in the data structure + # prepare for any non-existing entries in the data structure if key['x'] not in baseNode.keys(): baseNode[key['x']] = {} if key['y'] not in baseNode[key['x']].keys(): @@ -132,7 +129,8 @@ def servoLink(): ]) for i in range(nLinks): cmds.append([ - "*link_class servo *servo_ret_node %i %i"%(i+1,baseNode["%.8e"%linkCoord[i][0]]["%.8e"%linkCoord[i][1]]["%.8e"%linkCoord[i][2]]), + "*link_class servo *servo_ret_node %i %i"\ + %(i+1,baseNode["%.8e"%linkCoord[i][0]]["%.8e"%linkCoord[i][1]]["%.8e"%linkCoord[i][2]]), "*link_class servo *servo_ret_dof %i %i"%(i+1,dof), "*link_class servo *servo_ret_coef %i 1"%(i+1), ]) @@ -166,7 +164,7 @@ else: file={'croak':sys.stdout} try: - from py_mentat import * + import py_mentat except: file['croak'].write('error: no valid Mentat release found') sys.exit(-1) @@ -176,8 +174,9 @@ outputLocals = {} file['croak'].write('\033[1m'+scriptName+'\033[0m\n\n') file['croak'].write( 'waiting to connect...\n') try: - py_connect('',options.port) - output(['*draw_manual'],outputLocals,'Mentat') # prevent redrawing in Mentat, should be much faster. Since py_connect has no return value, try this to determine if failed or not + py_mentat.py_connect('',options.port) +# prevent redrawing in Mentat, should be much faster. Since py_connect has no return value, try this to determine if failed or not + output(['*draw_manual'],outputLocals,'Mentat') except: file['croak'].write('Could not connect. Set Tools/Python/"Run as Separate Process" & "Initiate"...\n') sys.exit() @@ -191,7 +190,7 @@ output(['*remove_all_servos', cmds = servoLink() output(cmds,outputLocals,'Mentat') -py_disconnect() +py_mentat.py_disconnect() if options.verbose: output(cmds,outputLocals,sys.stdout) diff --git a/processing/pre/mentat_spectralBox.py b/processing/pre/mentat_spectralBox.py index 780698564..07154ea42 100755 --- a/processing/pre/mentat_spectralBox.py +++ b/processing/pre/mentat_spectralBox.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os, sys, string +import os,sys import numpy as np from optparse import OptionParser import damask @@ -12,19 +12,17 @@ sys.path.append(damask.solver.Marc().libraryPath('../../')) #------------------------------------------------------------------------------------------------- def outMentat(cmd,locals): -#------------------------------------------------------------------------------------------------- if cmd[0:3] == '(!)': exec(cmd[3:]) elif cmd[0:3] == '(?)': cmd = eval(cmd[3:]) - py_send(cmd) + py_mentat.py_send(cmd) else: - py_send(cmd) + py_mentat.py_send(cmd) return #------------------------------------------------------------------------------------------------- def outFile(cmd,locals,dest): -#------------------------------------------------------------------------------------------------- if cmd[0:3] == '(!)': exec(cmd[3:]) elif cmd[0:3] == '(?)': @@ -36,7 +34,6 @@ def outFile(cmd,locals,dest): #------------------------------------------------------------------------------------------------- def output(cmds,locals,dest): -#------------------------------------------------------------------------------------------------- for cmd in cmds: if isinstance(cmd,list): output(cmd,locals,dest) @@ -51,26 +48,24 @@ def 
output(cmds,locals,dest): #------------------------------------------------------------------------------------------------- def init(): -#------------------------------------------------------------------------------------------------- - return [ - "#"+' '.join([scriptID] + sys.argv[1:]), - "*draw_manual", # prevent redrawing in Mentat, should be much faster - "*new_model yes", - "*reset", - "*select_clear", - "*set_element_class hex8", - "*set_nodes off", - "*elements_solid", - "*show_view 4", - "*reset_view", - "*view_perspective", - "*redraw", - ] + return [ + "#"+' '.join([scriptID] + sys.argv[1:]), + "*draw_manual", # prevent redrawing in Mentat, should be much faster + "*new_model yes", + "*reset", + "*select_clear", + "*set_element_class hex8", + "*set_nodes off", + "*elements_solid", + "*show_view 4", + "*reset_view", + "*view_perspective", + "*redraw", + ] #------------------------------------------------------------------------------------------------- def mesh(r,d): -#------------------------------------------------------------------------------------------------- return [ "*add_nodes", "%f %f %f"%(0.0,0.0,0.0), @@ -102,7 +97,6 @@ def mesh(r,d): #------------------------------------------------------------------------------------------------- def material(): -#------------------------------------------------------------------------------------------------- cmds = [\ "*new_mater standard", "*mater_option general:state:solid", @@ -112,7 +106,7 @@ def material(): "*add_mater_elements", "all_existing", "*geometry_type mech_three_solid", -# "*geometry_option red_integ_capacity:on", # see below: reduced integration with one IP gave trouble being always OUTDATED... +# "*geometry_option red_integ_capacity:on", reduced integration with one IP gave trouble being always OUTDATED... "*add_geometry_elements", "all_existing", ] @@ -122,13 +116,13 @@ def material(): #------------------------------------------------------------------------------------------------- def geometry(): -#------------------------------------------------------------------------------------------------- cmds = [\ "*geometry_type mech_three_solid", # "*geometry_option red_integ_capacity:on", "*add_geometry_elements", "all_existing", - "*element_type 7", # we are NOT using reduced integration (type 117) but opt for /elementhomogeneous/ in the respective phase description (material.config) +# we are NOT using reduced integration (type 117) but opt for /elementhomogeneous/ in the respective phase description (material.config) + "*element_type 7", "all_existing", ] @@ -137,7 +131,6 @@ def geometry(): #------------------------------------------------------------------------------------------------- def initial_conditions(homogenization,microstructures): -#------------------------------------------------------------------------------------------------- elements = [] element = 0 for id in microstructures: @@ -204,7 +197,7 @@ parser.set_defaults(port = None, if options.port: try: - from py_mentat import * + import py_mentat except: parser.error('no valid Mentat release found.') @@ -258,9 +251,9 @@ for name in filenames: outputLocals = {} if options.port: - py_connect('',options.port) + py_mentat.py_connect('',options.port) output(cmds,outputLocals,'Mentat') - py_disconnect() + py_mentat.py_disconnect() else: output(cmds,outputLocals,table.__IO__['out']) # bad hack into internals of table class... 
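The Mentat-facing scripts above now import py_mentat as a module and qualify every call (py_mentat.py_send instead of a star import), which makes the Mentat dependency explicit and keeps the module namespace clean. Their shared connect/send/disconnect skeleton, using only calls that appear in these scripts (the port number is made up):

    import sys
    try:
        import py_mentat                                 # MSC.Mentat Python interface
    except ImportError:
        sys.exit('error: no valid Mentat release found')

    py_mentat.py_connect('', 40007)                      # hypothetical port
    py_mentat.py_send('*draw_manual')                    # suppress redrawing for speed
    nNodes = py_mentat.py_get_int('nnodes()')
    x0 = py_mentat.py_get_float('node_x(1)')
    py_mentat.py_disconnect()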
diff --git a/processing/pre/patchFromReconstructedBoundaries.py b/processing/pre/patchFromReconstructedBoundaries.py index 9dd04f773..31d8a7461 100755 --- a/processing/pre/patchFromReconstructedBoundaries.py +++ b/processing/pre/patchFromReconstructedBoundaries.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import sys,os,math,re,string +import sys,os,math,re from optparse import OptionParser import damask @@ -18,7 +18,7 @@ except: sys.path.append(damask.solver.Marc().libraryPath('../../')) try: # check for MSC.Mentat Python interface - from py_mentat import * + import py_mentat MentatCapability = True except: MentatCapability = False @@ -29,10 +29,10 @@ def outMentat(cmd,locals): exec(cmd[3:]) elif cmd[0:3] == '(?)': cmd = eval(cmd[3:]) - py_send(cmd) + py_mentat.py_send(cmd) if 'log' in locals: locals['log'].append(cmd) else: - py_send(cmd) + py_mentat.py_send(cmd) if 'log' in locals: locals['log'].append(cmd) return @@ -83,10 +83,9 @@ def rcbOrientationParser(content,idcolumn): return grains -def rcbParser(content,M,size,tolerance,idcolumn,segmentcolumn): # parser for TSL-OIM reconstructed boundary files - +def rcbParser(content,M,size,tolerance,idcolumn,segmentcolumn): + """parser for TSL-OIM reconstructed boundary files""" # find bounding box - boxX = [1.*sys.maxint,-1.*sys.maxint] boxY = [1.*sys.maxint,-1.*sys.maxint] x = [0.,0.] @@ -145,8 +144,8 @@ def rcbParser(content,M,size,tolerance,idcolumn,segmentcolumn): # parser for T match = True break break +# force to boundary if inside tolerance to it if (not match): - # force to boundary if inside tolerance to it if (abs(x[i])= y) # am I above testpoint? - for i in range(npoints): # loop through all points - (x2,y2) = points[i] # next point - endover = (y2 >= y) # am I above testpoint? - if (startover != endover): # one above one below testpoint? - if((y2 - y)*(x2 - x1) <= (y2 - y1)*(x2 - x)): # check for intersection - if (endover): - inside = not inside # found intersection - else: - if (not endover): - inside = not inside # found intersection - startover = endover # make second point first point - (x1,y1) = (x2,y2) - - return inside +def inside(x,y,points): + """tests whether point(x,y) is within polygon described by points""" + inside = False + npoints=len(points) + (x1,y1) = points[npoints-1] # start with last point of points + startover = (y1 >= y) # am I above testpoint? + for i in range(npoints): # loop through all points + (x2,y2) = points[i] # next point + endover = (y2 >= y) # am I above testpoint? + if (startover != endover): # one above one below testpoint? 
+ if((y2 - y)*(x2 - x1) <= (y2 - y1)*(x2 - x)): # check for intersection + if (endover): + inside = not inside # found intersection + else: + if (not endover): + inside = not inside # found intersection + startover = endover # make second point first point + (x1,y1) = (x2,y2) + + return inside # ------------------------- -def fftbuild(rcData,height,xframe,yframe,resolution,extrusion): # build array of grain numbers -# ------------------------- - maxX = -1.*sys.maxint - maxY = -1.*sys.maxint - for line in rcData['point']: # find data range - (x,y) = line - maxX = max(maxX, x) - maxY = max(maxY, y) - xsize = maxX+2*xframe # add framsize - ysize = maxY+2*yframe - xres = int(round(resolution/2.0)*2) # use only even resolution - yres = int(round(xres/xsize*ysize/2.0)*2) # calculate other resolutions - zres = extrusion - zsize = extrusion*min([xsize/xres,ysize/yres]) +def fftbuild(rcData,height,xframe,yframe,resolution,extrusion): + """build array of grain numbers""" + maxX = -1.*sys.maxint + maxY = -1.*sys.maxint + for line in rcData['point']: # find data range + (x,y) = line + maxX = max(maxX, x) + maxY = max(maxY, y) + xsize = maxX+2*xframe # add frame size + ysize = maxY+2*yframe + xres = int(round(resolution/2.0)*2) # use only even resolution + yres = int(round(xres/xsize*ysize/2.0)*2) # calculate other resolutions + zres = extrusion + zsize = extrusion*min([xsize/xres,ysize/yres]) + + fftdata = {'fftpoints':[], \ + 'resolution':(xres,yres,zres), \ + 'dimension':(xsize,ysize,zsize)} + + frameindex=len(rcData['grain'])+1 # calculate frame index as largest grain index plus one + dx = xsize/(xres+1) # calculate step sizes + dy = ysize/(yres+1) - fftdata = {'fftpoints':[], \ - 'resolution':(xres,yres,zres), \ - 'dimension':(xsize,ysize,zsize)} + grainpoints = [] + for segments in rcData['grain']: # get segments of each grain + points = {} + for i,segment in enumerate(segments[:-1]): # loop thru segments except last (s=[start,end]) + points[rcData['segment'][segment][0]] = i # assign segment index to start point + points[rcData['segment'][segment][1]] = i # assign segment index to end point + for i in range(2): # check points of last segment + if points[rcData['segment'][segments[-1]][i]] != 0: # not on first segment + points[rcData['segment'][segments[-1]][i]] = len(segments)-1 # assign segment index to last point + + grainpoints.append([]) # start out blank for current grain + for p in sorted(points, key=points.get): # loop thru set of sorted points + grainpoints[-1].append([rcData['point'][p][0],rcData['point'][p][1]]) # append x,y of 
point - - bestGuess = 0 # assume grain 0 as best guess - for i in range(int(xres*yres)): # walk through all points in xy plane - xtest = -xframe+((i%xres)+0.5)*dx # calculate coordinates - ytest = -yframe+(int(i/xres)+0.5)*dy - if(xtest < 0 or xtest > maxX): # check wether part of frame - if( ytest < 0 or ytest > maxY): # part of edges - fftdata['fftpoints'].append(frameindex+2) # append frameindex to result array - else: # part of xframe - fftdata['fftpoints'].append(frameindex) # append frameindex to result array - elif( ytest < 0 or ytest > maxY): # part of yframe - fftdata['fftpoints'].append(frameindex+1) # append frameindex to result array - else: - if inside(xtest,ytest,grainpoints[bestGuess]): # check best guess first - fftdata['fftpoints'].append(bestGuess+1) - else: # no success - for g in range(len(grainpoints)): # test all - if inside(xtest,ytest,grainpoints[g]): - fftdata['fftpoints'].append(g+1) - bestGuess = g - break - - return fftdata + bestGuess = 0 # assume grain 0 as best guess + for i in range(int(xres*yres)): # walk through all points in xy plane + xtest = -xframe+((i%xres)+0.5)*dx # calculate coordinates + ytest = -yframe+(int(i/xres)+0.5)*dy + if(xtest < 0 or xtest > maxX): # check whether part of frame + if( ytest < 0 or ytest > maxY): # part of edges + fftdata['fftpoints'].append(frameindex+2) # append frameindex to result array + else: # part of xframe + fftdata['fftpoints'].append(frameindex) # append frameindex to result array + elif( ytest < 0 or ytest > maxY): # part of yframe + fftdata['fftpoints'].append(frameindex+1) # append frameindex to result array + else: + if inside(xtest,ytest,grainpoints[bestGuess]): # check best guess first + fftdata['fftpoints'].append(bestGuess+1) + else: # no success + for g in range(len(grainpoints)): # test all + if inside(xtest,ytest,grainpoints[g]): + fftdata['fftpoints'].append(g+1) + bestGuess = g + break + + return fftdata # ----------------------- MAIN ------------------------------- @@ -893,7 +893,8 @@ if 'spectral' in options.output: (y+1)*fftdata['resolution'][0]]))+'\n') # grain indexes, x-row per line geomFile.close() # close geom file - print('assigned %i out of %i (2D) Fourier points.'%(len(fftdata['fftpoints']), int(fftdata['resolution'][0])*int(fftdata['resolution'][1]))) + print('assigned %i out of %i (2D) Fourier points.'\ + %(len(fftdata['fftpoints']), int(fftdata['resolution'][0])*int(fftdata['resolution'][1]))) # ----- write Mentat procedure ----- @@ -926,12 +927,12 @@ if 'mentat' in options.output: ] outputLocals = {'log':[]} - if (options.port != None): - py_connect('',options.port) + if (options.port is not None): + py_mentat.py_connect('',options.port) try: output(cmds,outputLocals,'Mentat') finally: - py_disconnect() + py_mentat.py_disconnect() if 'procedure' in options.output: output(outputLocals['log'],outputLocals,'Stdout') else: @@ -976,7 +977,8 @@ if 'mentat' in options.output or 'spectral' in options.output: for grain in rcData['grainMapping']: output += '\n[grain %i]\n'%grain + \ '(gauss)\tphi1\t%f\tphi\t%f\tphi2\t%f\tscatter\t%f\tfraction\t1.0\n'\ - %(math.degrees(orientationData[grain-1][0]),math.degrees(orientationData[grain-1][1]),math.degrees(orientationData[grain-1][2]),options.scatter) + %(math.degrees(orientationData[grain-1][0]),math.degrees(orientationData[grain-1][1]),\ + math.degrees(orientationData[grain-1][2]),options.scatter) if (options.xmargin > 0.0 or options.ymargin > 0.0): output += '\n[margin]\n' + \ '(random)\t\tscatter\t0.0\tfraction\t1.0\n'
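The rewritten inside() in the patch above is a standard even-odd ray-casting test: walk the polygon edges and toggle membership at every edge that crosses the horizontal line through the query point. A compact sketch of the same rule, assuming a polygon passed as a list of (x,y) vertex tuples (function name chosen here for illustration):

def even_odd_inside(x,y,poly):
    """even-odd ray casting: count crossings of a horizontal ray cast from (x,y)"""
    inside = False
    x1,y1 = poly[-1]                                    # close the polygon at its last vertex
    for x2,y2 in poly:
        if (y1 >= y) != (y2 >= y):                      # edge straddles the test height
            if x < x1 + (y - y1)*(x2 - x1)/(y2 - y1):   # crossing lies right of the point
                inside = not inside
        x1,y1 = x2,y2
    return inside

print(even_odd_inside(0.5,0.5,[(0,0),(1,0),(1,1),(0,1)]))   # True: centre of unit square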
diff --git a/processing/pre/seeds_check.py b/processing/pre/seeds_check.py index 7460fa9a3..27341e0fe 100755 --- a/processing/pre/seeds_check.py +++ b/processing/pre/seeds_check.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string,vtk +import os,sys,vtk import numpy as np import damask from optparse import OptionParser @@ -66,8 +66,8 @@ for name in filenames: if info['size'][i] <= 0.0: # any invalid size? info['size'][i] = float(info['grid'][i])/max(info['grid']) # normalize to grid remarks.append('rescaling size {} to {}...'.format({0:'x',1:'y',2:'z'}[i],info['size'][i])) - if table.label_dimension(options.position) != 3: errors.append('columns "{}" have dimension {}'.format(options.position, - table.label_dimension(options.position))) + if table.label_dimension(options.position) != 3: + errors.append('columns "{}" have dimension {}'.format(options.position,table.label_dimension(options.position))) if remarks != []: damask.util.croak(remarks) if errors != []: damask.util.croak(errors) @@ -110,8 +110,7 @@ for name in filenames: (directory,filename) = os.path.split(name) writer.SetDataModeToBinary() writer.SetCompressorTypeToZLib() - writer.SetFileName(os.path.join(directory,os.path.splitext(filename)[0] - +'.'+writer.GetDefaultFileExtension())) + writer.SetFileName(os.path.join(directory,os.path.splitext(filename)[0]+'.'+writer.GetDefaultFileExtension())) else: writer = vtk.vtkDataSetWriter() writer.WriteToOutputStringOn() @@ -120,7 +119,7 @@ for name in filenames: if vtk.VTK_MAJOR_VERSION <= 5: writer.SetInput(grid) else: writer.SetInputData(grid) writer.Write() - if name == None: sys.stdout.write(writer.GetOutputString()[0:writer.GetOutputStringLength()]) + if name is None: sys.stdout.write(writer.GetOutputString()[0:writer.GetOutputStringLength()]) table.close() diff --git a/processing/pre/seeds_fromDistribution.py b/processing/pre/seeds_fromDistribution.py index a68c8c802..d2b51519d 100755 --- a/processing/pre/seeds_fromDistribution.py +++ b/processing/pre/seeds_fromDistribution.py @@ -1,7 +1,7 @@ #!/usr/bin/python # -*- coding: UTF-8 no BOM -*- -import threading,time,os,subprocess,shlex,string,sys,random +import threading,time,os,sys,random import numpy as np from optparse import OptionParser from operator import mul @@ -16,10 +16,8 @@ currentSeedsName = None #--------------------------------------------------------------------------------------------------- class myThread (threading.Thread): -#--------------------------------------------------------------------------------------------------- - ''' - perturbes seed in seed file, performes Voronoi tessellation, evaluates, and updates best match - ''' + """perturbs seed in seed file, performs Voronoi tessellation, evaluates, and updates best match""" + def __init__(self, threadID): threading.Thread.__init__(self) self.threadID = threadID @@ -40,17 +38,17 @@ class myThread (threading.Thread): s.release() random.seed(options.randomSeed+self.threadID) # initializes to given seeds - knownSeedsUpdate = bestSeedsUpdate -1.0 # trigger update of local best seeds (time when the best seed file was found known to thread) + knownSeedsUpdate = bestSeedsUpdate -1.0 # trigger update of local best seeds randReset = True # aquire new direction - myBestSeedsVFile = StringIO() # in-memory file to store local copy of best seeds file - perturbedSeedsVFile = StringIO() # in-memory file for perturbed best seeds file - perturbedGeomVFile = StringIO() # in-memory file for tessellated geom file + myBestSeedsVFile = StringIO() # 
store local copy of best seeds file + perturbedSeedsVFile = StringIO() # perturbed best seeds file + perturbedGeomVFile = StringIO() # tessellated geom file #--- still not matching desired bin class ---------------------------------------------------------- while bestMatch < options.threshold: - s.acquire() # accessing global data, ensure only one thread does it per time - if bestSeedsUpdate > knownSeedsUpdate: # if a newer best seed file exist, read it into a virtual file + s.acquire() # ensure only one thread accesses global data + if bestSeedsUpdate > knownSeedsUpdate: # write best fit to virtual file knownSeedsUpdate = bestSeedsUpdate bestSeedsVFile.reset() myBestSeedsVFile.close() @@ -78,7 +76,7 @@ class myThread (threading.Thread): perturbedSeedsVFile = StringIO() myBestSeedsVFile.reset() - perturbedSeedsTable = damask.ASCIItable(myBestSeedsVFile,perturbedSeedsVFile,labeled=True) # read current best fitting seed file and to perturbed seed file + perturbedSeedsTable = damask.ASCIItable(myBestSeedsVFile,perturbedSeedsVFile,labeled=True) # write best fit to perturbed seed file perturbedSeedsTable.head_read() perturbedSeedsTable.head_write() outputAlive=True @@ -87,7 +85,7 @@ class myThread (threading.Thread): while outputAlive and perturbedSeedsTable.data_read(): # perturbe selected microstructure if ms in selectedMs: newCoords=np.array(tuple(map(float,perturbedSeedsTable.data[0:3]))+direction[i]) - newCoords=np.where(newCoords>=1.0,newCoords-1.0,newCoords) # ensure that the seeds remain in the box (move one side out, other side in) + newCoords=np.where(newCoords>=1.0,newCoords-1.0,newCoords) # ensure that the seeds remain in the box newCoords=np.where(newCoords <0.0,newCoords+1.0,newCoords) perturbedSeedsTable.data[0:3]=[format(f, '8.6f') for f in newCoords] direction[i]*=2. 
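The pair of np.where calls in this hunk implements a periodic wrap: a seed perturbed across a face of the unit box re-enters from the opposite face, so the seed count inside the box is conserved. A self-contained sketch of that wrap, assuming (as the script does) perturbations smaller than one box edge:

import numpy as np

def wrap_unit_box(coords):
    """map perturbed fractional coordinates back into [0,1) periodically"""
    coords = np.where(coords >= 1.0, coords - 1.0, coords)   # out on the high side: re-enter low
    coords = np.where(coords <  0.0, coords + 1.0, coords)   # out on the low side: re-enter high
    return coords

print(wrap_unit_box(np.array([1.05, -0.02, 0.5])))           # -> [ 0.05  0.98  0.5 ]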
@@ -115,8 +113,9 @@ class myThread (threading.Thread): for i in xrange(nMicrostructures): # calculate the deviation in all bins per histogram currentHist.append(np.histogram(currentData,bins=target[i]['bins'])[0]) currentError.append(np.sqrt(np.square(np.array(target[i]['histogram']-currentHist[i])).sum())) - - if currentError[0]>0.0: # as long as not all grains are within the range of the target, use the deviation to left and right as error + +# as long as not all grains are within the range of the target, use the deviation to left and right as error + if currentError[0]>0.0: currentError[0] *=((target[0]['bins'][0]-np.min(currentData))**2.0+ (target[0]['bins'][1]-np.max(currentData))**2.0)**0.5 # norm of deviations by number of usual bin deviation s.acquire() # do the evaluation serially @@ -134,10 +133,10 @@ class myThread (threading.Thread): break elif currentError[i] < target[i]['error']: # better fit bestSeedsUpdate = time.time() # save time of better fit - damask.util.croak('Thread %i: Better match (%i bins, %6.4f --> %6.4f)' - %(self.threadID,i+1,target[i]['error'],currentError[i])) - damask.util.croak(' target: ',target[i]['histogram']) - damask.util.croak(' best: ',currentHist[i]) + damask.util.croak('Thread {:d}: Better match ({:d} bins, {:6.4f} --> {:6.4f})'\ + .format(self.threadID,i+1,target[i]['error'],currentError[i])) + damask.util.croak(' target: '+np.array_str(target[i]['histogram'])) + damask.util.croak(' best: '+np.array_str(currentHist[i])) currentSeedsName = baseFile+'_'+str(bestSeedsUpdate).replace('.','-') # name of new seed file (use time as unique identifier) perturbedSeedsVFile.reset() bestSeedsVFile.close() @@ -150,11 +149,11 @@ class myThread (threading.Thread): for j in xrange(nMicrostructures): # save new errors for all bins target[j]['error'] = currentError[j] if myMatch > match: # one or more new bins have no deviation - damask.util.croak( 'Stage %i cleared'%(myMatch)) + damask.util.croak( 'Stage {:d} cleared'.format(myMatch)) match=myMatch sys.stdout.flush() break - if i == min(nMicrostructures,myMatch+options.bins)-1: # same quality as before (for the considered bins): take it to keep on moving + if i == min(nMicrostructures,myMatch+options.bins)-1: # same quality as before: take it to keep on moving bestSeedsUpdate = time.time() perturbedSeedsVFile.reset() bestSeedsVFile.close() @@ -165,8 +164,8 @@ class myThread (threading.Thread): target[j]['error'] = currentError[j] randReset = True else: #--- not all grains are tessellated - damask.util.croak('Thread %i: Microstructure mismatch (%i microstructures mapped)' - %(self.threadID,myNmicrostructures)) + damask.util.croak('Thread {:d}: Microstructure mismatch ({:d} microstructures mapped)'\ + .format(self.threadID,myNmicrostructures)) randReset = True @@ -215,7 +214,7 @@ options = parser.parse_args()[0] damask.util.report(scriptName,options.seedFile) -if options.randomSeed == None: +if options.randomSeed is None: options.randomSeed = int(os.urandom(4).encode('hex'), 16) damask.util.croak(options.randomSeed) delta = (options.scale/options.grid[0],options.scale/options.grid[1],options.scale/options.grid[2]) @@ -244,7 +243,7 @@ if os.path.isfile(os.path.splitext(options.seedFile)[0]+'.seeds'): else: bestSeedsVFile.write(damask.util.execute('seeds_fromRandom'+\ ' -g '+' '.join(map(str, options.grid))+\ - ' -r %i'%options.randomSeed+\ + ' -r {:d}'.format(options.randomSeed)+\ ' -N '+str(nMicrostructures))[0]) bestSeedsUpdate = time.time() @@ -280,7 +279,7 @@ if options.maxseeds < 1: else: maxSeeds = 
options.maxseeds -if match >0: damask.util.croak('Stage %i cleared'%match) +if match >0: damask.util.croak('Stage {:d} cleared'.format(match)) sys.stdout.flush() initialGeomVFile.close() diff --git a/processing/pre/seeds_fromGeom.py b/processing/pre/seeds_fromGeom.py index b3fb1677c..b2b12b95f 100755 --- a/processing/pre/seeds_fromGeom.py +++ b/processing/pre/seeds_fromGeom.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string +import os,sys import numpy as np from optparse import OptionParser import damask @@ -88,8 +88,10 @@ for name in filenames: yy = np.tile(np.repeat(y,info['grid'][0] ),info['grid'][2]) zz = np.repeat(z,info['grid'][0]*info['grid'][1]) - mask = np.logical_and(np.in1d(microstructure,options.whitelist,invert=False) if options.whitelist != [] else np.full_like(microstructure,True,dtype=bool), - np.in1d(microstructure,options.blacklist,invert=True ) if options.blacklist != [] else np.full_like(microstructure,True,dtype=bool)) + mask = np.logical_and(np.in1d(microstructure,options.whitelist,invert=False) if options.whitelist != [] + else np.full_like(microstructure,True,dtype=bool), + np.in1d(microstructure,options.blacklist,invert=True ) if options.blacklist != [] + else np.full_like(microstructure,True,dtype=bool)) # ------------------------------------------ assemble header --------------------------------------- diff --git a/processing/pre/seeds_fromPokes.py b/processing/pre/seeds_fromPokes.py index 80f13c85c..7915e8951 100755 --- a/processing/pre/seeds_fromPokes.py +++ b/processing/pre/seeds_fromPokes.py @@ -122,10 +122,9 @@ for name in filenames: newInfo['microstructures'] = len(np.unique(seeds[:,3])) # --- report --------------------------------------------------------------------------------------- + if (newInfo['microstructures'] != info['microstructures']): + damask.util.croak('--> microstructures: %i'%newInfo['microstructures']) - remarks = [] - if ( newInfo['microstructures'] != info['microstructures']): remarks.append('--> microstructures: %i'%newInfo['microstructures']) - if remarks != []: damask.util.croak(remarks) # ------------------------------------------ assemble header --------------------------------------- table.info_clear() diff --git a/processing/pre/seeds_fromRandom.py b/processing/pre/seeds_fromRandom.py index a6ce90cec..f0a3b81df 100755 --- a/processing/pre/seeds_fromRandom.py +++ b/processing/pre/seeds_fromRandom.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,string,math,random +import os,sys,math,random import numpy as np import damask from optparse import OptionParser,OptionGroup @@ -14,9 +14,7 @@ scriptID = ' '.join([scriptName,damask.version]) # ------------------------------------------ aux functions --------------------------------- def kdtree_search(cloud, queryPoints): - ''' - find distances to nearest neighbor among cloud (N,d) for each of the queryPoints (n,d) - ''' + """find distances to nearest neighbor among cloud (N,d) for each of the queryPoints (n,d)""" n = queryPoints.shape[0] distances = np.zeros(n,dtype=float) tree = spatial.cKDTree(cloud) @@ -50,8 +48,11 @@ parser.add_option('-m', '--microstructure', parser.add_option('-r', '--rnd', dest = 'randomSeed', type = 'int', metavar = 'int', help = 'seed of random number generator [%default]') +parser.add_option('--format', + dest = 'format', type = 'string', metavar = 'string', + help = 'number format of output [auto]') -group = OptionGroup(parser, "Laguerre Tessellation Options", +group = 
OptionGroup(parser, "Laguerre Tessellation", "Parameters determining shape of weight distribution of seed points" ) group.add_option('-w', '--weights', @@ -72,8 +73,8 @@ group.add_option('--sigma', help='standard deviation of normal distribution for weights [%default]') parser.add_option_group(group) -group = OptionGroup(parser, "Selective Seeding Options", - "More uniform distribution of seed points using Mitchell\'s Best Candidate Algorithm" +group = OptionGroup(parser, "Selective Seeding", + "More uniform distribution of seed points using Mitchell's Best Candidate Algorithm" ) group.add_option('-s','--selective', action = 'store_true', @@ -105,6 +106,7 @@ parser.set_defaults(randomSeed = None, force = False, distance = 0.2, numCandidates = 10, + format = None, ) (options,filenames) = parser.parse_args() @@ -112,7 +114,7 @@ parser.set_defaults(randomSeed = None, options.grid = np.array(options.grid) gridSize = options.grid.prod() -if options.randomSeed == None: options.randomSeed = int(os.urandom(4).encode('hex'), 16) +if options.randomSeed is None: options.randomSeed = int(os.urandom(4).encode('hex'), 16) np.random.seed(options.randomSeed) # init random generators random.seed(options.randomSeed) @@ -133,10 +135,12 @@ for name in filenames: remarks = [] errors = [] - if gridSize == 0: errors.append('zero grid dimension for %s.'%(', '.join([['a','b','c'][x] for x in np.where(options.grid == 0)[0]]))) + if gridSize == 0: + errors.append('zero grid dimension for %s.'%(', '.join([['a','b','c'][x] for x in np.where(options.grid == 0)[0]]))) if options.N > gridSize/10.: errors.append('seed count exceeds 0.1 of grid points.') if options.selective and 4./3.*math.pi*(options.distance/2.)**3*options.N > 0.5: - (remarks if options.force else errors).append('maximum recommended seed point count for given distance is {}.{}'.format(int(3./8./math.pi/(options.distance/2.)**3),'..'*options.force)) + (remarks if options.force else errors).append('maximum recommended seed point count for given distance is {}.{}'. + format(int(3./8./math.pi/(options.distance/2.)**3),'..'*options.force)) if remarks != []: damask.util.croak(remarks) if errors != []: @@ -153,7 +157,7 @@ for name in filenames: if not options.selective: seeds = np.zeros((3,options.N),dtype='d') # seed positions array - gridpoints = random.sample(range(gridSize),options.N) # create random permutation of all grid positions and choose first N + gridpoints = random.sample(range(gridSize),options.N) # choose first N from random permutation of grid positions seeds[0,:] = (np.mod(gridpoints ,options.grid[0])\ +np.random.random(options.N)) /options.grid[0] @@ -174,7 +178,7 @@ for name in filenames: distances = kdtree_search(seeds[:i],candidates) best = distances.argmax() if distances[best] > options.distance: # require minimum separation - seeds[i] = candidates[best] # take candidate with maximum separation to existing point cloud + seeds[i] = candidates[best] # maximum separation to existing point cloud i += 1 if i%(options.N/100.) 
< 1: damask.util.croak('.',False) @@ -215,7 +219,7 @@ for name in filenames: # --- write seeds information ------------------------------------------------------------ table.data = seeds - table.data_writeArray() + table.data_writeArray(fmt = options.format) # --- output finalization -------------------------------------------------------------------------- diff --git a/processing/pre/seeds_fromTable.py b/processing/pre/seeds_fromTable.py index 293c1a66f..45a2b386b 100755 --- a/processing/pre/seeds_fromTable.py +++ b/processing/pre/seeds_fromTable.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,string,itertools +import os,itertools import numpy as np from optparse import OptionParser import damask @@ -50,8 +50,8 @@ parser.set_defaults(pos = 'pos', (options,filenames) = parser.parse_args() -if options.whitelist != None: options.whitelist = map(int,options.whitelist) -if options.blacklist != None: options.blacklist = map(int,options.blacklist) +if options.whitelist is not None: options.whitelist = map(int,options.whitelist) +if options.blacklist is not None: options.blacklist = map(int,options.blacklist) # --- loop over input files ------------------------------------------------------------------------- @@ -101,13 +101,11 @@ for name in filenames: # --- filtering of grain voxels -------------------------------------------------------------------- - mask = np.logical_and(\ - np.ones_like(table.data[:,3],bool) \ - if options.whitelist == None \ - else np.in1d(table.data[:,3].ravel(), options.whitelist).reshape(table.data[:,3].shape), - np.ones_like(table.data[:,3],bool) \ - if options.blacklist == None \ - else np.invert(np.in1d(table.data[:,3].ravel(), options.blacklist).reshape(table.data[:,3].shape)) + mask = np.logical_and( + np.ones_like(table.data[:,3],bool) if options.whitelist is None \ + else np.in1d(table.data[:,3].ravel(), options.whitelist).reshape(table.data[:,3].shape), + np.ones_like(table.data[:,3],bool) if options.blacklist is None \ + else np.invert(np.in1d(table.data[:,3].ravel(), options.blacklist).reshape(table.data[:,3].shape)) ) table.data = table.data[mask] diff --git a/src/CPFEM.f90 b/src/CPFEM.f90 index 91c3eaa6f..0774fba86 100644 --- a/src/CPFEM.f90 +++ b/src/CPFEM.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Franz Roters, Max-Planck-Institut für Eisenforschung GmbH !> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH !> @brief CPFEM engine diff --git a/src/CPFEM2.f90 b/src/CPFEM2.f90 index ea5691495..51a26dc55 100644 --- a/src/CPFEM2.f90 +++ b/src/CPFEM2.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id: CPFEM.f90 4761 2016-01-17 13:29:42Z MPIE\m.diehl $ -!-------------------------------------------------------------------------------------------------- !> @author Franz Roters, Max-Planck-Institut für Eisenforschung GmbH !> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH !> @brief needs a good name and description diff --git a/src/DAMASK_abaqus_exp.f b/src/DAMASK_abaqus_exp.f index eff898e3e..1fab2472d 100644 --- a/src/DAMASK_abaqus_exp.f +++ b/src/DAMASK_abaqus_exp.f @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! 
$Id$ -!-------------------------------------------------------------------------------------------------- !> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH !> @author Franz Roters, Max-Planck-Institut für Eisenforschung GmbH !> @author Koen Janssens, Paul Scherrer Institut diff --git a/src/DAMASK_abaqus_std.f b/src/DAMASK_abaqus_std.f index faec60650..cdd12dac8 100644 --- a/src/DAMASK_abaqus_std.f +++ b/src/DAMASK_abaqus_std.f @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH !> @author Franz Roters, Max-Planck-Institut für Eisenforschung GmbH !> @author Koen Janssens, Paul Scherrer Institut diff --git a/src/DAMASK_marc.f90 b/src/DAMASK_marc.f90 index 14dcc5c06..a4542f96a 100644 --- a/src/DAMASK_marc.f90 +++ b/src/DAMASK_marc.f90 @@ -11,8 +11,6 @@ #include "prec.f90" !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH !> @author Franz Roters, Max-Planck-Institut für Eisenforschung GmbH !> @author Luc Hantcherli, Max-Planck-Institut für Eisenforschung GmbH @@ -423,4 +421,4 @@ subroutine plotv(v,s,sp,etot,eplas,ecreep,t,m,nn,layer,ndi,nshear,jpltcd) if (jpltcd > materialpoint_sizeResults) call IO_error(700_pInt,jpltcd) ! complain about out of bounds error v = materialpoint_results(jpltcd,nn,mesh_FEasCP('elem', m)) -end subroutine plotv +end subroutine plotv \ No newline at end of file diff --git a/src/DAMASK_spectral.f90 b/src/DAMASK_spectral.f90 index 0d83d1279..e999bf5dc 100644 --- a/src/DAMASK_spectral.f90 +++ b/src/DAMASK_spectral.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH !> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH !> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH @@ -15,7 +13,8 @@ program DAMASK_spectral pInt, & pLongInt, & pReal, & - tol_math_check + tol_math_check, & + dNeq use DAMASK_interface, only: & DAMASK_interface_init, & loadCaseFile, & @@ -61,8 +60,6 @@ program DAMASK_spectral materialpoint_sizeResults, & materialpoint_results, & materialpoint_postResults - - use material, only: & thermal_type, & damage_type, & @@ -151,7 +148,9 @@ program DAMASK_spectral MPI_file_seek, & MPI_file_get_position, & MPI_file_write, & - MPI_allreduce + MPI_abort, & + MPI_allreduce, & + PETScFinalize !-------------------------------------------------------------------------------------------------- ! init DAMASK (all modules) @@ -331,7 +330,7 @@ program DAMASK_spectral errorID = 838_pInt ! 
no rotation is allowed by stress BC write(6,'(2x,a)') 'stress / GPa:' do i = 1_pInt, 3_pInt; do j = 1_pInt, 3_pInt - if(loadCases(currentLoadCase)%deformation%maskLogical(i,j)) then + if(loadCases(currentLoadCase)%P%maskLogical(i,j)) then write(6,'(2x,f12.7)',advance='no') loadCases(currentLoadCase)%P%values(i,j)*1e-9_pReal else write(6,'(2x,12a)',advance='no') ' * ' @@ -343,7 +342,7 @@ program DAMASK_spectral reshape(spread(tol_math_check,1,9),[ 3,3]))& .or. abs(math_det33(loadCases(currentLoadCase)%rotation)) > & 1.0_pReal + tol_math_check) errorID = 846_pInt ! given rotation matrix contains strain - if (any(loadCases(currentLoadCase)%rotation /= math_I3)) & + if (any(dNeq(loadCases(currentLoadCase)%rotation, math_I3))) & write(6,'(2x,a,/,3(3(3x,f12.7,1x)/))',advance='no') 'rotation of loadframe:',& math_transpose33(loadCases(currentLoadCase)%rotation) if (loadCases(currentLoadCase)%time < 0.0_pReal) errorID = 834_pInt ! negative time increment @@ -427,28 +426,33 @@ program DAMASK_spectral !-------------------------------------------------------------------------------------------------- ! prepare MPI parallel out (including opening of file) allocate(outputSize(worldsize), source = 0_MPI_OFFSET_KIND) - outputSize(worldrank+1) = int(size(materialpoint_results)*pReal,MPI_OFFSET_KIND) - call MPI_allreduce(MPI_IN_PLACE,outputSize,worldsize,MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get total output size over each process + outputSize(worldrank+1) = size(materialpoint_results,kind=MPI_OFFSET_KIND)*int(pReal,MPI_OFFSET_KIND) + call MPI_allreduce(MPI_IN_PLACE,outputSize,worldsize,MPI_LONG,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get total output size over each process + if(ierr /=0_pInt) call IO_error(894_pInt, ext_msg='MPI_allreduce') call MPI_file_open(PETSC_COMM_WORLD, & trim(getSolverWorkingDirectoryName())//trim(getSolverJobName())//'.spectralOut', & MPI_MODE_WRONLY + MPI_MODE_APPEND, & MPI_INFO_NULL, & resUnit, & ierr) + if(ierr /=0_pInt) call IO_error(894_pInt, ext_msg='MPI_file_open') call MPI_file_get_position(resUnit,fileOffset,ierr) ! get offset from header + if(ierr /=0_pInt) call IO_error(894_pInt, ext_msg='MPI_file_get_position') fileOffset = fileOffset + sum(outputSize(1:worldrank)) ! offset of my process in file (header + processes before me) call MPI_file_seek (resUnit,fileOffset,MPI_SEEK_SET,ierr) + if(ierr /=0_pInt) call IO_error(894_pInt, ext_msg='MPI_file_seek') if (.not. appendToOutFile) then ! if not restarting, write 0th increment do i=1, size(materialpoint_results,3)/(maxByteOut/(materialpoint_sizeResults*pReal))+1 ! slice the output of my process in chunks not exceeding the limit for one output - outputIndex=[(i-1)*((maxByteOut/pReal)/materialpoint_sizeResults)+1, & - min(i*((maxByteOut/pReal)/materialpoint_sizeResults),size(materialpoint_results,3))] + outputIndex=int([(i-1_pInt)*((maxByteOut/pReal)/materialpoint_sizeResults)+1_pInt, & + min(i*((maxByteOut/pReal)/materialpoint_sizeResults),size(materialpoint_results,3))],pLongInt) call MPI_file_write(resUnit,reshape(materialpoint_results(:,:,outputIndex(1):outputIndex(2)),& [(outputIndex(2)-outputIndex(1)+1)*materialpoint_sizeResults]), & (outputIndex(2)-outputIndex(1)+1)*materialpoint_sizeResults,& MPI_DOUBLE, MPI_STATUS_IGNORE, ierr) - fileOffset = fileOffset + sum(outputSize) ! forward to current file position + if(ierr /=0_pInt) call IO_error(894_pInt, ext_msg='MPI_file_write') enddo + fileOffset = fileOffset + sum(outputSize) ! forward to current file position if (worldrank == 0) & write(6,'(1/,a)') ' ... 
writing initial configuration to file ........................' endif @@ -647,15 +651,17 @@ program DAMASK_spectral write(6,'(1/,a)') ' ... writing results to file ......................................' call materialpoint_postResults() call MPI_file_seek (resUnit,fileOffset,MPI_SEEK_SET,ierr) + if(ierr /=0_pInt) call IO_error(894_pInt, ext_msg='MPI_file_seek') do i=1, size(materialpoint_results,3)/(maxByteOut/(materialpoint_sizeResults*pReal))+1 ! slice the output of my process in chunks not exceeding the limit for one output - outputIndex=[(i-1)*maxByteOut/pReal/materialpoint_sizeResults+1, & - min(i*maxByteOut/pReal/materialpoint_sizeResults,size(materialpoint_results,3))] + outputIndex=int([(i-1_pInt)*((maxByteOut/pReal)/materialpoint_sizeResults)+1_pInt, & + min(i*((maxByteOut/pReal)/materialpoint_sizeResults),size(materialpoint_results,3))],pLongInt) call MPI_file_write(resUnit,reshape(materialpoint_results(:,:,outputIndex(1):outputIndex(2)),& [(outputIndex(2)-outputIndex(1)+1)*materialpoint_sizeResults]), & (outputIndex(2)-outputIndex(1)+1)*materialpoint_sizeResults,& MPI_DOUBLE, MPI_STATUS_IGNORE, ierr) - fileOffset = fileOffset + sum(outputSize) ! forward to current file position + if(ierr /=0_pInt) call IO_error(894_pInt, ext_msg='MPI_file_write') enddo + fileOffset = fileOffset + sum(outputSize) ! forward to current file position endif if( loadCases(currentLoadCase)%restartFrequency > 0_pInt .and. & ! at frequency of writing restart information set restart parameter for FEsolving mod(inc,loadCases(currentLoadCase)%restartFrequency) == 0_pInt) then ! first call to CPFEM_general will write? @@ -702,7 +708,7 @@ program DAMASK_spectral enddo call utilities_destroy() - call PetscFinalize(ierr); CHKERRQ(ierr) + call PETScFinalize(ierr); CHKERRQ(ierr) if (notConvergedCounter > 0_pInt) call quit(3_pInt) ! error if some are not converged call quit(0_pInt) ! no complains ;) diff --git a/src/FEsolving.f90 b/src/FEsolving.f90 index ed11448d7..3b0aeb194 100644 --- a/src/FEsolving.f90 +++ b/src/FEsolving.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Franz Roters, Max-Planck-Institut für Eisenforschung GmbH !> Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH !> @brief triggering reading in of restart information when doing a restart diff --git a/src/IO.f90 b/src/IO.f90 index 95ac6fffd..576514d81 100644 --- a/src/IO.f90 +++ b/src/IO.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Franz Roters, Max-Planck-Institut für Eisenforschung GmbH !> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH !> @author Christoph Kords, Max-Planck-Institut für Eisenforschung GmbH @@ -1239,8 +1237,7 @@ character(len=300) pure function IO_extractValue(pair,key) IO_extractValue = '' myChunk = scan(pair,SEP) - if (myChunk > 0 .and. pair(:myChunk-1) == key(:myChunk-1)) & - IO_extractValue = pair(myChunk+1:) ! extract value if key matches + if (myChunk > 0 .and. pair(:myChunk-1) == key) IO_extractValue = pair(myChunk+1:) ! 
extract value if key matches end function IO_extractValue @@ -1672,7 +1669,9 @@ subroutine IO_error(error_ID,el,ip,g,ext_msg) msg = 'unknown filter type selected' case (893_pInt) msg = 'PETSc: SNES_DIVERGED_FNORM_NAN' - + case (894_pInt) + msg = 'MPI error' + !------------------------------------------------------------------------------------------------- ! error messages related to parsing of Abaqus input file case (900_pInt) diff --git a/src/commercialFEM_fileList.f90 b/src/commercialFEM_fileList.f90 index 8567da5b1..7d02eadfc 100644 --- a/src/commercialFEM_fileList.f90 +++ b/src/commercialFEM_fileList.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH !> @brief all DAMASK files without solver !> @details List of files needed by MSC.Marc, Abaqus/Explicit, and Abaqus/Standard diff --git a/src/compilation_info.f90 b/src/compilation_info.f90 index 64e6b136c..3fc12f1ee 100644 --- a/src/compilation_info.f90 +++ b/src/compilation_info.f90 @@ -1,5 +1,3 @@ -!############################################################## -!$Id$ #ifdef __GFORTRAN__ write(6,*) 'Compiled with ', compiler_version() !not supported by and ifort <= 15 (and old gfortran) write(6,*) 'With options ', compiler_options() diff --git a/src/constitutive.f90 b/src/constitutive.f90 index 50c77b481..93fb9f577 100644 --- a/src/constitutive.f90 +++ b/src/constitutive.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Franz Roters, Max-Planck-Institut für Eisenforschung GmbH !> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH !> @brief elasticity, plasticity, internal microstructure state diff --git a/src/core_quit.f90 b/src/core_quit.f90 index 8446e77c8..3a730c82d 100644 --- a/src/core_quit.f90 +++ b/src/core_quit.f90 @@ -1,6 +1,3 @@ -!################################################################################################## -! $Id$ -!################################################################################################## !******************************************************************** ! quit subroutine to satisfy IO_error for core module ! diff --git a/src/crystallite.f90 b/src/crystallite.f90 index 6ca40ffef..71a5e4743 100644 --- a/src/crystallite.f90 +++ b/src/crystallite.f90 @@ -1,10 +1,8 @@ !-------------------------------------------------------------------------------------------------- -! 
$Id$ -!-------------------------------------------------------------------------------------------------- -!> @author Franz Roters, Max-Planck-Institut für Eisenforschung GmbH +!> @author Franz Roters, Max-Planck-Institut für Eisenforschung GmbH !> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH -!> @author Christoph Kords, Max-Planck-Institut für Eisenforschung GmbH -!> @author Chen Zhang, Michigan State University +!> @author Christoph Kords, Max-Planck-Institut für Eisenforschung GmbH +!> @author Chen Zhang, Michigan State University !> @brief crystallite state integration functions and reporting of results !-------------------------------------------------------------------------------------------------- @@ -3571,11 +3569,7 @@ logical function crystallite_integrateStress(& maxticks external :: & -#if(FLOAT==8) dgesv -#elif(FLOAT==4) - sgesv -#endif !* be pessimistic crystallite_integrateStress = .false. @@ -3758,11 +3752,7 @@ logical function crystallite_integrateStress(& - math_Plain3333to99(math_mul3333xx3333(math_mul3333xx3333(dLp_dT3333,dT_dFe3333),dFe_dLp3333)) dRLp_dLp2 = dRLp_dLp ! will be overwritten in first call to LAPACK routine work = math_plain33to9(residuumLp) -#if(FLOAT==8) call dgesv(9,1,dRLp_dLp2,9,ipiv,work,9,ierr) ! solve dRLp/dLp * delta Lp = -res for delta Lp -#elif(FLOAT==4) - call sgesv(9,1,dRLp_dLp2,9,ipiv,work,9,ierr) ! solve dRLp/dLp * delta Lp = -res for delta Lp -#endif if (ierr /= 0_pInt) then #ifndef _OPENMP if (iand(debug_level(debug_crystallite), debug_levelBasic) /= 0_pInt) then @@ -3851,31 +3841,27 @@ logical function crystallite_integrateStress(& math_mul3333xx3333(dT_dFi3333, dFi_dLi3333))) & - math_Plain3333to99(math_mul3333xx3333(dLi_dFi3333, dFi_dLi3333)) work = math_plain33to9(residuumLi) -#if(FLOAT==8) call dgesv(9,1,dRLi_dLi,9,ipiv,work,9,ierr) ! solve dRLi/dLp * delta Li = -res for delta Li -#elif(FLOAT==4) - call sgesv(9,1,dRLi_dLi,9,ipiv,work,9,ierr) ! solve dRLi/dLp * delta Li = -res for delta Li -#endif - if (ierr /= 0_pInt) then + if (ierr /= 0_pInt) then #ifndef _OPENMP - if (iand(debug_level(debug_crystallite), debug_levelBasic) /= 0_pInt) then - write(6,'(a,i8,1x,a,i8,a,1x,i2,1x,i3,a,i3)') '<< CRYST >> integrateStress failed on dR/dLi inversion at el ip ipc ', & - el,mesh_element(1,el),ip,ipc - if (iand(debug_level(debug_crystallite), debug_levelExtensive) /= 0_pInt & - .and. ((el == debug_e .and. ip == debug_i .and. ipc == debug_g)& - .or. .not. iand(debug_level(debug_crystallite), debug_levelSelective) /= 0_pInt)) then - write(6,*) - write(6,'(a,/,9(12x,9(e15.3,1x)/))') '<< CRYST >> dR_dLi',transpose(dRLi_dLi) - write(6,'(a,/,9(12x,9(e15.3,1x)/))') '<< CRYST >> dFe_dLi',transpose(math_Plain3333to99(dFe_dLi3333)) - write(6,'(a,/,9(12x,9(e15.3,1x)/))') '<< CRYST >> dT_dFi_constitutive',transpose(math_Plain3333to99(dT_dFi3333)) - write(6,'(a,/,9(12x,9(e15.3,1x)/))') '<< CRYST >> dLi_dT_constitutive',transpose(math_Plain3333to99(dLi_dT3333)) - write(6,'(a,/,3(12x,3(e20.7,1x)/))') '<< CRYST >> Li_constitutive',math_transpose33(Li_constitutive) - write(6,'(a,/,3(12x,3(e20.7,1x)/))') '<< CRYST >> Liguess',math_transpose33(Liguess) - endif + if (iand(debug_level(debug_crystallite), debug_levelBasic) /= 0_pInt) then + write(6,'(a,i8,1x,a,i8,a,1x,i2,1x,i3,a,i3)') '<< CRYST >> integrateStress failed on dR/dLi inversion at el ip ipc ', & + el,mesh_element(1,el),ip,ipc + if (iand(debug_level(debug_crystallite), debug_levelExtensive) /= 0_pInt & + .and. ((el == debug_e .and. ip == debug_i .and. ipc == debug_g)& + .or. .not. 
iand(debug_level(debug_crystallite), debug_levelSelective) /= 0_pInt)) then + write(6,*) + write(6,'(a,/,9(12x,9(e15.3,1x)/))') '<< CRYST >> dR_dLi',transpose(dRLi_dLi) + write(6,'(a,/,9(12x,9(e15.3,1x)/))') '<< CRYST >> dFe_dLi',transpose(math_Plain3333to99(dFe_dLi3333)) + write(6,'(a,/,9(12x,9(e15.3,1x)/))') '<< CRYST >> dT_dFi_constitutive',transpose(math_Plain3333to99(dT_dFi3333)) + write(6,'(a,/,9(12x,9(e15.3,1x)/))') '<< CRYST >> dLi_dT_constitutive',transpose(math_Plain3333to99(dLi_dT3333)) + write(6,'(a,/,3(12x,3(e20.7,1x)/))') '<< CRYST >> Li_constitutive',math_transpose33(Li_constitutive) + write(6,'(a,/,3(12x,3(e20.7,1x)/))') '<< CRYST >> Liguess',math_transpose33(Liguess) endif -#endif - return endif +#endif + return + endif deltaLi = - math_plain9to33(work) endif diff --git a/src/damage_local.f90 b/src/damage_local.f90 index 196382c13..1437213d7 100644 --- a/src/damage_local.f90 +++ b/src/damage_local.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH !> @brief material subroutine for locally evolving damage field !-------------------------------------------------------------------------------------------------- diff --git a/src/damage_none.f90 b/src/damage_none.f90 index 956ba5cc8..a9ecfb5de 100644 --- a/src/damage_none.f90 +++ b/src/damage_none.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH !> @brief material subroutine for constant damage field !-------------------------------------------------------------------------------------------------- diff --git a/src/damage_nonlocal.f90 b/src/damage_nonlocal.f90 index 311570781..86805c21b 100644 --- a/src/damage_nonlocal.f90 +++ b/src/damage_nonlocal.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH !> @brief material subroutine for non-locally evolving damage field !> @details to be done diff --git a/src/debug.f90 b/src/debug.f90 index 2a9c6d800..01020dd39 100644 --- a/src/debug.f90 +++ b/src/debug.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Franz Roters, Max-Planck-Institut für Eisenforschung GmbH !> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH !> @author Christoph Kords, Max-Planck-Institut für Eisenforschung GmbH diff --git a/src/homogenization.f90 b/src/homogenization.f90 index 00186ff06..cbc2a5915 100644 --- a/src/homogenization.f90 +++ b/src/homogenization.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! 
$Id$ -!-------------------------------------------------------------------------------------------------- !> @author Franz Roters, Max-Planck-Institut für Eisenforschung GmbH !> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH !> @author Denny Tjahjanto, Max-Planck-Institut für Eisenforschung GmbH diff --git a/src/homogenization_RGC.f90 b/src/homogenization_RGC.f90 index 323ca2934..0919b1e5e 100644 --- a/src/homogenization_RGC.f90 +++ b/src/homogenization_RGC.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Denny Tjahjanto, Max-Planck-Institut für Eisenforschung GmbH !> @author Franz Roters, Max-Planck-Institut für Eisenforschung GmbH !> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH diff --git a/src/homogenization_isostrain.f90 b/src/homogenization_isostrain.f90 index 083107d9f..b12e30ab3 100644 --- a/src/homogenization_isostrain.f90 +++ b/src/homogenization_isostrain.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Franz Roters, Max-Planck-Institut für Eisenforschung GmbH !> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH !> @brief Isostrain (full constraint Taylor assuption) homogenization scheme diff --git a/src/homogenization_none.f90 b/src/homogenization_none.f90 index 59e483c27..7f9518e90 100644 --- a/src/homogenization_none.f90 +++ b/src/homogenization_none.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Franz Roters, Max-Planck-Institut für Eisenforschung GmbH !> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH !> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH diff --git a/src/hydrogenflux_cahnhilliard.f90 b/src/hydrogenflux_cahnhilliard.f90 index d8cb71edc..898e7ed8d 100644 --- a/src/hydrogenflux_cahnhilliard.f90 +++ b/src/hydrogenflux_cahnhilliard.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH !> @brief material subroutine for conservative transport of solute hydrogen !> @details to be done diff --git a/src/hydrogenflux_isoconc.f90 b/src/hydrogenflux_isoconc.f90 index 74759d4c3..b4bcfb5e3 100644 --- a/src/hydrogenflux_isoconc.f90 +++ b/src/hydrogenflux_isoconc.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! 
$Id$ -!-------------------------------------------------------------------------------------------------- !> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH !> @brief material subroutine for constant hydrogen concentration !-------------------------------------------------------------------------------------------------- diff --git a/src/kinematics_cleavage_opening.f90 b/src/kinematics_cleavage_opening.f90 index 945e2d08a..8ac1a5646 100644 --- a/src/kinematics_cleavage_opening.f90 +++ b/src/kinematics_cleavage_opening.f90 @@ -1,8 +1,6 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- -!> @author Luv Sharma, Max-Planck-Institut fŸr Eisenforschung GmbH -!> @author Pratheek Shanthraj, Max-Planck-Institut fŸr Eisenforschung GmbH +!> @author Luv Sharma, Max-Planck-Institut für Eisenforschung GmbH +!> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH !> @brief material subroutine incorporating kinematics resulting from opening of cleavage planes !> @details to be done !-------------------------------------------------------------------------------------------------- diff --git a/src/kinematics_hydrogen_strain.f90 b/src/kinematics_hydrogen_strain.f90 index ceb3b1ef3..154b97e79 100644 --- a/src/kinematics_hydrogen_strain.f90 +++ b/src/kinematics_hydrogen_strain.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH !> @brief material subroutine incorporating kinematics resulting from interstitial hydrogen !> @details to be done diff --git a/src/kinematics_slipplane_opening.f90 b/src/kinematics_slipplane_opening.f90 index 8b49e1cf3..60487e5b4 100644 --- a/src/kinematics_slipplane_opening.f90 +++ b/src/kinematics_slipplane_opening.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Luv Sharma, Max-Planck-Institut für Eisenforschung GmbH !> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH !> @brief material subroutine incorporating kinematics resulting from opening of slip planes diff --git a/src/kinematics_thermal_expansion.f90 b/src/kinematics_thermal_expansion.f90 index b99c499f3..c5a221a7b 100644 --- a/src/kinematics_thermal_expansion.f90 +++ b/src/kinematics_thermal_expansion.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH !> @brief material subroutine incorporating kinematics resulting from thermal expansion !> @details to be done diff --git a/src/kinematics_vacancy_strain.f90 b/src/kinematics_vacancy_strain.f90 index 899bccd9f..704de7d1f 100644 --- a/src/kinematics_vacancy_strain.f90 +++ b/src/kinematics_vacancy_strain.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! 
$Id$ -!-------------------------------------------------------------------------------------------------- !> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH !> @brief material subroutine incorporating kinematics resulting from vacancy point defects !> @details to be done diff --git a/src/lattice.f90 b/src/lattice.f90 index 8e87ba2a9..05a123125 100644 --- a/src/lattice.f90 +++ b/src/lattice.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Franz Roters, Max-Planck-Institut für Eisenforschung GmbH !> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH !> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH @@ -2184,7 +2182,7 @@ pure function lattice_qDisorientation(Q1, Q2, struct) real(pReal), dimension(4) :: lattice_qDisorientation real(pReal), dimension(4), intent(in) :: & Q1, & ! 1st orientation - Q2 ! 2nd orientation + Q2 ! 2nd orientation integer(kind(LATTICE_undefined_ID)), optional, intent(in) :: & ! if given, symmetries between the two orientation will be considered struct diff --git a/src/libs.f90 b/src/libs.f90 index 7c109cab6..71f300512 100644 --- a/src/libs.f90 +++ b/src/libs.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH !> @brief dummy source for inclusion of Library files !-------------------------------------------------------------------------------------------------- diff --git a/src/material.f90 b/src/material.f90 index c1aacf751..b274cfabf 100644 --- a/src/material.f90 +++ b/src/material.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Franz Roters, Max-Planck-Institut für Eisenforschung GmbH !> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH !> @brief Parses material config file, either solverJobName.materialConfig or material.config @@ -1282,7 +1280,7 @@ subroutine material_populateGrains integer(pInt) :: t,e,i,g,j,m,c,r,homog,micro,sgn,hme, myDebug, & phaseID,textureID,dGrains,myNgrains,myNorientations,myNconstituents, & grain,constituentGrain,ipGrain,symExtension, ip - real(pReal) :: extreme,rnd + real(pReal) :: deviation,extreme,rnd integer(pInt), dimension (:,:), allocatable :: Nelems ! counts number of elements in homog, micro array type(p_intvec), dimension (:,:), allocatable :: elemsOfHomogMicro ! lists element number in homog, micro array @@ -1409,8 +1407,11 @@ subroutine material_populateGrains extreme = 0.0_pReal t = 0_pInt do i = 1_pInt,myNconstituents ! 
@@ -1409,8 +1407,11 @@ subroutine material_populateGrains extreme = 0.0_pReal t = 0_pInt do i = 1_pInt,myNconstituents ! find largest deviator - if (real(sgn,pReal)*log(NgrainsOfConstituent(i)/myNgrains/microstructure_fraction(i,micro)) > extreme) then - extreme = real(sgn,pReal)*log(NgrainsOfConstituent(i)/myNgrains/microstructure_fraction(i,micro)) + deviation = real(sgn,pReal)*log( microstructure_fraction(i,micro) / & + !-------------------------------- & + (real(NgrainsOfConstituent(i),pReal)/real(myNgrains,pReal) ) ) + if (deviation > extreme) then + extreme = deviation + t = i endif enddo diff --git a/src/math.f90 b/src/math.f90 index 8636ad6bc..bf7460062 100644 --- a/src/math.f90 +++ b/src/math.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Franz Roters, Max-Planck-Institut für Eisenforschung GmbH !> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH !> @author Christoph Kords, Max-Planck-Institut für Eisenforschung GmbH @@ -150,8 +148,10 @@ module math math_sampleFiberOri, & math_sampleGaussVar, & math_symmetricEulers, & - math_spectralDecompositionSym33, & - math_spectralDecompositionSym, & + math_eigenvectorBasisSym33, & + math_eigenvectorBasisSym, & + math_eigenValuesVectorsSym33, & + math_eigenValuesVectorsSym, & math_rotationalPart33, & math_invariantsSym33, & math_eigenvaluesSym33, & @@ -186,10 +186,6 @@ module math halton_seed_set, & i_to_halton, & prime - external :: & - dsyev, & - dgetrf, & - dgetri contains @@ -472,7 +468,23 @@ end function math_crossproduct !-------------------------------------------------------------------------------------------------- -!> @brief tensor product a \otimes b +!> @brief tensor product A \otimes B of arbitrary sized vectors A and B +!-------------------------------------------------------------------------------------------------- +pure function math_tensorproduct(A,B) + + implicit none + real(pReal), dimension(:), intent(in) :: A,B + real(pReal), dimension(size(A,1),size(B,1)) :: math_tensorproduct + + integer(pInt) :: i,j + + forall (i=1_pInt:size(A,1),j=1_pInt:size(B,1)) math_tensorproduct(i,j) = A(i)*B(j) + +end function math_tensorproduct + + +!-------------------------------------------------------------------------------------------------- +!> @brief tensor product A \otimes B of length-3 vectors A and B !-------------------------------------------------------------------------------------------------- pure function math_tensorproduct33(A,B) @@ -682,7 +694,7 @@ pure function math_exp33(A,n) math_exp33 = B ! A^0 = eye2 do i = 1_pInt,n - invfac = invfac/real(i) ! invfac = 1/i! + invfac = invfac/real(i,pReal) ! invfac = 1/i! B = math_mul33x33(B,A) math_exp33 = math_exp33 + invfac*B ! exp = SUM (A^i)/i! enddo
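In the math_exp33 hunk just above, the series coefficient 1/i! is now accumulated via real(i,pReal), so the division cannot silently drop to default real precision. A self-contained sketch of the same truncated series exp(A) = SUM (A^i)/i! (matmul stands in for math_mul33x33, the sample matrix is illustrative):

program exp33_demo
 implicit none
 integer, parameter :: pReal = selected_real_kind(15)
 real(pReal), dimension(3,3) :: A, B, E
 real(pReal) :: invfac
 integer :: i, j

 A = 0.0_pReal
 forall (j=1:3) A(j,j) = 0.1_pReal    ! sample matrix: exp(A) = exp(0.1)*I analytically
 B = 0.0_pReal
 forall (j=1:3) B(j,j) = 1.0_pReal    ! B = A**0 = identity
 E = B
 invfac = 1.0_pReal
 do i = 1, 10
   invfac = invfac/real(i,pReal)      ! invfac = 1/i!, kept in working precision
   B = matmul(B,A)
   E = E + invfac*B                   ! partial sum of A**i/i!
 enddo
 write(6,*) E(1,1), exp(0.1_pReal)    ! both approximately 1.1051709
end program exp33_demo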
@@ -761,6 +773,7 @@ pure subroutine math_invert33(A, InvA, DetA, error) DetA = A(1,1) * InvA(1,1) + A(1,2) * InvA(2,1) + A(1,3) * InvA(3,1) if (abs(DetA) <= tiny(DetA)) then + InvA = 0.0_pReal + error = .true. else InvA(1,2) = -A(1,2) * A(3,3) + A(1,3) * A(3,2) @@ -794,15 +807,13 @@ function math_invSym3333(A) integer(pInt), dimension(6) :: ipiv6 real(pReal), dimension(6,6) :: temp66_Real real(pReal), dimension(6) :: work6 + external :: & + dgetrf, & + dgetri temp66_real = math_Mandel3333to66(A) -#if(FLOAT==8) call dgetrf(6,6,temp66_real,6,ipiv6,ierr) call dgetri(6,temp66_real,6,ipiv6,work6,6,ierr) -#elif(FLOAT==4) - call sgetrf(6,6,temp66_real,6,ipiv6,ierr) - call sgetri(6,temp66_real,6,ipiv6,work6,6,ierr) -#endif if (ierr == 0_pInt) then math_invSym3333 = math_Mandel66to3333(temp66_real) else @@ -830,13 +841,8 @@ subroutine math_invert(myDim,A, InvA, error) logical, intent(out) :: error invA = A -#if(FLOAT==8) call dgetrf(myDim,myDim,invA,myDim,ipiv,ierr) call dgetri(myDim,InvA,myDim,ipiv,work,myDim,ierr) -#elif(FLOAT==4) - call sgetrf(myDim,myDim,invA,myDim,ipiv,ierr) - call sgetri(myDim,InvA,myDim,ipiv,work,myDim,ierr) -#endif error = merge(.true.,.false., ierr /= 0_pInt) ! http://fortraninacworld.blogspot.de/2012/12/ternary-operator.html end subroutine math_invert @@ -1913,26 +1919,23 @@ end function math_symmetricEulers !-------------------------------------------------------------------------------------------------- !> @brief eigenvalues and eigenvectors of symmetric matrix m !-------------------------------------------------------------------------------------------------- -subroutine math_spectralDecompositionSym(m,values,vectors,error) +subroutine math_eigenValuesVectorsSym(m,values,vectors,error) implicit none real(pReal), dimension(:,:), intent(in) :: m real(pReal), dimension(size(m,1)), intent(out) :: values real(pReal), dimension(size(m,1),size(m,1)), intent(out) :: vectors logical, intent(out) :: error - integer(pInt) :: info real(pReal), dimension((64+2)*size(m,1)) :: work ! block size of 64 taken from http://www.netlib.org/lapack/double/dsyev.f + external :: & + dsyev - vectors = M ! copy matrix to input (doubles as output) array -#if(FLOAT==8) + vectors = m ! copy matrix to input (doubles as output) array call dsyev('V','U',size(m,1),vectors,size(m,1),values,work,(64+2)*size(m,1),info) -#elif(FLOAT==4) - call ssyev('V','U',size(m,1),vectors,size(m,1),values,work,(64+2)*size(m,1),info) -#endif error = (info /= 0_pInt) -end subroutine math_spectralDecompositionSym +end subroutine math_eigenValuesVectorsSym !--------------------------------------------------------------------------------------------------
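With the FLOAT macro gone, the wrapper above always calls LAPACK's double precision DSYEV, and the external declaration now lives inside the procedure instead of module-wide. A standalone sketch of the same call, to be linked against LAPACK (e.g. -llapack); the matrix is illustrative and the workspace sizing follows the wrapper:

program dsyev_demo
 implicit none
 integer, parameter :: pReal = selected_real_kind(15)
 real(pReal), dimension(3,3) :: m, vectors
 real(pReal), dimension(3) :: values
 real(pReal), dimension((64+2)*3) :: work   ! block size of 64, as in the wrapper
 integer :: info
 external :: dsyev

 m = reshape([2.0_pReal,1.0_pReal,0.0_pReal, &
              1.0_pReal,2.0_pReal,0.0_pReal, &
              0.0_pReal,0.0_pReal,3.0_pReal],[3,3])
 vectors = m                                ! DSYEV overwrites its input with the eigenvectors
 call dsyev('V','U',3,vectors,3,values,work,size(work),info)
 if (info /= 0) stop 'DSYEV failed'
 write(6,*) values                          ! ascending: 1.0, 3.0, 3.0
end program dsyev_demo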
@@ -1942,7 +1945,7 @@ end subroutine math_spectralDecompositionSym !> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH !> @details See http://arxiv.org/abs/physics/0610206 (DSYEVH3) !-------------------------------------------------------------------------------------------------- -subroutine math_spectralDecompositionSym33(m,values,vectors) +subroutine math_eigenValuesVectorsSym33(m,values,vectors) implicit none real(pReal), dimension(3,3),intent(in) :: m @@ -1959,7 +1962,7 @@ subroutine math_spectralDecompositionSym33(m,values,vectors) T = maxval(abs(values)) U = max(T, T**2_pInt) - threshold = sqrt(5.0e-14_pReal * U**2_pInt) + threshold = sqrt(5.68e-14_pReal * U**2_pInt) ! Calculate first eigenvector by the formula v[0] = (m - lambda[0]).e1 x (m - lambda[0]).e2 vectors(1:3,1) = [ vectors(1,2) + m(1, 3) * values(1), & @@ -1968,7 +1971,7 @@ norm = norm2(vectors(1:3, 1)) fallback1: if(norm < threshold) then - call math_spectralDecompositionSym(m,values,vectors,error) + call math_eigenValuesVectorsSym(m,values,vectors,error) return endif fallback1 @@ -1981,19 +1984,108 @@ norm = norm2(vectors(1:3, 2)) fallback2: if(norm < threshold) then - call math_spectralDecompositionSym(m,values,vectors,error) + call math_eigenValuesVectorsSym(m,values,vectors,error) return endif fallback2 vectors(1:3,2) = vectors(1:3, 2) / norm ! Calculate third eigenvector according to v[2] = v[0] x v[1] vectors(1:3,3) = math_crossproduct(vectors(1:3,1),vectors(1:3,2)) - -end subroutine math_spectralDecompositionSym33 +end subroutine math_eigenValuesVectorsSym33 !-------------------------------------------------------------------------------------------------- -!> @brief rotational part from polar decomposition of tensor m +!> @brief eigenvector basis of symmetric matrix m +!-------------------------------------------------------------------------------------------------- +function math_eigenvectorBasisSym(m) + + implicit none + real(pReal), dimension(:,:), intent(in) :: m + real(pReal), dimension(size(m,1)) :: values + real(pReal), dimension(size(m,1),size(m,1)) :: vectors + real(pReal), dimension(size(m,1),size(m,1)) :: math_eigenvectorBasisSym + logical :: error + integer(pInt) :: i + + math_eigenvectorBasisSym = 0.0_pReal + call math_eigenValuesVectorsSym(m,values,vectors,error) + if(error) return + + do i=1_pInt, size(m,1) + math_eigenvectorBasisSym = math_eigenvectorBasisSym & + + sqrt(values(i)) * math_tensorproduct(vectors(:,i),vectors(:,i)) + enddo + +end function math_eigenvectorBasisSym + + +!-------------------------------------------------------------------------------------------------- +!> @brief eigenvector basis of symmetric 33 matrix m +!-------------------------------------------------------------------------------------------------- +function math_eigenvectorBasisSym33(m) + + implicit none + real(pReal), dimension(3,3) :: math_eigenvectorBasisSym33 + real(pReal), dimension(3) :: invariants, values + real(pReal), dimension(3,3), intent(in) :: m + real(pReal) :: P, Q, rho, phi + real(pReal), parameter :: TOL=1.e-14_pReal + real(pReal), dimension(3,3,3) :: N, EB + + invariants = math_invariantsSym33(m) + EB = 0.0_pReal + + P = invariants(2)-invariants(1)**2.0_pReal/3.0_pReal + Q = -2.0_pReal/27.0_pReal*invariants(1)**3.0_pReal+product(invariants(1:2))/3.0_pReal-invariants(3) + + threeSimilarEigenvalues: if(all(abs([P,Q]) < TOL)) then + values = invariants(1)/3.0_pReal +!
this is not really correct, but at least the basis is correct + EB(1,1,1)=1.0_pReal + EB(2,2,2)=1.0_pReal + EB(3,3,3)=1.0_pReal + else threeSimilarEigenvalues + rho=sqrt(-3.0_pReal*P**3.0_pReal)/9.0_pReal + phi=acos(math_limit(-Q/rho*0.5_pReal,-1.0_pReal,1.0_pReal)) + values = 2.0_pReal*rho**(1.0_pReal/3.0_pReal)* & + [cos(phi/3.0_pReal), & + cos((phi+2.0_pReal*PI)/3.0_pReal), & + cos((phi+4.0_pReal*PI)/3.0_pReal) & + ] + invariants(1)/3.0_pReal + N(1:3,1:3,1) = m-values(1)*math_I3 + N(1:3,1:3,2) = m-values(2)*math_I3 + N(1:3,1:3,3) = m-values(3)*math_I3 + twoSimilarEigenvalues: if(abs(values(1)-values(2)) < TOL) then + EB(1:3,1:3,3)=math_mul33x33(N(1:3,1:3,1),N(1:3,1:3,2))/ & + ((values(3)-values(1))*(values(3)-values(2))) + EB(1:3,1:3,1)=math_I3-EB(1:3,1:3,3) + elseif(abs(values(2)-values(3)) < TOL) then twoSimilarEigenvalues + EB(1:3,1:3,1)=math_mul33x33(N(1:3,1:3,2),N(1:3,1:3,3))/ & + ((values(1)-values(2))*(values(1)-values(3))) + EB(1:3,1:3,2)=math_I3-EB(1:3,1:3,1) + elseif(abs(values(3)-values(1)) < TOL) then twoSimilarEigenvalues + EB(1:3,1:3,2)=math_mul33x33(N(1:3,1:3,1),N(1:3,1:3,3))/ & + ((values(2)-values(1))*(values(2)-values(3))) + EB(1:3,1:3,1)=math_I3-EB(1:3,1:3,2) + else twoSimilarEigenvalues + EB(1:3,1:3,1)=math_mul33x33(N(1:3,1:3,2),N(1:3,1:3,3))/ & + ((values(1)-values(2))*(values(1)-values(3))) + EB(1:3,1:3,2)=math_mul33x33(N(1:3,1:3,1),N(1:3,1:3,3))/ & + ((values(2)-values(1))*(values(2)-values(3))) + EB(1:3,1:3,3)=math_mul33x33(N(1:3,1:3,1),N(1:3,1:3,2))/ & + ((values(3)-values(1))*(values(3)-values(2))) + endif twoSimilarEigenvalues + endif threeSimilarEigenvalues + + math_eigenvectorBasisSym33 = sqrt(values(1)) * EB(1:3,1:3,1) & + + sqrt(values(2)) * EB(1:3,1:3,2) & + + sqrt(values(3)) * EB(1:3,1:3,3) + +end function math_eigenvectorBasisSym33 + + +!-------------------------------------------------------------------------------------------------- +!> @brief rotational part from polar decomposition of 33 tensor m !-------------------------------------------------------------------------------------------------- function math_rotationalPart33(m) use IO, only: & @@ -2002,17 +2094,11 @@ function math_rotationalPart33(m) implicit none real(pReal), intent(in), dimension(3,3) :: m real(pReal), dimension(3,3) :: math_rotationalPart33 - real(pReal), dimension(3,3) :: U, mTm , Uinv, EB - real(pReal), dimension(3) :: EV - - mTm = math_mul33x33(math_transpose33(m),m) - call math_spectralDecompositionSym33(mTm,EV,EB) - - U = sqrt(EV(1)) * math_tensorproduct33(EB(1:3,1),EB(1:3,1)) & - + sqrt(EV(2)) * math_tensorproduct33(EB(1:3,2),EB(1:3,2)) & - + sqrt(EV(3)) * math_tensorproduct33(EB(1:3,3),EB(1:3,3)) + real(pReal), dimension(3,3) :: U , Uinv + U = math_eigenvectorBasisSym33(math_mul33x33(transpose(m),m)) Uinv = math_inv33(U) + if (all(abs(Uinv) <= tiny(Uinv))) then ! math_inv33 returns zero when failed, avoid floating point equality comparison math_rotationalPart33 = math_I3 call IO_warning(650_pInt)
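math_rotationalPart33 now builds the stretch U = sqrt(F^T F) in one step from the eigenvector basis, i.e. sqrt(M) = SUM sqrt(lambda_i) v_i (x) v_i, instead of assembling it term by term. A standalone sketch of the resulting polar decomposition F = R U for a simple-shear deformation gradient; DSYEV and DGESV stand in for the module's analytic 3x3 path, so this illustrates the algebra, not DAMASK's exact code path:

program polar_demo
 implicit none
 integer, parameter :: pReal = selected_real_kind(15)
 real(pReal), dimension(3,3) :: F, C, U, R, vec, RT
 real(pReal), dimension(3) :: val
 real(pReal), dimension((64+2)*3) :: work
 integer, dimension(3) :: ipiv
 integer :: i, info
 external :: dsyev, dgesv

 F = reshape([1.0_pReal,0.0_pReal,0.0_pReal, &
              0.5_pReal,1.0_pReal,0.0_pReal, &
              0.0_pReal,0.0_pReal,1.0_pReal],[3,3])   ! simple shear, F(1,2) = 0.5
 C = matmul(transpose(F),F)                           ! right Cauchy-Green tensor
 vec = C
 call dsyev('V','U',3,vec,3,val,work,size(work),info)
 U = 0.0_pReal
 do i = 1, 3                                          ! eigenvector basis: U = SUM sqrt(lambda_i) v_i (x) v_i
   U = U + sqrt(val(i))*spread(vec(:,i),2,3)*spread(vec(:,i),1,3)
 enddo
 RT = transpose(F)
 call dgesv(3,3,U,3,ipiv,RT,3,info)                   ! solve U*X = F^T, so X = R^T (U symmetric; U is overwritten)
 R = transpose(RT)
 write(6,*) matmul(R,transpose(R))                    ! approximately the identity: R is a proper rotation
end program polar_demo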
@@ -2035,16 +2121,13 @@ function math_eigenvaluesSym(m) real(pReal), dimension(:,:), intent(in) :: m real(pReal), dimension(size(m,1)) :: math_eigenvaluesSym real(pReal), dimension(size(m,1),size(m,1)) :: vectors - integer(pInt) :: info real(pReal), dimension((64+2)*size(m,1)) :: work ! block size of 64 taken from http://www.netlib.org/lapack/double/dsyev.f + external :: & + dsyev vectors = m ! copy matrix to input (doubles as output) array -#if(FLOAT==8) call dsyev('N','U',size(m,1),vectors,size(m,1),math_eigenvaluesSym,work,(64+2)*size(m,1),info) -#elif(FLOAT==4) - call ssyev('N','U',size(m,1),vectors,size(m,1),math_eigenvaluesSym,work,(64+2)*size(m,1),info) -#endif if (info /= 0_pInt) math_eigenvaluesSym = DAMASK_NaN end function math_eigenvaluesSym @@ -2070,7 +2153,7 @@ function math_eigenvaluesSym33(m) P = invariants(2)-invariants(1)**2.0_pReal/3.0_pReal ! different from http://arxiv.org/abs/physics/0610206 (this formulation was in DAMASK) Q = -2.0_pReal/27.0_pReal*invariants(1)**3.0_pReal+product(invariants(1:2))/3.0_pReal-invariants(3)! different from http://arxiv.org/abs/physics/0610206 (this formulation was in DAMASK) - if(any(abs([p,q]) < TOL)) then + if(all(abs([P,Q]) < TOL)) then math_eigenvaluesSym33 = math_eigenvaluesSym(m) else rho=sqrt(-3.0_pReal*P**3.0_pReal)/9.0_pReal
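math_eigenvaluesSym33 obtains the eigenvalues analytically from the invariants (a Cardano-type solution): with P and Q as above, rho = sqrt(-3 P^3)/9, phi = acos(-Q/(2 rho)) clipped to [-1,1], and lambda_k = 2 rho^(1/3) cos((phi + 2 pi k)/3) + I1/3. The any -> all change means the LAPACK fallback is now taken only when P and Q both (nearly) vanish, i.e. for a triple eigenvalue, where this expression is ill-conditioned. A standalone numeric check of the formula on a matrix with known spectrum (invariants computed locally):

program cardano_demo
 implicit none
 integer, parameter :: pReal = selected_real_kind(15)
 real(pReal), parameter :: PI = 3.14159265358979323846_pReal
 real(pReal), dimension(3,3) :: m, mm
 real(pReal), dimension(3) :: values
 real(pReal) :: I1, I2, I3, P, Q, rho, phi

 m = 0.0_pReal
 m(1,1) = 1.0_pReal; m(2,2) = 2.0_pReal; m(3,3) = 3.0_pReal   ! eigenvalues 1, 2, 3 by construction
 mm = matmul(m,m)
 I1 = m(1,1)+m(2,2)+m(3,3)                                    ! invariants of a symmetric 3x3 matrix
 I2 = 0.5_pReal*(I1**2-(mm(1,1)+mm(2,2)+mm(3,3)))
 I3 = m(1,1)*(m(2,2)*m(3,3)-m(2,3)*m(3,2)) &
    - m(1,2)*(m(2,1)*m(3,3)-m(2,3)*m(3,1)) &
    + m(1,3)*(m(2,1)*m(3,2)-m(2,2)*m(3,1))
 P = I2-I1**2/3.0_pReal
 Q = -2.0_pReal/27.0_pReal*I1**3+I1*I2/3.0_pReal-I3
 rho = sqrt(-3.0_pReal*P**3)/9.0_pReal
 phi = acos(max(-1.0_pReal,min(1.0_pReal,-Q/rho*0.5_pReal)))  ! clipped, as math_limit does above
 values = 2.0_pReal*rho**(1.0_pReal/3.0_pReal)* &
          [cos(phi/3.0_pReal),cos((phi+2.0_pReal*PI)/3.0_pReal),cos((phi+4.0_pReal*PI)/3.0_pReal)] &
        + I1/3.0_pReal
 write(6,*) values                                            ! 3.0, 1.0, 2.0
end program cardano_demo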
diff --git a/src/mesh.f90 b/src/mesh.f90 index 1cd80a625..29bb15aef 100644 --- a/src/mesh.f90 +++ b/src/mesh.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Franz Roters, Max-Planck-Institut für Eisenforschung GmbH !> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH !> @author Christoph Kords, Max-Planck-Institut für Eisenforschung GmbH diff --git a/src/numerics.f90 b/src/numerics.f90 index 61f326c02..365b078ec 100644 --- a/src/numerics.f90 +++ b/src/numerics.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Franz Roters, Max-Planck-Institut für Eisenforschung GmbH !> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH !> @brief Managing of parameters related to numerics diff --git a/src/plastic_disloUCLA.f90 b/src/plastic_disloUCLA.f90 index d95a5e6a4..77fd84900 100644 --- a/src/plastic_disloUCLA.f90 +++ b/src/plastic_disloUCLA.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Franz Roters, Max-Planck-Institut für Eisenforschung GmbH !> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH !> @author David Cereceda, Lawrence Livermore National Laboratory @@ -1207,13 +1205,11 @@ subroutine plastic_disloUCLA_LpAndItsTangent(Lp,dLp_dTstar99,Tstar_v,Temperature math_Plain3333to99, & math_Mandel6to33, & math_Mandel33to6, & - math_spectralDecompositionSym33, & math_symmetric33, & math_mul33x3 use material, only: & material_phase, & phase_plasticityInstance, & - !plasticState, & phaseAt, phasememberAt use lattice, only: & lattice_Sslip, & diff --git a/src/plastic_dislotwin.f90 b/src/plastic_dislotwin.f90 index 532312bfd..b9ffde804 100644 --- a/src/plastic_dislotwin.f90 +++ b/src/plastic_dislotwin.f90 @@ -1,7 +1,3 @@ -!-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- -!> @author Franz Roters, Max-Planck-Institut für Eisenforschung GmbH !> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH !> @brief material subroutine incorporating dislocation and twinning physics !> @details to be done @@ -1637,7 +1633,7 @@ subroutine plastic_dislotwin_LpAndItsTangent(Lp,dLp_dTstar99,Tstar_v,Temperature math_Plain3333to99, & math_Mandel6to33, & math_Mandel33to6, & - math_spectralDecompositionSym, & + math_eigenValuesVectorsSym, & math_tensorproduct33, & math_symmetric33, & math_mul33x3 @@ -1783,7 +1779,7 @@ subroutine plastic_dislotwin_LpAndItsTangent(Lp,dLp_dTstar99,Tstar_v,Temperature abs(plastic_dislotwin_sbResistance(instance)) > tiny(0.0_pReal)) then gdot_sb = 0.0_pReal dgdot_dtausb = 0.0_pReal - call math_spectralDecompositionSym(math_Mandel6to33(Tstar_v),eigValues,eigVectors,error) + call math_eigenValuesVectorsSym(math_Mandel6to33(Tstar_v),eigValues,eigVectors,error) do j = 1_pInt,6_pInt sb_s = 0.5_pReal*sqrt(2.0_pReal)*math_mul33x3(eigVectors,sb_sComposition(1:3,j)) sb_m = 0.5_pReal*sqrt(2.0_pReal)*math_mul33x3(eigVectors,sb_mComposition(1:3,j)) @@ -2007,7 +2003,7 @@ subroutine plastic_dislotwin_dotState(Tstar_v,Temperature,ipc,ip,el) !* Total twin volume fraction sumf = sum(state(instance)%twinFraction(1_pInt:nt,of)) ! safe for nt == 0 - plasticState(instance)%dotState(:,of) = 0.0_pReal + plasticState(ph)%dotState(:,of) = 0.0_pReal !* Total transformed volume fraction sumftr = sum(state(instance)%stressTransFraction(1_pInt:nr,of)) + & @@ -2197,8 +2193,8 @@ function plastic_dislotwin_postResults(Tstar_v,Temperature,ipc,ip,el) use math, only: & pi, & math_Mandel6to33, & - math_eigenvaluesSym33, & - math_spectralDecompositionSym33 + math_eigenValuesSym33, & + math_eigenValuesVectorsSym33 use material, only: & material_phase, & phase_plasticityInstance,& @@ -2519,7 +2515,7 @@ function plastic_dislotwin_postResults(Tstar_v,Temperature,ipc,ip,el) plastic_dislotwin_postResults(c+1_pInt:c+3_pInt) = math_eigenvaluesSym33(math_Mandel6to33(Tstar_v)) c = c + 3_pInt case (sb_eigenvectors_ID) - call math_spectralDecompositionSym33(math_Mandel6to33(Tstar_v),eigValues,eigVectors) + call math_eigenValuesVectorsSym33(math_Mandel6to33(Tstar_v),eigValues,eigVectors) plastic_dislotwin_postResults(c+1_pInt:c+9_pInt) = reshape(eigVectors,[9]) c = c + 9_pInt case (stress_trans_fraction_ID) @@ -2539,4 +2535,4 @@ function plastic_dislotwin_postResults(Tstar_v,Temperature,ipc,ip,el) enddo end function plastic_dislotwin_postResults -end module plastic_dislotwin \ No newline at end of file +end module plastic_dislotwin diff --git a/src/plastic_isotropic.f90 b/src/plastic_isotropic.f90 index 13481b9a7..132a0abb1 100644 --- a/src/plastic_isotropic.f90 +++ b/src/plastic_isotropic.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Franz Roters, Max-Planck-Institut für Eisenforschung GmbH !> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH !> @brief material subroutine for isotropic (ISOTROPIC) plasticity @@ -9,14 +7,11 @@ !!
untextured polycrystal !-------------------------------------------------------------------------------------------------- module plastic_isotropic -#ifdef HDF - use hdf5, only: & - HID_T -#endif use prec, only: & pReal,& - pInt + pInt, & + DAMASK_NaN implicit none private @@ -42,22 +37,22 @@ module plastic_isotropic integer(kind(undefined_ID)), allocatable, dimension(:) :: & outputID real(pReal) :: & - fTaylor, & - tau0, & - gdot0, & - n, & - h0, & - h0_slopeLnRate, & - tausat, & - a, & - aTolFlowstress, & - aTolShear , & - tausat_SinhFitA, & - tausat_SinhFitB, & - tausat_SinhFitC, & - tausat_SinhFitD + fTaylor = DAMASK_NaN, & + tau0 = DAMASK_NaN, & + gdot0 = DAMASK_NaN, & + n = DAMASK_NaN, & + h0 = DAMASK_NaN, & + h0_slopeLnRate = 0.0_pReal, & + tausat = DAMASK_NaN, & + a = DAMASK_NaN, & + aTolFlowstress = 1.0_pReal, & + aTolShear = 1.0e-6_pReal, & + tausat_SinhFitA= 0.0_pReal, & + tausat_SinhFitB= 0.0_pReal, & + tausat_SinhFitC= 0.0_pReal, & + tausat_SinhFitD= 0.0_pReal logical :: & - dilatation + dilatation = .false. end type type(tParameters), dimension(:), allocatable, private :: param !< containers of constitutive parameters (len Ninstance)
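Initializing every mandatory parameter to DAMASK_NaN (and every optional one to a harmless default) turns a missing config entry into a detectable state: NaN survives parsing untouched and can be tested for afterwards. A minimal sketch of the pattern, using the intrinsic IEEE module in place of DAMASK's DAMASK_NaN constant (which, per the prec.f90 hunks further down, exists because old gfortran lacked ieee_arithmetic); tau0/gdot0 are stand-in names:

program nan_default_demo
 use, intrinsic :: ieee_arithmetic, only: ieee_value, ieee_quiet_nan, ieee_is_nan
 implicit none
 integer, parameter :: pReal = selected_real_kind(15)
 type tParameters
   real(pReal) :: tau0, gdot0                   ! mandatory: must appear in the config file
   real(pReal) :: h0_slopeLnRate = 0.0_pReal    ! optional: safe default
 end type tParameters
 type(tParameters) :: param

 param%tau0  = ieee_value(1.0_pReal,ieee_quiet_nan)   ! mark as "not yet read"
 param%gdot0 = ieee_value(1.0_pReal,ieee_quiet_nan)
 param%gdot0 = 0.001_pReal                            ! pretend the parser only encountered gdot0
 if (ieee_is_nan(param%tau0)) write(6,*) 'tau0 missing from config'   ! triggers
end program nan_default_demo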
@@ -169,7 +164,7 @@ subroutine plastic_isotropic_init(fileUnit) allocate(plastic_isotropic_Noutput(maxNinstance), source=0_pInt) allocate(param(maxNinstance)) ! one container of parameters per instance - + rewind(fileUnit) phase = 0_pInt do while (trim(line) /= IO_EOF .and. IO_lc(IO_getTag(line,'<','>')) /= material_partPhase) ! wind forward to <phase> @@ -186,14 +181,13 @@ if (IO_getTag(line,'[',']') /= '') then ! next section phase = phase + 1_pInt ! advance section counter if (phase_plasticity(phase) == PLASTICITY_ISOTROPIC_ID) then - instance = phase_plasticityInstance(phase) - + instance = phase_plasticityInstance(phase) ! count instances of my constitutive law + allocate(param(instance)%outputID(phase_Noutput(phase))) ! allocate space for IDs of every requested output endif cycle ! skip to next line endif if (phase > 0_pInt) then; if (phase_plasticity(phase) == PLASTICITY_ISOTROPIC_ID) then ! one of my phases. Do not short-circuit here (.and. between if-statements), it's not safe in Fortran instance = phase_plasticityInstance(phase) ! which instance of my plasticity is present phase - allocate(param(instance)%outputID(phase_Noutput(phase))) ! allocate space for IDs of every requested output chunkPos = IO_stringPos(line) tag = IO_lc(IO_stringValue(line,chunkPos,1_pInt)) ! extract key extmsg = trim(tag)//' ('//PLASTICITY_ISOTROPIC_label//')' ! prepare error message identifier @@ -477,7 +471,8 @@ subroutine plastic_isotropic_LiAndItsTangent(Li,dLi_dTstar_3333,Tstar_v,ipc,ip,e implicit none real(pReal), dimension(3,3), intent(out) :: & Li !< plastic velocity gradient - + real(pReal), dimension(3,3,3,3), intent(out) :: & + dLi_dTstar_3333 !< derivative of Li with respect to Tstar as 4th order tensor real(pReal), dimension(6), intent(in) :: & Tstar_v !< 2nd Piola Kirchhoff stress tensor in Mandel notation integer(pInt), intent(in) :: & @@ -487,9 +482,7 @@ subroutine plastic_isotropic_LiAndItsTangent(Li,dLi_dTstar_3333,Tstar_v,ipc,ip,e real(pReal), dimension(3,3) :: & Tstar_sph_33 !< spherical part of the 2nd Piola Kirchhoff stress tensor as 2nd order tensor - real(pReal), dimension(3,3,3,3), intent(out) :: & - dLi_dTstar_3333 !< derivative of Li with respect to Tstar as 4th order tensor - real(pReal) :: & +real(pReal) :: & gamma_dot, & !< strainrate norm_Tstar_sph, & !< euclidean norm of Tstar_sph squarenorm_Tstar_sph !< square of the euclidean norm of Tstar_sph @@ -526,6 +519,9 @@ subroutine plastic_isotropic_LiAndItsTangent(Li,dLi_dTstar_3333,Tstar_v,ipc,ip,e dLi_dTstar_3333 = gamma_dot / param(instance)%fTaylor * & dLi_dTstar_3333 / norm_Tstar_sph endif + else + Li = 0.0_pReal + dLi_dTstar_3333 = 0.0_pReal endif end subroutine plastic_isotropic_LiAndItsTangent diff --git a/src/plastic_j2.f90 b/src/plastic_j2.f90 index 89c022cc9..4138aea25 100644 --- a/src/plastic_j2.f90 +++ b/src/plastic_j2.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Franz Roters, Max-Planck-Institut für Eisenforschung GmbH !> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH !> @brief material subroutine for isotropic (J2) plasticity @@ -209,9 +207,6 @@ subroutine plastic_j2_init(fileUnit) phase = phase + 1_pInt ! advance section counter if (phase_plasticity(phase) == PLASTICITY_J2_ID) then instance = phase_plasticityInstance(phase) -#ifdef HDF - outID(instance)=HDF5_addGroup(str1,tempResults) -#endif endif cycle ! skip to next line endif @@ -228,21 +223,11 @@ plastic_j2_outputID(plastic_j2_Noutput(instance),instance) = flowstress_ID plastic_j2_output(plastic_j2_Noutput(instance),instance) = & IO_lc(IO_stringValue(line,chunkPos,2_pInt)) -#ifdef HDF - call HDF5_addScalarDataset(outID(instance),myConstituents,'flowstress','MPa') - allocate(plastic_j2_Output2(instance)%flowstress(myConstituents)) - plastic_j2_Output2(instance)%flowstressActive = .true. -#endif case ('strainrate') plastic_j2_Noutput(instance) = plastic_j2_Noutput(instance) + 1_pInt plastic_j2_outputID(plastic_j2_Noutput(instance),instance) = strainrate_ID plastic_j2_output(plastic_j2_Noutput(instance),instance) = & IO_lc(IO_stringValue(line,chunkPos,2_pInt)) -#ifdef HDF - call HDF5_addScalarDataset(outID(instance),myConstituents,'strainrate','1/s') - allocate(plastic_j2_Output2(instance)%strainrate(myConstituents)) - plastic_j2_Output2(instance)%strainrateActive = .true. -#endif case default end select diff --git a/src/plastic_none.f90 b/src/plastic_none.f90 index f624a80a2..a9007667f 100644 --- a/src/plastic_none.f90 +++ b/src/plastic_none.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -!
$Id$ -!-------------------------------------------------------------------------------------------------- !> @author Franz Roters, Max-Planck-Institut für Eisenforschung GmbH !> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH !> @brief material subroutine for purely elastic material diff --git a/src/plastic_nonlocal.f90 b/src/plastic_nonlocal.f90 index 1922c08e2..b699c57ed 100644 --- a/src/plastic_nonlocal.f90 +++ b/src/plastic_nonlocal.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Christoph Kords, Max-Planck-Institut für Eisenforschung GmbH !> @author Franz Roters, Max-Planck-Institut für Eisenforschung GmbH !> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH diff --git a/src/plastic_phenoplus.f90 b/src/plastic_phenoplus.f90 index 0a40edd84..0887da239 100644 --- a/src/plastic_phenoplus.f90 +++ b/src/plastic_phenoplus.f90 @@ -1,9 +1,7 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- -!> @author Franz Roters, Max-Planck-Institut für Eisenforschung GmbH -!> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH -!> @author Chen Zhang, Michigan State University +!> @author Franz Roters, Max-Planck-Institut für Eisenforschung GmbH +!> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH +!> @author Chen Zhang, Michigan State University !> @brief material subroutine for phenomenological crystal plasticity formulation using a powerlaw !... fitting !-------------------------------------------------------------------------------------------------- diff --git a/src/plastic_phenopowerlaw.f90 b/src/plastic_phenopowerlaw.f90 index 1f8e16250..63e078329 100644 --- a/src/plastic_phenopowerlaw.f90 +++ b/src/plastic_phenopowerlaw.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Franz Roters, Max-Planck-Institut für Eisenforschung GmbH !> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH !> @brief material subroutine for phenomenological crystal plasticity formulation using a powerlaw diff --git a/src/plastic_titanmod.f90 b/src/plastic_titanmod.f90 index abc6d661b..24bf543b7 100644 --- a/src/plastic_titanmod.f90 +++ b/src/plastic_titanmod.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Alankar Alankar, Max-Planck-Institut für Eisenforschung GmbH !> @author Franz Roters, Max-Planck-Institut für Eisenforschung GmbH !> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH diff --git a/src/porosity_none.f90 b/src/porosity_none.f90 index 69f10a5c6..92d4d9fe5 100644 --- a/src/porosity_none.f90 +++ b/src/porosity_none.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! 
$Id$ -!-------------------------------------------------------------------------------------------------- !> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH !> @brief material subroutine for constant porosity !-------------------------------------------------------------------------------------------------- diff --git a/src/porosity_phasefield.f90 b/src/porosity_phasefield.f90 index dc8b82b76..ec7e4c341 100644 --- a/src/porosity_phasefield.f90 +++ b/src/porosity_phasefield.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH !> @brief material subroutine for phase field modelling of pore nucleation and growth !> @details phase field model for pore nucleation and growth based on vacancy clustering diff --git a/src/prec.f90 b/src/prec.f90 index e39a32cfa..e099c8964 100644 --- a/src/prec.f90 +++ b/src/prec.f90 @@ -1,14 +1,12 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Franz Roters, Max-Planck-Institut für Eisenforschung GmbH !> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH !> @author Christoph Kords, Max-Planck-Institut für Eisenforschung GmbH !> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH !> @author Luv Sharma, Max-Planck-Institut für Eisenforschung GmbH -!> @brief setting precision for real and int type depending on makros "FLOAT" and "INT" +!> @brief setting precision for real and int type !> @details setting precision for real and int type and for DAMASK_NaN. Definition is made -!! depending on makros "FLOAT" and "INT" defined during compilation +!! depending on macro "INT" defined during compilation !!
for details on NaN see https://software.intel.com/en-us/forums/topic/294680 !-------------------------------------------------------------------------------------------------- module prec @@ -20,18 +18,7 @@ module prec implicit none private -#if (FLOAT==4) -#if defined(Spectral) || defined(FEM) - SPECTRAL SOLVER AND OWN FEM DO NOT SUPPORT SINGLE PRECISION, STOPPING COMPILATION -#endif - integer, parameter, public :: pReal = 4 !< floating point single precition (was selected_real_kind(6,37), number with 6 significant digits, up to 1e+-37) -#ifdef __INTEL_COMPILER - real(pReal), parameter, public :: DAMASK_NaN = Z'7F800001' !< quiet NaN for single precision (from http://www.hpc.unimelb.edu.au/doc/f90lrm/dfum_035.html, copy can be found in documentation/Code/Fortran) -#endif -#ifdef __GFORTRAN__ - real(pReal), parameter, public :: DAMASK_NaN = real(Z'7F800001', pReal) !< quiet NaN for single precision (from http://www.hpc.unimelb.edu.au/doc/f90lrm/dfum_035.html, copy can be found in documentation/Code/Fortran) -#endif -#elif (FLOAT==8) +#if (FLOAT==8) integer, parameter, public :: pReal = 8 !< floating point double precision (was selected_real_kind(15,300), number with 15 significant digits, up to 1e+-300) #ifdef __INTEL_COMPILER real(pReal), parameter, public :: DAMASK_NaN = Z'7FF8000000000000' !< quiet NaN for double precision (from http://www.hpc.unimelb.edu.au/doc/f90lrm/dfum_035.html, copy can be found in documentation/Code/Fortran) @@ -126,7 +113,9 @@ module prec public :: & prec_init, & - prec_isNaN + prec_isNaN, & + dEq, & + dNeq contains @@ -174,9 +163,9 @@ end subroutine prec_init !-------------------------------------------------------------------------------------------------- !> @brief figures out if a floating point number is NaN -! basically just a small wrapper, because gfortran < 4.9 does not have the IEEE module +! basically just a small wrapper, because gfortran < 5.0 does not have the IEEE module !-------------------------------------------------------------------------------------------------- -logical elemental function prec_isNaN(a) +logical elemental pure function prec_isNaN(a) implicit none real(pReal), intent(in) :: a @@ -189,4 +178,30 @@ logical elemental function prec_isNaN(a) #endif end function prec_isNaN + +!-------------------------------------------------------------------------------------------------- +!> @brief equality comparison for double precision +! replaces "==" but for certain (relative) tolerance. Counterpart to dNeq +! http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm +!-------------------------------------------------------------------------------------------------- +logical elemental pure function dEq(a,b,tol) + real(pReal), intent(in) :: a,b + real(pReal), intent(in), optional :: tol + real(pReal), parameter :: eps = 2.2204460492503131E-16 ! DBL_EPSILON in C + dEq = merge(.True., .False.,abs(a-b) <= merge(tol,eps,present(tol))*maxval(abs([a,b]))) +end function dEq
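dEq and its counterpart dNeq below compare doubles against a tolerance that scales with the larger magnitude of the two operands, following the linked Dawson article, so 0.1+0.2 compares equal to 0.3 even though the bit patterns differ. A standalone sketch of the same comparison; present(tol) is queried explicitly here instead of the merge() construct used in the hunk, since evaluating an absent optional argument inside merge is not strictly standard-conforming:

program deq_demo
 implicit none
 integer, parameter :: pReal = selected_real_kind(15)
 real(pReal), parameter :: eps = 2.2204460492503131E-16_pReal   ! DBL_EPSILON in C
 real(pReal) :: a, b

 a = 0.1_pReal+0.2_pReal
 b = 0.3_pReal
 write(6,*) a == b       ! F: exact comparison fails by one ulp
 write(6,*) dEq(a,b)     ! T: relative comparison succeeds
contains
 logical pure function dEq(a,b,tol)
   real(pReal), intent(in) :: a, b
   real(pReal), intent(in), optional :: tol
   real(pReal) :: myTol
   myTol = eps                                    ! default: machine epsilon
   if (present(tol)) myTol = tol
   dEq = abs(a-b) <= myTol*maxval(abs([a,b]))     ! tolerance relative to the larger magnitude
 end function dEq
end program deq_demo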
+ +!-------------------------------------------------------------------------------------------------- +!> @brief inequality comparison for double precision +! replaces "!=" but for certain (relative) tolerance. Counterpart to dEq +! http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm +!-------------------------------------------------------------------------------------------------- +logical elemental pure function dNeq(a,b,tol) + real(pReal), intent(in) :: a,b + real(pReal), intent(in), optional :: tol + real(pReal), parameter :: eps = 2.2204460492503131E-16 ! DBL_EPSILON in C + dNeq = merge(.False., .True.,abs(a-b) <= merge(tol,eps,present(tol))*maxval(abs([a,b]))) +end function dNeq + end module prec diff --git a/src/source_damage_anisoBrittle.f90 b/src/source_damage_anisoBrittle.f90 index a751eefdc..efd76091e 100644 --- a/src/source_damage_anisoBrittle.f90 +++ b/src/source_damage_anisoBrittle.f90 @@ -1,8 +1,6 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- -!> @author Luv Sharma, Max-Planck-Institut fŸr Eisenforschung GmbH -!> @author Pratheek Shanthraj, Max-Planck-Institut fŸr Eisenforschung GmbH +!> @author Luv Sharma, Max-Planck-Institut für Eisenforschung GmbH +!> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH !> @brief material subroutine incorporating anisotropic brittle damage source mechanism !> @details to be done !-------------------------------------------------------------------------------------------------- diff --git a/src/source_damage_anisoDuctile.f90 b/src/source_damage_anisoDuctile.f90 index 028fd479a..72480382a 100644 --- a/src/source_damage_anisoDuctile.f90 +++ b/src/source_damage_anisoDuctile.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Luv Sharma, Max-Planck-Institut für Eisenforschung GmbH !> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH !> @brief material subroutine incorporating anisotropic ductile damage source mechanism !> @details to be done diff --git a/src/source_damage_isoBrittle.f90 b/src/source_damage_isoBrittle.f90 index c063ae86f..1603ecf48 100644 --- a/src/source_damage_isoBrittle.f90 +++ b/src/source_damage_isoBrittle.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH !> @author Luv Sharma, Max-Planck-Institut für Eisenforschung GmbH !> @brief material subroutine incorporating isotropic brittle damage source mechanism !> @details to be done diff --git a/src/source_damage_isoDuctile.f90 b/src/source_damage_isoDuctile.f90 index b0290264c..3a85bab24 100644 --- a/src/source_damage_isoDuctile.f90 +++ b/src/source_damage_isoDuctile.f90 @@ -1,8 +1,6 @@ !-------------------------------------------------------------------------------------------------- -!
$Id$ -!-------------------------------------------------------------------------------------------------- -!> @author Pratheek Shanthraj, Max-Planck-Institut fŸr Eisenforschung GmbH -!> @author Luv Sharma, Max-Planck-Institut fŸr Eisenforschung GmbH +!> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH +!> @author Luv Sharma, Max-Planck-Institut für Eisenforschung GmbH !> @brief material subroutine incorporating isotropic ductile damage source mechanism !> @details to be done !-------------------------------------------------------------------------------------------------- diff --git a/src/source_thermal_dissipation.f90 b/src/source_thermal_dissipation.f90 index 83ad85167..dd6453db0 100644 --- a/src/source_thermal_dissipation.f90 +++ b/src/source_thermal_dissipation.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH !> @brief material subroutine for thermal source due to plastic dissipation !> @details to be done diff --git a/src/source_thermal_externalheat.f90 b/src/source_thermal_externalheat.f90 index 257012c06..203826205 100644 --- a/src/source_thermal_externalheat.f90 +++ b/src/source_thermal_externalheat.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH !> @brief material subroutine for thermal source due to external heat !> @details to be done diff --git a/src/source_vacancy_irradiation.f90 b/src/source_vacancy_irradiation.f90 index c4bcfba04..fd7220020 100644 --- a/src/source_vacancy_irradiation.f90 +++ b/src/source_vacancy_irradiation.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH !> @brief material subroutine for vacancy generation due to irradiation !> @details to be done diff --git a/src/source_vacancy_phenoplasticity.f90 b/src/source_vacancy_phenoplasticity.f90 index f9e766b2c..2690cf691 100644 --- a/src/source_vacancy_phenoplasticity.f90 +++ b/src/source_vacancy_phenoplasticity.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH !> @brief material subroutine for vacancy generation due to plasticity !> @details to be done diff --git a/src/source_vacancy_thermalfluc.f90 b/src/source_vacancy_thermalfluc.f90 index c86406430..5891ff764 100644 --- a/src/source_vacancy_thermalfluc.f90 +++ b/src/source_vacancy_thermalfluc.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -!
$Id$ -!-------------------------------------------------------------------------------------------------- !> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH !> @brief material subroutine for vacancy generation due to thermal fluctuations !> @details to be done diff --git a/src/spectral_damage.f90 b/src/spectral_damage.f90 index 0b79d5e5d..e2bb80b31 100644 --- a/src/spectral_damage.f90 +++ b/src/spectral_damage.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id: spectral_damage.f90 4082 2015-04-11 20:28:07Z MPIE\m.diehl $ -!-------------------------------------------------------------------------------------------------- !> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH !> @author Shaokang Zhang, Max-Planck-Institut für Eisenforschung GmbH !> @brief Spectral solver for nonlocal damage diff --git a/src/spectral_interface.f90 b/src/spectral_interface.f90 index b24c5f747..ad0ee4082 100644 --- a/src/spectral_interface.f90 +++ b/src/spectral_interface.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH !> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH !> @brief Interfacing between the spectral solver and the material subroutines provided diff --git a/src/spectral_mech_AL.f90 b/src/spectral_mech_AL.f90 index a937dcc86..b6a8c9353 100644 --- a/src/spectral_mech_AL.f90 +++ b/src/spectral_mech_AL.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH !> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH !> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH diff --git a/src/spectral_mech_Basic.f90 b/src/spectral_mech_Basic.f90 index a8344fabe..358a095d1 100644 --- a/src/spectral_mech_Basic.f90 +++ b/src/spectral_mech_Basic.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH !> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH !> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH diff --git a/src/spectral_mech_Polarisation.f90 b/src/spectral_mech_Polarisation.f90 index a28eb5adb..d7f1599e5 100644 --- a/src/spectral_mech_Polarisation.f90 +++ b/src/spectral_mech_Polarisation.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! 
$Id$ -!-------------------------------------------------------------------------------------------------- !> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH !> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH !> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH diff --git a/src/spectral_thermal.f90 b/src/spectral_thermal.f90 index 843642394..83e290a4c 100644 --- a/src/spectral_thermal.f90 +++ b/src/spectral_thermal.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id: spectral_thermal.f90 4082 2015-04-11 20:28:07Z MPIE\m.diehl $ -!-------------------------------------------------------------------------------------------------- !> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH !> @author Shaokang Zhang, Max-Planck-Institut für Eisenforschung GmbH !> @brief Spectral solver for thermal conduction diff --git a/src/spectral_utilities.f90 b/src/spectral_utilities.f90 index bde088ccb..8344ff7ce 100644 --- a/src/spectral_utilities.f90 +++ b/src/spectral_utilities.f90 @@ -1,7 +1,3 @@ -!-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- -!> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH !> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH !> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH !> @brief Utilities used by the different spectral solver variants @@ -1132,6 +1128,8 @@ end function utilities_forwardField !-------------------------------------------------------------------------------------------------- !> @brief calculates filter for fourier convolution depending on type given in numerics.config +!> @details this is the full operator to calculate derivatives, i.e. 2 \pi i k for the +! standard approach !-------------------------------------------------------------------------------------------------- pure function utilities_getFreqDerivative(k_s) use math, only: & @@ -1141,7 +1139,7 @@ pure function utilities_getFreqDerivative(k_s) grid implicit none - integer(pInt), intent(in), dimension(3) :: k_s !< indices of frequency + integer(pInt), intent(in), dimension(3) :: k_s !< indices of frequency complex(pReal), dimension(3) :: utilities_getFreqDerivative select case (spectral_derivative_ID)
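For the standard (continuous) approach documented above, utilities_getFreqDerivative returns the Fourier-space derivative operator 2 pi i k_j/L_j for frequency index k_s on a periodic cell of size L. A standalone sketch of that operator for a hypothetical unit cell (geomSize and the single-mode choice are illustrative; the solver's FFTW index bookkeeping is omitted):

program freq_derivative_demo
 implicit none
 integer, parameter :: pReal = selected_real_kind(15)
 real(pReal), parameter :: PI = 3.14159265358979323846_pReal
 integer, dimension(3) :: k_s
 real(pReal), dimension(3) :: geomSize
 complex(pReal), dimension(3) :: freqDerivative
 integer :: j

 geomSize = [1.0_pReal,1.0_pReal,1.0_pReal]   ! hypothetical periodic cell dimensions
 k_s = [1,0,0]                                ! first frequency along x
 do j = 1, 3                                  ! continuous derivative in Fourier space: d/dx_j <-> 2 pi i k_j/L_j
   freqDerivative(j) = cmplx(0.0_pReal,2.0_pReal*PI*real(k_s(j),pReal)/geomSize(j),pReal)
 enddo
 write(6,*) freqDerivative(1)                 ! (0.000..., 6.2831...)
end program freq_derivative_demo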
diff --git a/src/thermal_adiabatic.f90 b/src/thermal_adiabatic.f90 index 7bb8620e7..e2d26fbb1 100644 --- a/src/thermal_adiabatic.f90 +++ b/src/thermal_adiabatic.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH !> @brief material subroutine for adiabatic temperature evolution !> @details to be done diff --git a/src/thermal_conduction.f90 b/src/thermal_conduction.f90 index 2f9b766eb..c85923050 100644 --- a/src/thermal_conduction.f90 +++ b/src/thermal_conduction.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH !> @brief material subroutine for temperature evolution from heat conduction !> @details to be done diff --git a/src/thermal_isothermal.f90 b/src/thermal_isothermal.f90 index 8c9d3a782..a3ac8f685 100644 --- a/src/thermal_isothermal.f90 +++ b/src/thermal_isothermal.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH !> @brief material subroutine for isothermal temperature field !-------------------------------------------------------------------------------------------------- diff --git a/src/vacancyflux_cahnhilliard.f90 b/src/vacancyflux_cahnhilliard.f90 index 16a380ffc..be68e24a0 100644 --- a/src/vacancyflux_cahnhilliard.f90 +++ b/src/vacancyflux_cahnhilliard.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH !> @brief material subroutine for conservative transport of vacancy concentration field !> @details to be done diff --git a/src/vacancyflux_isochempot.f90 b/src/vacancyflux_isochempot.f90 index 35db8d159..286eb37b7 100644 --- a/src/vacancyflux_isochempot.f90 +++ b/src/vacancyflux_isochempot.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH !> @brief material subroutine for locally evolving vacancy concentration !> @details to be done diff --git a/src/vacancyflux_isoconc.f90 b/src/vacancyflux_isoconc.f90 index 63cfb1b62..c32cb648d 100644 --- a/src/vacancyflux_isoconc.f90 +++ b/src/vacancyflux_isoconc.f90 @@ -1,6 +1,4 @@ !-------------------------------------------------------------------------------------------------- -! $Id$ -!-------------------------------------------------------------------------------------------------- !> @author Pratheek Shanthraj, Max-Planck-Institut für Eisenforschung GmbH !> @brief material subroutine for constant vacancy concentration !--------------------------------------------------------------------------------------------------