Outsourced multiple repetitive functions into the ASCIItable class.

Changed the ASCIItable API from file handles to filenames.

Adopted these changes in the pre- and post-processing scripts.

Unified behavior and look.

Fixed assorted bugs.

Improved functionality.
Philip Eisenlohr 2015-08-07 19:03:26 +00:00
parent 563d9e64dd
commit d4e748b654
52 changed files with 4015 additions and 4152 deletions
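
The central API change can be seen in one line of each script below: instead of opening file handles and passing them in, callers now hand file names to ASCIItable. A minimal before/after sketch using the call patterns from these diffs (`name` is a file name taken from the command line):

  # old API: caller opens files and passes handles
  table = damask.ASCIItable(open(name), open(name+'_tmp','w'), False)

  # new API: caller passes names; 'STDIN' selects sys.stdin/sys.stdout
  table = damask.ASCIItable(name = name, outname = name+'_tmp',
                            buffered = False)
  table.croak('status message')    # reporting now goes through the table itself (writes to sys.stderr)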

View File

@@ -12,40 +12,60 @@ class ASCIItable():
   __slots__ = ['__IO__',
                'info',
-               'labels',
+               'labeled',
                'data',
               ]
 
 # ------------------------------------------------------------------
   def __init__(self,
-               fileIn = sys.stdin,
-               fileOut = sys.stdout,
-               buffered = False,                    # flush writes
-               labels = True):                      # assume table has labels
-    self.__IO__ = {'in': fileIn,
-                   'out':fileOut,
-                   'output':[],
+               name = 'STDIN',
+               outname = None,
+               buffered = False,                    # flush writes
+               labeled = True,                      # assume table has labels
+               readonly = False,                    # no reading from file
+               writeonly = False,                   # no writing to file
+              ):
+    self.__IO__ = {'output': [],
                    'buffered': buffered,
-                   'labels':labels,
-                   'validReadSize': 0,
+                   'labeled': labeled,              # header contains labels
+                   'labels': [],                    # labels according to file info
                    'readBuffer': [],                # buffer to hold non-advancing reads
                    'dataStart': 0,
                   }
+    self.__IO__ .update({'in':  sys.stdin,
+                         'out': sys.stdout,
+                        } if name == 'STDIN' else
+                        {'in':  sys.stdin  if writeonly else open(name,'r'),
+                         'out': sys.stdout if readonly  else open(outname,'w'),
+                        }
+                       )
 
     self.info = []
     self.labels = []
     self.data = []
 
 # ------------------------------------------------------------------
-  def _transliterateToFloat(self,x):
+  def _transliterateToFloat(self,
+                            x):
     try:
       return float(x)
     except:
       return 0.0
 
 # ------------------------------------------------------------------
-  def close(self,dismiss = False):
+  def croak(self,
+            what, newline = True):
+    sys.stderr.write(('\n'.join(map(str,what)) if not hasattr(what, "strip")
+                                                  and hasattr(what, "__getitem__")
+                                                  or  hasattr(what, "__iter__") else str(what))
+                     +('\n' if newline else '')),
+
+# ------------------------------------------------------------------
+  def close(self,
+            dismiss = False):
     self.input_close()
+    self.output_flush()
     self.output_close(dismiss)
 
 # ------------------------------------------------------------------
@@ -86,7 +106,8 @@ class ASCIItable():
     self.__IO__['output'] = []
 
 # ------------------------------------------------------------------
-  def output_close(self, dismiss = False):
+  def output_close(self,
+                   dismiss = False):
     try:
       self.__IO__['out'].close()
     except:
@@ -96,50 +117,95 @@ class ASCIItable():
 # ------------------------------------------------------------------
   def head_read(self):
     '''
-       get column labels by either read the first row, or
-       --if keyword "head[*]" is present-- the last line of the header
+       get column labels by either reading
+       the first row or, if keyword "head[*]" is present,
+       the last line of the header
    '''
     import re
 
     try:
       self.__IO__['in'].seek(0)
     except:
       pass
 
     firstline = self.__IO__['in'].readline()
-    m = re.search('(\d+)\s+head', firstline.lower())
-    if self.__IO__['labels']:                       # table features labels
+    m = re.search('(\d+)\s+head', firstline.lower())   # search for "head" keyword
+    if self.__IO__['labeled']:                      # table features labels
       if m:                                         # found header info
         self.info   = [self.__IO__['in'].readline().strip() for i in xrange(1,int(m.group(1)))]
-        self.labels = self.__IO__['in'].readline().split()
+        self.labels = self.__IO__['in'].readline().split()   # store labels found in last line
       else:                                         # no header info (but labels)
-        self.labels = firstline.split()
-      self.__IO__['validReadSize'] = len(self.labels)
+        self.labels = firstline.split()             # store labels from first line
+      self.__IO__['labels'] = list(self.labels)     # backup labels (make COPY, not link)
     else:                                           # no labels present in table
       if m:                                         # found header info
-        self.info = [self.__IO__['in'].readline().strip() for i in xrange(0,int(m.group(1)))]   # all header is info
+        self.info = [self.__IO__['in'].readline().strip() for i in xrange(0,int(m.group(1)))]   # all header is info ...
                                                     # ... without any labels
+      else:                                         # otherwise file starts with data right away
+        try:
+          self.__IO__['in'].seek(0)                 # try to rewind
+        except:
+          self.__IO__['readBuffer'] = firstline     # or at least save data in buffer
 
     try:
       self.__IO__['dataStart'] = self.__IO__['in'].tell()   # current file position is at start of data
     except(IOError):
       pass
 
 # ------------------------------------------------------------------
-  def head_write(self):
+  def head_write(self,
+                 header = True):
     '''
        write current header information (info + labels)
     '''
-    if self.__IO__['labels']:
-      return self.output_write ([
-                                 '%i\theader'%(len(self.info)+1),
-                                 self.info,
-                                 '\t'.join(self.labels),
-                                ])
-    else:
-      return self.output_write ([
-                                 '%i\theader'%(len(self.info)),
-                                 self.info,
-                                ])
+    head = ['{}\theader'.format(len(self.info)+self.__IO__['labeled'])] if header else []
+    head.append(self.info)
+    if self.__IO__['labeled']: head.append('\t'.join(self.labels))
+
+    return self.output_write(head)
+
+# ------------------------------------------------------------------
+  def head_getGeom(self):
+    '''
+       interpret geom header
+    '''
+    identifiers = {
+            'grid':   ['a','b','c'],
+            'size':   ['x','y','z'],
+            'origin': ['x','y','z'],
+                  }
+    mappings = {
+            'grid':            lambda x: int(x),
+            'size':            lambda x: float(x),
+            'origin':          lambda x: float(x),
+            'homogenization':  lambda x: int(x),
+            'microstructures': lambda x: int(x),
+               }
+    info = {
+            'grid':            np.zeros(3,'i'),
+            'size':            np.zeros(3,'d'),
+            'origin':          np.zeros(3,'d'),
+            'homogenization':  0,
+            'microstructures': 0,
+           }
+    extra_header = []
+
+    for header in self.info:
+      headitems = map(str.lower,header.split())
+      if len(headitems) == 0: continue              # skip blank lines
+      if headitems[0] in mappings.keys():
+        if headitems[0] in identifiers.keys():
+          for i in xrange(len(identifiers[headitems[0]])):
+            info[headitems[0]][i] = \
+              mappings[headitems[0]](headitems[headitems.index(identifiers[headitems[0]][i])+1])
+        else:
+          info[headitems[0]] = mappings[headitems[0]](headitems[1])
+      else:
+        extra_header.append(header)
+
+    return info,extra_header
 
 # ------------------------------------------------------------------
   def labels_append(self,
@@ -155,7 +221,7 @@ class ASCIItable():
     else:
       self.labels += [what]
 
-    self.__IO__['labels'] = True                    # switch on processing (in particular writing) of labels
+    self.__IO__['labeled'] = True                   # switch on processing (in particular writing) of labels
 
 # ------------------------------------------------------------------
   def labels_clear(self):
@@ -163,7 +229,7 @@ class ASCIItable():
        delete existing labels and switch to no labeling
     '''
     self.labels = []
-    self.__IO__['labels'] = False
+    self.__IO__['labeled'] = False
 
 # ------------------------------------------------------------------
   def label_index(self,
@@ -255,6 +321,23 @@ class ASCIItable():
     return np.array(dim) if isinstance(dim,list) else dim
 
+# ------------------------------------------------------------------
+  def label_indexrange(self,
+                       labels):
+    '''
+       tell index range for given label(s).
+       return numpy array if asked for list of labels.
+       transparently deals with label positions implicitly given as numbers or their headings given as strings.
+    '''
+    from collections import Iterable
+
+    start = self.label_index(labels)
+    dim   = self.label_dimension(labels)
+
+    return map(lambda a,b: xrange(a,a+b), zip(start,dim)) if isinstance(labels, Iterable) and not isinstance(labels, str) \
+      else xrange(start,start+dim)
+
 # ------------------------------------------------------------------
   def info_append(self,
                   what):
@@ -280,9 +363,12 @@ class ASCIItable():
   def data_rewind(self):
     self.__IO__['in'].seek(self.__IO__['dataStart'])   # position file to start of data section
     self.__IO__['readBuffer'] = []                  # delete any non-advancing data reads
+    self.labels = list(self.__IO__['labels'])       # restore label info found in header (as COPY, not link)
+    self.__IO__['labeled'] = len(self.labels) > 0
 
 # ------------------------------------------------------------------
-  def data_skipLines(self,count):
+  def data_skipLines(self,
+                     count):
     '''
        wind forward by count number of lines
     '''
@@ -292,7 +378,8 @@ class ASCIItable():
     return alive
 
 # ------------------------------------------------------------------
-  def data_read(self,advance = True):
+  def data_read(self,
+                advance = True):
     '''
        read next line (possibly buffered) and parse it into data array
     '''
@@ -304,24 +391,14 @@ class ASCIItable():
     if not advance:
       self.__IO__['readBuffer'].append(line)        # keep line just read in buffer
 
-    if self.__IO__['labels']:
-      items = line.split()[:self.__IO__['validReadSize']]   # use up to valid size (label count)
-      self.data = items if len(items) == self.__IO__['validReadSize'] else []   # take if correct number of entries
+    if self.__IO__['labeled']:                      # if table has labels
+      items = line.split()[:len(self.__IO__['labels'])]     # use up to label count (from original file info)
+      self.data = items if len(items) == len(self.__IO__['labels']) else []   # take entries if correct number, i.e. not too few compared to label count
     else:
-      self.data = line.split()                      # take all
+      self.data = line.split()                      # otherwise take all
 
     return self.data != []
 
-# ------------------------------------------------------------------
-  def data_readLine(self,line):
-    '''
-       seek beginning of data and wind forward to selected line
-    '''
-    self.__IO__['in'].seek(self.__IO__['dataStart'])
-    for i in xrange(line-1):
-      self.__IO__['in'].readline()
-    self.data_read()
-
 # ------------------------------------------------------------------
   def data_readArray(self,
                      labels = []):
@@ -329,36 +406,37 @@ class ASCIItable():
        read whole data of all (given) labels as numpy array
     '''
-    if not isinstance(labels,list):
-      labels = [labels]
-    if labels == [None] or labels == []:
+    try:
+      self.data_rewind()                            # try to wind back to start of data
+    except:
+      pass                                          # assume/hope we are at data start already...
+
+    if labels == None or labels == []:
       use = None                                    # use all columns (and keep labels intact)
       labels_missing = []
     else:
-      indices = self.label_index(labels)            # check requested labels
+      indices    = self.label_index(labels)         # check requested labels ...
+      dimensions = self.label_dimension(labels)     # ... and remember their dimension
       present  = np.where(indices >= 0)[0]          # positions in request list of labels that are present ...
       missing  = np.where(indices <  0)[0]          # ... and missing in table
       labels_missing = np.array(labels)[missing]    # labels of missing data
 
       columns = []
-      for i,c in enumerate(indices[present]):       # for all valid labels ...
+      for i,(c,d) in enumerate(zip(indices[present],dimensions[present])):   # for all valid labels ...
         columns += range(c,c + \
-                          (self.label_dimension(c) if str(c) != str(labels[present[i]]) \
-                                                   else 1))   # ... transparently add all components unless column referenced by number
+                          (d if str(c) != str(labels[present[i]]) else \
+                           1))                      # ... transparently add all components unless column referenced by number or with explicit dimension
       use = np.array(columns)
 
-      self.labels = list(np.array(self.labels)[use]) if use != [] else []   # ... for missing and present columns
-      self.__IO__['validReadSize'] = len(use)       # update data width
-
-    try:
-      self.data_rewind()                            # try to wind back to start of data
-    except:
-      pass                                          # assume/hope we are at data start already...
+      self.labels = list(np.array(self.labels)[use])   # update labels with valid subset
 
     self.data = np.loadtxt(self.__IO__['in'], usecols=use,ndmin=2)
+
     return labels_missing
 
 # ------------------------------------------------------------------
-  def data_write(self,delimiter = '\t'):
+  def data_write(self,
+                 delimiter = '\t'):
     '''
        write current data array and report alive output back
     '''
@@ -370,7 +448,8 @@ class ASCIItable():
     return self.output_write(delimiter.join(map(str,self.data)))
 
 # ------------------------------------------------------------------
-  def data_writeArray(self,format = '%g',delimiter = '\t'):
+  def data_writeArray(self,
+                      format = '%g', delimiter = '\t'):
     '''
        write whole numpy array data
     '''
@@ -390,9 +469,12 @@ class ASCIItable():
 # ------------------------------------------------------------------
   def data_set(self,
                what, where):
+    '''
+       update data entry in column "where". grows data array if needed.
+    '''
     idx = -1
     try:
-      idx = self.labels.index(where)
+      idx = self.label_index(where)
       if len(self.data) <= idx:
         self.data_append(['n/a' for i in xrange(idx+1-len(self.data))])   # grow data if too short
       self.data[idx] = str(what)
@@ -408,3 +490,30 @@ class ASCIItable():
 # ------------------------------------------------------------------
   def data_asFloat(self):
     return map(self._transliterateToFloat,self.data)
+
+# ------------------------------------------------------------------
+  def microstructure_read(self,
+                          grid):
+    '''
+       read microstructure data (from .geom format)
+    '''
+    N = grid.prod()                                 # expected number of microstructure indices in data
+    microstructure = np.zeros(N,'i')                # initialize as flat array
+
+    i = 0
+    while i < N and self.data_read():
+      items = self.data
+      if len(items) > 2:
+        if   items[1].lower() == 'of': items = [int(items[2])]*int(items[0])
+        elif items[1].lower() == 'to': items = range(int(items[0]),1+int(items[2]))
+        else:                          items = map(int,items)
+      else:                            items = map(int,items)
+
+      s = min(len(items), N-i)                      # prevent overflow of microstructure array
+      microstructure[i:i+s] = items[:s]
+      i += s
+
+    return microstructure
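
The new microstructure_read understands the run-length notations of the .geom data section: "n of m" repeats an index, "a to b" expands a range. A stand-alone sketch of that expansion rule, mirroring the branch above (an illustration, not the DAMASK implementation itself):

  def expand(tokens):
      # mirrors the 'of'/'to' shorthand handled by microstructure_read
      if len(tokens) > 2 and tokens[1].lower() == 'of':
          return [int(tokens[2])] * int(tokens[0])                # '4 of 2' -> [2, 2, 2, 2]
      if len(tokens) > 2 and tokens[1].lower() == 'to':
          return list(range(int(tokens[0]), 1 + int(tokens[2])))  # '1 to 5' -> [1, 2, 3, 4, 5]
      return [int(t) for t in tokens]                             # plain list of indices

  assert expand('4 of 2'.split()) == [2, 2, 2, 2]
  assert expand('1 to 5'.split()) == [1, 2, 3, 4, 5]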

View File

@@ -18,104 +18,111 @@ def unravel(item):
 # --------------------------------------------------------------------
 parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
-Add column(s) with derived values according to user defined arithmetic operation between column(s).
-Columns can be specified either by label or index. Use ';' for ',' in functions.
+Add column(s) with derived values according to user-defined arithmetic operation between column(s).
+Column labels are tagged by '#label#' in formulas. Use ';' for ',' in functions.
 Numpy is available as np.
-Example: distance to IP coordinates -- "math.sqrt( #ip.x#**2 + #ip.y#**2 + round(#ip.z#;3)**2 )"
+Special variables: #_row_# -- row index
+Examples: (1) magnitude of vector -- "np.linalg.norm(#vec#)" (2) rounded root of row number -- "round(math.sqrt(#_row_#);3)"
 """, version = scriptID)
 
-parser.add_option('-l','--label', dest='labels', action='extend', metavar='<string LIST>',
+parser.add_option('-l','--label',
+                  dest = 'labels',
+                  action = 'extend', metavar = '<string LIST>',
                   help = '(list of) new column labels')
-parser.add_option('-f','--formula', dest='formulas', action='extend', metavar='<string LIST>',
+parser.add_option('-f','--formula',
+                  dest = 'formulas',
+                  action = 'extend', metavar = '<string LIST>',
                   help = '(list of) formulas corresponding to labels')
 
 (options,filenames) = parser.parse_args()
 
 if options.labels == None or options.formulas == None:
-  parser.error('no formulas and/or labels specified')
-elif len(options.labels) != len(options.formulas):
-  parser.error('number of labels (%i) and formulas (%i) do not match'%(len(options.labels),len(options.formulas)))
+  parser.error('no formulas and/or labels specified.')
+if len(options.labels) != len(options.formulas):
+  parser.error('number of labels ({}) and formulas ({}) do not match.'.format(len(options.labels),len(options.formulas)))
 
 for i in xrange(len(options.formulas)):
   options.formulas[i] = options.formulas[i].replace(';',',')
 
-# ------------------------------------------ setup file handles ------------------------------------
-files = []
-if filenames == []:
-  files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
-else:
-  for name in filenames:
-    if os.path.exists(name):
-      files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
+# --- loop over input files -------------------------------------------------------------------------
 
-#--- loop over input files ------------------------------------------------------------------------
-for file in files:
-  if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
-  else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
+if filenames == []: filenames = ['STDIN']
+
+for name in filenames:
+  if not (name == 'STDIN' or os.path.exists(name)): continue
+  table = damask.ASCIItable(name = name, outname = name+'_tmp',
+                            buffered = False)
+  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
+
+# ------------------------------------------ read header -------------------------------------------
+
+  table.head_read()
+
+# ------------------------------------------ build formulae ----------------------------------------
 
   specials = { \
               '_row_': 0,
              }
 
-  table = damask.ASCIItable(file['input'],file['output'],False)   # make unbuffered ASCII_table
-  table.head_read()                                               # read ASCII header info
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-
   evaluator = {}
   brokenFormula = {}
 
   for label,formula in zip(options.labels,options.formulas):
-    interpolator = []
     for column in re.findall(r'#(.+?)#',formula):                 # loop over column labels in formula
-      formula = formula.replace('#'+column+'#','%f')
+      idx = table.label_index(column)
+      dim = table.label_dimension(column)
       if column in specials:
-        interpolator += ['specials["%s"]'%column]
-      elif column.isdigit():
-        if len(table.labels) > int(column):
-          interpolator += ['float(table.data[%i])'%(int(column))]
-        else:
-          file['croak'].write('column %s not found...\n'%column)
-          brokenFormula[label] = True
+        replacement = 'specials["{}"]'.format(column)
+      elif dim == 1:                                              # scalar input
+        replacement = 'float(table.data[{}])'.format(idx)         # take float value of data column
+      elif dim > 1:                                               # multidimensional input (vector, tensor, etc.)
+        replacement = 'np.array(table.data[{}:{}],dtype=float)'.format(idx,idx+dim)   # use (flat) array representation
       else:
-        try:
-          interpolator += ['float(table.data[%i])'%table.labels.index(column)]
-        except:
-          file['croak'].write('column %s not found...\n'%column)
-          brokenFormula[label] = True
+        table.croak('column {} not found...'.format(column))
+        brokenFormula[label] = True
+        break
+      formula = formula.replace('#'+column+'#',replacement)
 
     if label not in brokenFormula:
-      evaluator[label] = "'" + formula + "'%(" + ','.join(interpolator) + ")"
+      evaluator[label] = formula
 
 # ------------------------------------------ process data ------------------------------------------
 
   firstLine   = True
   outputAlive = True
 
   while outputAlive and table.data_read():                        # read next data line of ASCII table
     specials['_row_'] += 1                                        # count row
 
 # ------------------------------------------ calculate one result to get length of labels ---------
 
     if firstLine:
-      labelLen = {}
-      for label in options.labels:
-        labelLen[label] = np.size(eval(eval(evaluator[label])))
+      firstLine = False
+      labelDim = {}
+      for label in [x for x in options.labels if x not in set(brokenFormula)]:
+        labelDim[label] = np.size(eval(evaluator[label]))
+        if labelDim[label] == 0: brokenFormula[label] = True
 
 # ------------------------------------------ assemble header ---------------------------------------
 
-      for label,formula in zip(options.labels,options.formulas):
-        if labelLen[label] == 0:
-          brokenFormula[label] = True
-        if label not in brokenFormula:
-          table.labels_append(['%i_%s'%(i+1,label) for i in xrange(labelLen[label])] if labelLen[label]>1
-                              else label)
-      table.head_write()
-      firstLine = False
+        if label not in brokenFormula:
+          table.labels_append(['{}_{}'.format(i+1,label) for i in xrange(labelDim[label])] if labelDim[label] > 1
+                              else label)
+      table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
+      table.head_write()
 
-    for label in options.labels: table.data_append(unravel(eval(eval(evaluator[label]))))
+# ------------------------------------------ process data ------------------------------------------
+
+    for label in [x for x in options.labels if x not in set(brokenFormula)]:
+      table.data_append(unravel(eval(evaluator[label])))
+#      table.data_append(unravel(eval(eval(evaluator[label]))))
     outputAlive = table.data_write()                              # output processed line
 
-# ------------------------------------------ output result -----------------------------------------
-  outputAlive and table.output_flush()                            # just in case of buffered ASCII table
+# ------------------------------------------ output finalization -----------------------------------
 
-  table.input_close()                                             # close input ASCII table (works for stdin)
-  table.output_close()                                            # close output ASCII table (works for stdout)
-  if file['name'] != 'STDIN':
-    os.rename(file['name']+'_tmp',file['name'])                   # overwrite old one with tmp new
+  table.close()                                                   # close ASCII tables
+  if name != 'STDIN': os.rename(name+'_tmp',name)                 # overwrite old one with tmp new
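
The rewritten substitution step turns each '#label#' tag into a Python expression that is later eval()ed per data row: scalar columns become float(...), multidimensional columns become flat numpy slices. A sketch of that translation with assumed column positions (index 3 for a 9-component 'f', index 12 for a scalar 'x' -- both hypothetical):

  formula = 'np.linalg.norm(#f#) + #x#'
  replacements = {'f': 'np.array(table.data[3:12],dtype=float)',   # dim 9 at assumed index 3
                  'x': 'float(table.data[12])'}                    # dim 1 at assumed index 12
  for column,replacement in replacements.items():
      formula = formula.replace('#'+column+'#',replacement)
  # formula is now a plain Python expression, ready for eval() against the current table.data row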

View File

@@ -18,59 +18,67 @@ Add column(s) containing Cauchy stress based on given column(s) of deformation g
 """, version = scriptID)
 
-parser.add_option('-f','--defgrad', dest='defgrad', metavar='string',
+parser.add_option('-f','--defgrad',
+                  dest = 'defgrad',
+                  type = 'string', metavar = 'string',
                   help = 'heading of columns containing deformation gradient [%default]')
-parser.add_option('-p','--stress', dest='stress', metavar='string',
+parser.add_option('-p','--stress',
+                  dest = 'stress',
+                  type = 'string', metavar = 'string',
                   help = 'heading of columns containing first Piola--Kirchhoff stress [%default]')
-parser.set_defaults(defgrad = 'f')
-parser.set_defaults(stress = 'p')
+
+parser.set_defaults(defgrad = 'f',
+                    stress  = 'p',
+                   )
 
 (options,filenames) = parser.parse_args()
 
-# ------------------------------------------ setup file handles ------------------------------------
-files = []
-if filenames == []:
-  files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
-else:
-  for name in filenames:
-    if os.path.exists(name):
-      files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
+# --- loop over input files -------------------------------------------------------------------------
 
-# ------------------------------------------ loop over input files ---------------------------------
-for file in files:
-  if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
-  else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
+if filenames == []: filenames = ['STDIN']
 
-  table = damask.ASCIItable(file['input'],file['output'],False)   # make unbuffered ASCII_table
-  table.head_read()                                               # read ASCII header info
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
+for name in filenames:
+  if not (name == 'STDIN' or os.path.exists(name)): continue
+  table = damask.ASCIItable(name = name, outname = name+'_tmp',
+                            buffered = False)
+  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
 
-# --------------- figure out columns to process ---------------------------------------------------
-  missingColumns = False
+# ------------------------------------------ read header ------------------------------------------
 
-  column={ 'defgrad': table.labels.index('1_'+options.defgrad),
-           'stress':  table.labels.index('1_'+options.stress)}
-  for key in column:
-    if column[key]<1:
-      file['croak'].write('column %s not found...\n'%key)
-      missingColumns=True
-  if missingColumns: continue
+  table.head_read()
+
+# ------------------------------------------ sanity checks ----------------------------------------
+
+  errors = []
+  column = {}
+
+  for tensor in [options.defgrad,options.stress]:
+    dim = table.label_dimension(tensor)
+    if   dim <  0: errors.append('column {} not found.'.format(tensor))
+    elif dim != 9: errors.append('column {} is not a tensor.'.format(tensor))
+    else:
+      column[tensor] = table.label_index(tensor)
+
+  if errors != []:
+    table.croak(errors)
+    table.close(dismiss = True)
+    continue
 
 # ------------------------------------------ assemble header --------------------------------------
 
+  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
   table.labels_append(['%i_Cauchy'%(i+1) for i in xrange(9)])     # extend ASCII header with new labels
   table.head_write()
 
 # ------------------------------------------ process data ------------------------------------------
 
   outputAlive = True
   while outputAlive and table.data_read():                        # read next data line of ASCII table
-    F = np.array(map(float,table.data[column['defgrad']:column['defgrad']+9]),'d').reshape(3,3)
-    P = np.array(map(float,table.data[column['stress'] :column['stress']+9]),'d').reshape(3,3)
+    F = np.array(map(float,table.data[column[options.defgrad]:column[options.defgrad]+9]),'d').reshape(3,3)
+    P = np.array(map(float,table.data[column[options.stress ]:column[options.stress ]+9]),'d').reshape(3,3)
     table.data_append(list(1.0/np.linalg.det(F)*np.dot(P,F.T).reshape(9)))   # [Cauchy] = (1/det(F)) * [P].[F_transpose]
     outputAlive = table.data_write()                              # output processed line
 
-# ------------------------------------------ output result -----------------------------------------
-  outputAlive and table.output_flush()                            # just in case of buffered ASCII table
+# ------------------------------------------ output finalization -----------------------------------
 
-  table.input_close()                                             # close input ASCII table (works for stdin)
-  table.output_close()                                            # close output ASCII table (works for stdout)
-  if file['name'] != 'STDIN':
-    os.rename(file['name']+'_tmp',file['name'])                   # overwrite old one with tmp new
+  table.close()                                                   # close ASCII tables
+  if name != 'STDIN': os.rename(name+'_tmp',name)                 # overwrite old one with tmp new
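
The per-row computation is the usual pull-back relation Cauchy = (1/det(F)) * P . F^T, exactly as the code comment states. A self-contained numpy check with made-up sample values for F and P:

  import numpy as np

  F = np.array([[1.1, 0.0, 0.0],
                [0.0, 1.0, 0.0],
                [0.0, 0.0, 1.0]])                 # sample deformation gradient (assumed values)
  P = 100.0*np.eye(3)                             # sample first Piola--Kirchhoff stress (assumed values)
  sigma = 1.0/np.linalg.det(F) * np.dot(P,F.T)    # [Cauchy] = (1/det(F)) * [P].[F_transpose]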

View File

@@ -14,107 +14,110 @@ scriptName = os.path.splitext(scriptID.split()[1])[0]
 # --------------------------------------------------------------------
 parser = OptionParser(option_class=damask.extendableOption, usage='%prog options file[s]', description = """
-Add column containing debug information.
-Operates on periodic ordered three-dimensional data sets.
+Add column(s) containing the shape and volume mismatch resulting from given deformation gradient.
+Operates on periodic three-dimensional x,y,z-ordered data sets.
 """, version = scriptID)
 
-parser.add_option('--no-shape','-s', dest='noShape', action='store_false',
-                  help='do not calcuate shape mismatch')
-parser.add_option('--no-volume','-v', dest='noVolume', action='store_false',
-                  help='do not calculate volume mismatch')
-parser.add_option('-c','--coordinates', dest='coords', metavar='string',
-                  help='column heading for coordinates [%default]')
-parser.add_option('-f','--defgrad', dest='defgrad', metavar='string ',
-                  help='column heading for coordinates [%default]')
-parser.set_defaults(coords  = 'ipinitialcoord')
-parser.set_defaults(defgrad = 'f')
+parser.add_option('-c','--coordinates',
+                  dest = 'coords',
+                  type = 'string', metavar = 'string',
+                  help = 'column heading of coordinates [%default]')
+parser.add_option('-f','--defgrad',
+                  dest = 'defgrad',
+                  type = 'string', metavar = 'string ',
+                  help = 'column heading of deformation gradient [%default]')
+parser.add_option('--no-shape','-s',
+                  dest = 'shape',
+                  action = 'store_false',
+                  help = 'omit shape mismatch')
+parser.add_option('--no-volume','-v',
+                  dest = 'volume',
+                  action = 'store_false',
+                  help = 'omit volume mismatch')
+
+parser.set_defaults(coords  = 'ipinitialcoord',
+                    defgrad = 'f',
+                    shape   = True,
+                    volume  = True,
+                   )
 
 (options,filenames) = parser.parse_args()
 
-# ------------------------------------------ setup file handles ------------------------------------
-files = []
-for name in filenames:
-  if os.path.exists(name):
-    files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
-
 # --- loop over input files -------------------------------------------------------------------------
-for file in files:
-  file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
 
-  table = damask.ASCIItable(file['input'],file['output'],False)   # make unbuffered ASCII_table
-  table.head_read()                                               # read ASCII header info
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
+if filenames == []: filenames = ['STDIN']
 
-# --------------- figure out size and grid ---------------------------------------------------------
-  try:
-    locationCol = table.labels.index('1_%s'%options.coords)       # columns containing location data
-  except ValueError:
-    try:
-      locationCol = table.labels.index('%s.x'%options.coords)     # columns containing location data (legacy naming scheme)
-    except ValueError:
-      file['croak'].write('no coordinate data (1_%s/%s.x) found...\n'%(options.coords,options.coords))
-      continue
+for name in filenames:
+  if not (name == 'STDIN' or os.path.exists(name)): continue
+  table = damask.ASCIItable(name = name, outname = name+'_tmp',
+                            buffered = False)
+  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
+
+# ------------------------------------------ read header ------------------------------------------
+
+  table.head_read()
+
+# ------------------------------------------ sanity checks ----------------------------------------
+
+  errors  = []
+  remarks = []
+
+  if table.label_dimension(options.coords) != 3:  errors.append('coordinates {} are not a vector.'.format(options.coords))
+  else: colCoord = table.label_index(options.coords)
+
+  if table.label_dimension(options.defgrad) != 9: errors.append('deformation gradient {} is not a tensor.'.format(options.defgrad))
+  else: colF = table.label_index(options.defgrad)
+
+  if remarks != []: table.croak(remarks)
+  if errors  != []:
+    table.croak(errors)
+    table.close(dismiss = True)
+    continue
+
+# ------------------------------------------ assemble header --------------------------------------
+
+  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
+  if options.shape:  table.labels_append('shapeMismatch({})'.format(options.defgrad))
+  if options.volume: table.labels_append('volMismatch({})'.format(options.defgrad))
+  table.head_write()
+
+# --------------- figure out size and grid ---------------------------------------------------------
+
+  table.data_readArray()
 
   coords = [{},{},{}]
-  while table.data_read():                                        # read next data line of ASCII table
+  for i in xrange(len(table.data)):
     for j in xrange(3):
-      coords[j][str(table.data[locationCol+j])] = True            # remember coordinate along x,y,z
-  grid = np.array([len(coords[0]),\
-                   len(coords[1]),\
-                   len(coords[2]),],'i')                          # grid is number of distinct coordinates found
+      coords[j][str(table.data[i,colCoord+j])] = True
+  grid = np.array(map(len,coords),'i')
   size = grid/np.maximum(np.ones(3,'d'),grid-1.0)* \
         np.array([max(map(float,coords[0].keys()))-min(map(float,coords[0].keys())),\
                   max(map(float,coords[1].keys()))-min(map(float,coords[1].keys())),\
                   max(map(float,coords[2].keys()))-min(map(float,coords[2].keys())),\
                  ],'d')                                           # size from bounding box, corrected for cell-centeredness
+  size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1]))   # spacing for grid==1 equal to smallest among other spacings
 
   N = grid.prod()
 
-# --------------- figure out columns to process ---------------------------------------------------
-  key = '1_%s'%options.defgrad
-  if key not in table.labels:
-    file['croak'].write('column %s not found...\n'%key)
-    continue
-  else:
-    column = table.labels.index(key)                              # remember columns of requested data
-
-# ------------------------------------------ assemble header ---------------------------------------
-  if not options.noShape:  table.labels_append(['shapeMismatch(%s)'%options.defgrad])
-  if not options.noVolume: table.labels_append(['volMismatch(%s)'%options.defgrad])
-  table.head_write()
-
-# ------------------------------------------ read deformation gradient field -----------------------
-  table.data_rewind()
-  F = np.zeros(N*9,'d').reshape([3,3]+list(grid))
-  idx = 0
-  while table.data_read():
-    (x,y,z) = damask.util.gridLocation(idx,grid)                  # figure out (x,y,z) position from line count
-    idx += 1
-    F[0:3,0:3,x,y,z] = np.array(map(float,table.data[column:column+9]),'d').reshape(3,3)
+# ------------------------------------------ process deformation gradient --------------------------
 
+  F = table.data[:,colF:colF+9].transpose().reshape([3,3]+list(options.dimension),order='F')
   Favg = damask.core.math.tensorAvg(F)
   centres = damask.core.mesh.deformedCoordsFFT(size,F,Favg,[1.0,1.0,1.0])
   nodes = damask.core.mesh.nodesAroundCentres(size,Favg,centres)
-  if not options.noShape:  shapeMismatch  = damask.core.mesh.shapeMismatch( size,F,nodes,centres)
-  if not options.noVolume: volumeMismatch = damask.core.mesh.volumeMismatch(size,F,nodes)
 
-# ------------------------------------------ process data ------------------------------------------
-  table.data_rewind()
-  idx = 0
-  outputAlive = True
-  while outputAlive and table.data_read():                        # read next data line of ASCII table
-    (x,y,z) = damask.util.gridLocation(idx,grid)                  # figure out (x,y,z) position from line count
-    idx += 1
-    if not options.noShape:  table.data_append( shapeMismatch[x,y,z])
-    if not options.noVolume: table.data_append(volumeMismatch[x,y,z])
-    outputAlive = table.data_write()                              # output processed line
+  stack = [table.data]
+  if options.shape:  stack.append(damask.core.mesh.shapeMismatch( size,F,nodes,centres))
+  if options.volume: stack.append(damask.core.mesh.volumeMismatch(size,F,nodes))
 
 # ------------------------------------------ output result -----------------------------------------
-  outputAlive and table.output_flush()                            # just in case of buffered ASCII table
 
-  table.input_close()                                             # close input ASCII table
-  table.output_close()                                            # close output ASCII table
-  os.rename(file['name']+'_tmp',file['name'])                     # overwrite old one with tmp new
+  if len(stack) > 1: table.data = np.hstack(tuple(stack))
+  table.data_writeArray('%.12g')
+
+# ------------------------------------------ output finalization -----------------------------------
+
+  table.close()                                                   # close ASCII tables
+  if name != 'STDIN': os.rename(name+'_tmp',name)                 # overwrite old one with tmp new
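
The grid/size deduction that this and the following scripts now share counts the distinct coordinate values per axis and corrects the bounding box for cell-centred points. A stand-alone sketch on a made-up 2x2x1 set of cell centres (coordinate values assumed):

  import numpy as np

  coordinates = np.array([[0.25, 0.25, 0.5],
                          [0.75, 0.25, 0.5],
                          [0.25, 0.75, 0.5],
                          [0.75, 0.75, 0.5]])
  coords = [{},{},{}]
  for point in coordinates:
      for j in range(3):
          coords[j][str(point[j])] = True           # remember distinct coordinates per axis
  grid = np.array(list(map(len,coords)),'i')        # grid = number of distinct coordinates: [2 2 1]
  size = grid/np.maximum(np.ones(3,'d'),grid-1.0)* \
         np.array([max(map(float,c.keys()))-min(map(float,c.keys())) for c in coords],'d')
  size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1]))   # spacing for grid==1 axes
  # grid -> [2 2 1], size -> [1.0 1.0 0.5]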

View File

@@ -12,11 +12,13 @@ scriptName = os.path.splitext(scriptID.split()[1])[0]
 def curlFFT(geomdim,field):
   grid = np.array(np.shape(field)[0:3])
-  wgt = 1.0/np.array(grid).prod()
+  N = grid.prod()                                  # field size
+  n = np.array(np.shape(field)[3:]).prod()         # data size
+  wgt = 1.0/N
 
-  if len(np.shape(field)) == 4:
+  if n == 3:
     dataType = 'vector'
-  elif len(np.shape(field)) == 5:
+  elif n == 9:
     dataType = 'tensor'
 
   field_fourier = np.fft.fftpack.rfftn(field,axes=(0,1,2))
@@ -50,11 +52,8 @@ def curlFFT(geomdim,field):
                                 +field_fourier[i,j,k,0]*xi[2]) *TWOPIIMG
         curl_fourier[i,j,k,2] = ( field_fourier[i,j,k,1]*xi[0]\
                                  -field_fourier[i,j,k,0]*xi[1]) *TWOPIIMG
-  curl=np.fft.fftpack.irfftn(curl_fourier,axes=(0,1,2))
 
-  if dataType == 'tensor':
-    return curl.reshape([grid.prod(),9])
-  if dataType == 'vector':
-    return curl.reshape([grid.prod(),3])
+  return np.fft.fftpack.irfftn(curl_fourier,axes=(0,1,2)).reshape([N,n])
 
 # --------------------------------------------------------------------
@@ -68,113 +67,108 @@ Deals with both vector- and tensor-valued fields.
 """, version = scriptID)
 
-parser.add_option('-c','--coordinates', dest='coords', metavar='string',
+parser.add_option('-c','--coordinates',
+                  dest = 'coords',
+                  type = 'string', metavar='string',
                   help = 'column heading for coordinates [%default]')
-parser.add_option('-v','--vector', dest='vector', action='extend', metavar='<string LIST>',
+parser.add_option('-v','--vector',
+                  dest = 'vector',
+                  action = 'extend', metavar = '<string LIST>',
                   help = 'heading of columns containing vector field values')
-parser.add_option('-t','--tensor', dest='tensor', action='extend', metavar='<string LIST>',
+parser.add_option('-t','--tensor',
+                  dest = 'tensor',
+                  action = 'extend', metavar = '<string LIST>',
                   help = 'heading of columns containing tensor field values')
-parser.set_defaults(coords = 'ipinitialcoord')
+
+parser.set_defaults(coords = 'ipinitialcoord',
+                   )
 
 (options,filenames) = parser.parse_args()
 
 if options.vector == None and options.tensor == None:
-  parser.error('no data column specified...')
+  parser.error('no data column specified.')
 
-datainfo = {                                       # list of requested labels per datatype
-             'vector':     {'shape':[3],
-                            'len':3,
-                            'label':[]},
-             'tensor':     {'shape':[3,3],
-                            'len':9,
-                            'label':[]},
-           }
-
-if options.vector != None: datainfo['vector']['label'] = options.vector
-if options.tensor != None: datainfo['tensor']['label'] = options.tensor
-
-# ------------------------------------------ setup file handles ------------------------------------
-files = []
-if filenames == []:
-  files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
-else:
-  for name in filenames:
-    if os.path.exists(name):
-      files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
-
 # --- loop over input files -------------------------------------------------------------------------
-for file in files:
-  if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
-  else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
 
-  table = damask.ASCIItable(file['input'],file['output'],False)   # make unbuffered ASCII_table
-  table.head_read()                                               # read ASCII header info
-  table.data_readArray()
+if filenames == []: filenames = ['STDIN']
 
-# --------------- figure out name of coordinate data (support for legacy .x notation) -------------
-  coordLabels=['%i_%s'%(i+1,options.coords) for i in xrange(3)]   # store labels for column keys
-  if not set(coordLabels).issubset(table.labels):
-    directions = ['x','y','z']
-    coordLabels=['%s.%s'%(options.coords,directions[i]) for i in xrange(3)]   # store labels for column keys
-    if not set(coordLabels).issubset(table.labels):
-      file['croak'].write('no coordinate data (1_%s) found...\n'%options.coords)
-      continue
-  coordColumns = [table.labels.index(label) for label in coordLabels]
+for name in filenames:
+  if not (name == 'STDIN' or os.path.exists(name)): continue
+  table = damask.ASCIItable(name = name, outname = name+'_tmp',
+                            buffered = False)
+  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
 
-# --------------- figure out active columns -------------------------------------------------------
-  active = defaultdict(list)
-  for datatype,info in datainfo.items():
-    for label in info['label']:
-      key = '1_%s'%label
-      if key not in table.labels:
-        file['croak'].write('column %s not found...\n'%key)
+# ------------------------------------------ read header ------------------------------------------
+
+  table.head_read()
+
+# ------------------------------------------ sanity checks ----------------------------------------
+
+  items = {
+            'tensor': {'dim': 9, 'shape': [3,3], 'labels':options.tensor, 'active':[], 'column': []},
+            'vector': {'dim': 3, 'shape': [3],   'labels':options.vector, 'active':[], 'column': []},
+          }
+  errors  = []
+  remarks = []
+  column = {}
+
+  if table.label_dimension(options.coords) != 3: errors.append('coordinates {} are not a vector.'.format(options.coords))
+  else: coordCol = table.label_index(options.coords)
+
+  for type, data in items.iteritems():
+    for what in data['labels']:
+      dim = table.label_dimension(what)
+      if dim != data['dim']: remarks.append('column {} is not a {}.'.format(what,type))
       else:
-        active[datatype].append(label)
+        items[type]['active'].append(what)
+        items[type]['column'].append(table.label_index(what))
+
+  if remarks != []: table.croak(remarks)
+  if errors  != []:
+    table.croak(errors)
+    table.close(dismiss = True)
+    continue
+
+# ------------------------------------------ assemble header --------------------------------------
 
-# --------------- assemble new header (metadata and columns containing curl) ----------------------
   table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-  for datatype,labels in active.items():           # loop over vector,tensor
-    for label in labels:
-      table.labels_append(['%i_curlFFT(%s)'%(i+1,label) for i in xrange(datainfo[datatype]['len'])])   # extend ASCII header with new labels
+  for type, data in items.iteritems():
+    for label in data['active']:
+      table.labels_append(['{}_curlFFT({})'.format(i+1,label) for i in xrange(data['dim'])])   # extend ASCII header with new labels
   table.head_write()
 
 # --------------- figure out size and grid ---------------------------------------------------------
 
+  table.data_readArray()
+
   coords = [{},{},{}]
-  for i in xrange(table.data.shape[0]):
+  for i in xrange(len(table.data)):
     for j in xrange(3):
-      coords[j][str(table.data[i,coordColumns[j]])] = True
+      coords[j][str(table.data[i,coordCol+j])] = True
   grid = np.array(map(len,coords),'i')
   size = grid/np.maximum(np.ones(3,'d'),grid-1.0)* \
         np.array([max(map(float,coords[0].keys()))-min(map(float,coords[0].keys())),\
                   max(map(float,coords[1].keys()))-min(map(float,coords[1].keys())),\
                   max(map(float,coords[2].keys()))-min(map(float,coords[2].keys())),\
                  ],'d')                            # size from bounding box, corrected for cell-centeredness
-  for i, points in enumerate(grid):
-    if points == 1:
-      mask = np.ones(3,dtype=bool)
-      mask[i]=0
-      size[i] = min(size[mask]/grid[mask])         # third spacing equal to smaller of other spacing
+
+  size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1]))   # spacing for grid==1 equal to smallest among other spacings
 
 # ------------------------------------------ process value field -----------------------------------
 
-  curl = defaultdict(dict)
-  for datatype,labels in active.items():           # loop over vector,tensor
-    for label in labels:                           # loop over all requested curls
-      startColumn=table.labels.index('1_'+label)
-      curl[datatype][label] = curlFFT(size[::-1],  # we need to reverse order here, because x is fastest,ie rightmost, but leftmost in our x,y,z notation
-                                      table.data[:,startColumn:startColumn+datainfo[datatype]['len']].\
-                                      reshape([grid[2],grid[1],grid[0]]+datainfo[datatype]['shape']))
-
-# ------------------------------------------ add data ------------------------------------------
-  for datatype,labels in active.items():           # loop over vector,tensor
-    for label in labels:                           # loop over all requested curls
-      for c in xrange(curl[datatype][label][0,:].shape[0]):   # append column by column
-        lastRow = table.data.shape[1]
-        table.data=np.insert(table.data,lastRow,curl[datatype][label][:,c],1)
+  stack = [table.data]
+  for type, data in items.iteritems():
+    for i,label in enumerate(data['active']):
+      stack.append(curlFFT(size[::-1],             # we need to reverse order here, because x is fastest,ie rightmost, but leftmost in our x,y,z notation
+                           table.data[:,data['column'][i]:data['column'][i]+data['dim']].\
+                           reshape([grid[2],grid[1],grid[0]]+data['shape'])))
 
 # ------------------------------------------ output result -----------------------------------------
 
+  if len(stack) > 1: table.data = np.hstack(tuple(stack))
   table.data_writeArray('%.12g')
-  table.input_close()                              # close input ASCII table (works for stdin)
-  table.output_close()                             # close output ASCII table (works for stdout)
-  if file['name'] != 'STDIN':
-    os.rename(file['name']+'_tmp',file['name'])    # overwrite old one with tmp new
+
+# ------------------------------------------ output finalization -----------------------------------
+
+  table.close()                                    # close input ASCII table (works for stdin)
+  if name != 'STDIN': os.rename(name+'_tmp',name)  # overwrite old one with tmp new
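
curlFFT expects the field with z as the slowest and x as the fastest axis, which is why the flat table rows are reshaped to (z,y,x,...) and the physical size is passed reversed as size[::-1]. A shapes-only sketch of that layout (grid values assumed):

  import numpy as np

  grid = np.array([4,3,2])                             # points along x,y,z (assumed)
  data = np.zeros((grid.prod(),9))                     # one table row per point, 9 tensor components
  field = data.reshape([grid[2],grid[1],grid[0],3,3])  # (z,y,x,3,3), matching x-fastest row order
  # curlFFT(size[::-1], field) then returns an (N,n) array that can be stacked onto table.data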

View File

@ -16,16 +16,23 @@ scriptName = os.path.splitext(scriptID.split()[1])[0]
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options file[s]', description = """ parser = OptionParser(option_class=damask.extendableOption, usage='%prog options file[s]', description = """
Add deformed configuration of given initial coordinates. Add deformed configuration of given initial coordinates.
Operates on periodic ordered three-dimensional data sets. Operates on periodic three-dimensional x,y,z-ordered data sets.
""", version = scriptID) """, version = scriptID)
parser.add_option('-c','--coordinates', dest='coords', metavar='string', parser.add_option('-c','--coordinates',
dest = 'coords',
type = 'string', metavar = 'string',
help = 'column label of coordinates [%default]') help = 'column label of coordinates [%default]')
parser.add_option('-f','--defgrad', dest='defgrad', metavar='string', parser.add_option('-f','--defgrad',
dest = 'defgrad',
type = 'string', metavar = 'string',
help = 'column label of deformation gradient [%default]') help = 'column label of deformation gradient [%default]')
parser.add_option('--scaling', dest='scaling', type='float', nargs=3, , metavar = ' '.join(['float']*3), parser.add_option('--scaling',
help='x/y/z scaling of displacment fluctuation') dest = 'scaling',
type = 'float', nargs = 3, metavar = ' '.join(['float']*3),
help = 'x/y/z scaling of displacement fluctuation')
parser.set_defaults(coords = 'ipinitialcoord', parser.set_defaults(coords = 'ipinitialcoord',
defgrad = 'f', defgrad = 'f',
scaling = [1.,1.,1.], scaling = [1.,1.,1.],
@ -34,89 +41,75 @@ parser.set_defaults(coords = 'ipinitialcoord',
(options,filenames) = parser.parse_args() (options,filenames) = parser.parse_args()
# --- loop over input files ------------------------------------------------------------------------- # --- loop over input files -------------------------------------------------------------------------
if filenames == []:
filenames = ['STDIN'] if filenames == []: filenames = ['STDIN']
for name in filenames: for name in filenames:
if name == 'STDIN': if not (name == 'STDIN' or os.path.exists(name)): continue
file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr} table = damask.ASCIItable(name = name, outname = name+'_tmp',
file['croak'].write('\033[1m'+scriptName+'\033[0m\n') buffered = False)
else: table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
if not os.path.exists(name): continue
file = {'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr}
file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
table = damask.ASCIItable(file['input'],file['output'],buffered=False) # make unbuffered ASCII_table # ------------------------------------------ read header ------------------------------------------
table.head_read() # read ASCII header info
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
# --------------- figure out columns to process --------------------------------------------------- table.head_read()
if table.label_dimension(options.coords) != 3: # ------------------------------------------ sanity checks ----------------------------------------
file['croak'].write('no coordinate vector (1/2/3_%s) found...\n'%options.coords)
continue errors = []
if table.label_dimension(options.defgrad) != 9: remarks = []
file['croak'].write('no deformation gradient tensor (1..9_%s) found...\n'%options.defgrad)
if table.label_dimension(options.coords) != 3: errors.append('coordinates {} are not a vector.'.format(options.coords))
else: colCoord = table.label_index(options.coords)
if table.label_dimension(options.defgrad) != 9: errors.append('deformation gradient {} is not a tensor.'.format(options.defgrad))
else: colF = table.label_index(options.defgrad)
if remarks != []: table.croak(remarks)
if errors != []:
table.croak(errors)
table.close(dismiss = True)
continue continue
# --------------- figure out size and grid --------------------------------------------------------- # --------------- figure out size and grid ---------------------------------------------------------
colCoords = table.label_index(options.coords) # starting column of location data table.data_readArray()
colDefGrad = table.label_index(options.defgrad) # remember columns of requested data
coords = [{},{},{}] coords = [{},{},{}]
while table.data_read(): # read next data line of ASCII table for i in xrange(len(table.data)):
for j in xrange(3): for j in xrange(3):
coords[j][str(table.data[colCoords+j])] = True # remember coordinate along x,y,z coords[j][str(table.data[i,colCoord+j])] = True
grid = np.array([len(coords[0]),\ grid = np.array(map(len,coords),'i')
len(coords[1]),\
len(coords[2]),],'i') # grid is number of distinct coordinates found
size = grid/np.maximum(np.ones(3,'d'),grid-1.0)* \ size = grid/np.maximum(np.ones(3,'d'),grid-1.0)* \
np.array([max(map(float,coords[0].keys()))-min(map(float,coords[0].keys())),\ np.array([max(map(float,coords[0].keys()))-min(map(float,coords[0].keys())),\
max(map(float,coords[1].keys()))-min(map(float,coords[1].keys())),\ max(map(float,coords[1].keys()))-min(map(float,coords[1].keys())),\
max(map(float,coords[2].keys()))-min(map(float,coords[2].keys())),\ max(map(float,coords[2].keys()))-min(map(float,coords[2].keys())),\
],'d') # size from bounding box, corrected for cell-centeredness ],'d') # size from bounding box, corrected for cell-centeredness
for i, points in enumerate(grid): size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 equal to smallest among other spacings
if points == 1:
options.packing[i] = 1
options.shift[i] = 0
mask = np.ones(3,dtype=bool)
mask[i]=0
size[i] = min(size[mask]/grid[mask]) # third spacing equal to smaller of other spacing
N = grid.prod() N = grid.prod()
# ------------------------------------------ assemble header --------------------------------------- # ------------------------------------------ assemble header ---------------------------------------
table.labels_append(['%s_%s%s'%(coord+1,options.defgrad,options.coords) for coord in xrange(3)]) # extend ASCII header with new labels
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.labels_append(['{}_{}.{}'%(coord+1,options.defgrad,options.coords) for coord in xrange(3)]) # extend ASCII header with new labels
table.head_write() table.head_write()
# ------------------------------------------ read deformation gradient field ----------------------- # ------------------------------------------ process deformation gradient --------------------------
table.data_rewind()
F = np.array([0.0 for i in xrange(N*9)]).reshape([3,3]+list(grid))
idx = 0
while table.data_read():
(x,y,z) = damask.util.gridLocation(idx,grid) # figure out (x,y,z) position from line count
idx += 1
F[0:3,0:3,x,y,z] = np.array(map(float,table.data[colDefGrad:colDefGrad+9]),'d').reshape(3,3)
# ------------------------------------------ calculate coordinates --------------------------------- F = table.data[:,colF:colF+9].transpose().reshape([3,3]+list(grid),order='F')
Favg = damask.core.math.tensorAvg(F) Favg = damask.core.math.tensorAvg(F)
centroids = damask.core.mesh.deformedCoordsFFT(size,F,Favg,options.scaling) centres = damask.core.mesh.deformedCoordsFFT(size,F,Favg,[1.0,1.0,1.0])
# ------------------------------------------ process data ------------------------------------------ stack = [table.data,centres]
table.data_rewind()
idx = 0
outputAlive = True
while outputAlive and table.data_read(): # read next data line of ASCII table
(x,y,z) = damask.util.gridLocation(idx,grid) # figure out (x,y,z) position from line count
idx += 1
table.data_append(list(centroids[:,x,y,z]))
outputAlive = table.data_write() # output processed line
# ------------------------------------------ output result ----------------------------------------- # ------------------------------------------ output result -----------------------------------------
outputAlive and table.output_flush() # just in case of buffered ASCII table
table.close() # close tables if len(stack) > 1: table.data = np.hstack(tuple(stack))
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new table.data_writeArray('%.12g')
# ------------------------------------------ output finalization -----------------------------------
table.close() # close ASCII tables
if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new
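tensorAvg and deformedCoordsFFT are DAMASK library internals not shown in this diff; conceptually, the volume-averaged deformation gradient maps the undeformed cell centres affinely and the FFT reconstruction adds the periodic fluctuation on top. A numpy-only sketch of the affine part (the identity-gradient field and the sample centre are hypothetical):

import numpy as np

F    = np.tile(np.eye(3).reshape(3,3,1,1,1),(1,1,4,4,4))                    # hypothetical 4x4x4 field of identity gradients
Favg = F.reshape(3,3,-1).mean(axis=2)                                       # volume-averaged deformation gradient
x0   = np.array([0.125,0.375,0.625])                                        # an undeformed cell centre
x    = np.dot(Favg,x0)                                                      # affine image; deformedCoordsFFT adds the fluctuation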


@@ -26,67 +26,68 @@ Add column(s) containing determinant of requested tensor column(s).
""", version = scriptID) """, version = scriptID)
parser.add_option('-t','--tensor', dest='tensor', action='extend', metavar='<string LIST>', parser.add_option('-t','--tensor',
dest = 'tensor',
action = 'extend', metavar = '<string LIST>',
help = 'heading of columns containing tensor field values') help = 'heading of columns containing tensor field values')
(options,filenames) = parser.parse_args() (options,filenames) = parser.parse_args()
if options.tensor == None: if options.tensor == None:
parser.error('no data column specified...') parser.error('no data column specified.')
datainfo = { # list of requested labels per datatype
'tensor': {'len':9,
'label':[]},
}
datainfo['tensor']['label'] += options.tensor
# ------------------------------------------ setup file handles ------------------------------------
files = []
if filenames == []:
files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
else:
for name in filenames:
if os.path.exists(name):
files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
# --- loop over input files ------------------------------------------------------------------------- # --- loop over input files -------------------------------------------------------------------------
for file in files:
if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
table = damask.ASCIItable(file['input'],file['output'],False) # make unbuffered ASCII_table if filenames == []: filenames = ['STDIN']
table.head_read() # read ASCII header info
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
# --------------- figure out columns to process --------------------------------------------------- for name in filenames:
active = [] if not (name == 'STDIN' or os.path.exists(name)): continue
column = defaultdict(dict) table = damask.ASCIItable(name = name, outname = name+'_tmp',
for label in datainfo['tensor']['label']: buffered = False)
key = '1_%s'%label table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
if key not in table.labels:
file['croak'].write('column %s not found...\n'%key) # ------------------------------------------ read header ------------------------------------------
table.head_read()
# ------------------------------------------ sanity checks ----------------------------------------
items = {
'tensor': {'dim': 9, 'shape': [3,3], 'labels':options.tensor, 'column': []},
}
errors = []
remarks = []
for type, data in items.iteritems():
for what in data['labels']:
dim = table.label_dimension(what)
if dim != data['dim']: remarks.append('column {} is not a {}...'.format(what,type))
else: else:
active.append(label) items[type]['column'].append(table.label_index(what))
column[label] = table.labels.index(key) # remember columns of requested data table.labels_append('det({})'.format(what)) # extend ASCII header with new labels
# ------------------------------------------ assemble header --------------------------------------- if remarks != []: table.croak(remarks)
for label in active: if errors != []:
table.labels_append('det(%s)'%label) # extend ASCII header with new labels table.croak(errors)
table.close(dismiss = True)
continue
# ------------------------------------------ assemble header --------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.head_write() table.head_write()
# ------------------------------------------ process data ------------------------------------------ # ------------------------------------------ process data ------------------------------------------
outputAlive = True outputAlive = True
while outputAlive and table.data_read(): # read next data line of ASCII table while outputAlive and table.data_read(): # read next data line of ASCII table
for label in active: for type, data in items.iteritems():
table.data_append(determinant(map(float,table.data[column[label]: for column in data['column']:
column[label]+datainfo['tensor']['len']]))) table.data_append(determinant(map(float,table.data[column:
column+data['dim']])))
outputAlive = table.data_write() # output processed line outputAlive = table.data_write() # output processed line
# ------------------------------------------ output result ----------------------------------------- # ------------------------------------------ output finalization -----------------------------------
outputAlive and table.output_flush() # just in case of buffered ASCII table
table.input_close() # close input ASCII table (works for stdin) table.close() # close input ASCII table (works for stdin)
table.output_close() # close output ASCII table (works for stdout) if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new
if file['name'] != 'STDIN':
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new
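The determinant helper is defined above the hunk shown here; presumably it expands the 3x3 determinant of the flat nine-component row directly (row-major component order 11,12,13,21,... is an assumption):

def determinant(m):                                                         # m: nine floats, row-major 3x3
  return m[0]*m[4]*m[8] + m[1]*m[5]*m[6] + m[2]*m[3]*m[7] \
       - m[2]*m[4]*m[6] - m[1]*m[3]*m[8] - m[0]*m[5]*m[7]

determinant([1.,0.,0., 0.,1.,0., 0.,0.,1.])                                 # --> 1.0 for the identity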


@@ -11,11 +11,14 @@ scriptName = os.path.splitext(scriptID.split()[1])[0]
oneThird = 1.0/3.0 oneThird = 1.0/3.0
def deviator(m): # Careful, do not change the value of m (it's intent(inout)!) def deviator(m,spherical = False): # Careful, do not change the value of m (it's intent(inout)!)
sph = oneThird*(m[0]+m[4]+m[8]) sph = oneThird*(m[0]+m[4]+m[8])
return [m[0] -sph, m[1], m[2], dev = [
m[0]-sph, m[1], m[2],
m[3], m[4]-sph, m[5], m[3], m[4]-sph, m[5],
m[6], m[7], m[8]-sph] m[6], m[7], m[8]-sph,
]
return (dev,sph) if spherical else dev
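In tensor notation the function returns dev(m) = m - tr(m)/3 I and, on request, also the spherical part sph = tr(m)/3; e.g. for a hypothetical purely hydrostatic state:

m = [5.,0.,0., 0.,5.,0., 0.,0.,5.]                                          # spherical (pressure-like) tensor
deviator(m)                                                                 # --> nine zeros
deviator(m,spherical = True)                                                # --> (nine zeros, 5.0)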
# -------------------------------------------------------------------- # --------------------------------------------------------------------
# MAIN # MAIN
@@ -26,72 +29,77 @@ Add column(s) containing deviator of requested tensor column(s).
""", version = scriptID) """, version = scriptID)
parser.add_option('-t','--tensor', dest='tensor', action='extend', metavar='<string LIST>', parser.add_option('-t','--tensor',
dest = 'tensor',
action = 'extend', metavar='<string LIST>',
help = 'heading of columns containing tensor field values') help = 'heading of columns containing tensor field values')
parser.add_option('-s','--spherical', dest='hydrostatic', action='store_true', parser.add_option('-s','--spherical',
help='also add spherical part of tensor (hydrostatic component, pressure)') dest = 'spherical',
action = 'store_true',
help = 'report spherical part of tensor (hydrostatic component, pressure)')
(options,filenames) = parser.parse_args() (options,filenames) = parser.parse_args()
if options.tensor == None: if options.tensor == None:
parser.error('no data column specified...') parser.error('no data column specified...')
datainfo = { # list of requested labels per datatype # --- loop over input files -------------------------------------------------------------------------
'tensor': {'len':9,
'label':[]},
}
datainfo['tensor']['label'] += options.tensor if filenames == []: filenames = ['STDIN']
# ------------------------------------------ setup file handles ------------------------------------
files = []
if filenames == []:
files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
else:
for name in filenames: for name in filenames:
if os.path.exists(name): if not (name == 'STDIN' or os.path.exists(name)): continue
files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr}) table = damask.ASCIItable(name = name, outname = name+'_tmp',
buffered = False)
table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
# ------------------------------------------ loop over input files --------------------------------- # ------------------------------------------ read header ------------------------------------------
for file in files:
if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
table = damask.ASCIItable(file['input'],file['output'],False) # make unbuffered ASCII_table table.head_read()
table.head_read() # read ASCII header info
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
active = [] # ------------------------------------------ sanity checks ----------------------------------------
column = defaultdict(dict)
for label in datainfo['tensor']['label']: items = {
key = '1_%s'%label 'tensor': {'dim': 9, 'shape': [3,3], 'labels':options.tensor, 'active':[], 'column': []},
if key not in table.labels: }
file['croak'].write('column %s not found...\n'%key) errors = []
remarks = []
column = {}
for type, data in items.iteritems():
for what in data['labels']:
dim = table.label_dimension(what)
if dim != data['dim']: remarks.append('column {} is not a {}.'.format(what,type))
else: else:
active.append(label) items[type]['active'].append(what)
column[label] = table.labels.index(key) # remember columns of requested data items[type]['column'].append(table.label_index(what))
# ------------------------------------------ assemble header --------------------------------------- if remarks != []: table.croak(remarks)
for label in active: if errors != []:
table.labels_append(['%i_dev(%s)'%(i+1,label) for i in xrange(9)]) # extend ASCII header with new labels table.croak(errors)
if(options.hydrostatic): table.labels_append('sph(%s)'%label) table.close(dismiss = True)
continue
# ------------------------------------------ assemble header --------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
for type, data in items.iteritems():
for label in data['active']:
table.labels_append(['{}_dev({})'.format(i+1,label) for i in xrange(data['dim'])] + \
(['sph({})'.format(label)] if options.spherical else [])) # extend ASCII header with new labels
table.head_write() table.head_write()
# ------------------------------------------ process data ------------------------------------------ # ------------------------------------------ process data ------------------------------------------
outputAlive = True outputAlive = True
while outputAlive and table.data_read(): # read next data line of ASCII table while outputAlive and table.data_read(): # read next data line of ASCII table
for label in active: for type, data in items.iteritems():
myTensor = map(float,table.data[column[label]: for column in data['column']:
column[label]+datainfo['tensor']['len']]) table.data_append(deviator(map(float,table.data[column:
table.data_append(deviator(myTensor)) column+data['dim']]),options.spherical))
if(options.hydrostatic): table.data_append(oneThird*(myTensor[0]+myTensor[4]+myTensor[8]))
outputAlive = table.data_write() # output processed line outputAlive = table.data_write() # output processed line
# ------------------------------------------ output result ----------------------------------------- # ------------------------------------------ output finalization -----------------------------------
outputAlive and table.output_flush() # just in case of buffered ASCII table
table.input_close() # close input ASCII table (works for stdin) table.close() # close input ASCII table (works for stdin)
table.output_close() # close output ASCII table (works for stdout) if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new
if file['name'] != 'STDIN':
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new


@@ -12,10 +12,12 @@ scriptName = os.path.splitext(scriptID.split()[1])[0]
def divFFT(geomdim,field): def divFFT(geomdim,field):
grid = np.array(np.shape(field)[0:3]) grid = np.array(np.shape(field)[0:3])
wgt = 1.0/np.array(grid).prod() N = grid.prod() # field size
n = np.array(np.shape(field)[3:]).prod() # data size
wgt = 1.0/N
field_fourier = np.fft.fftpack.rfftn(field,axes=(0,1,2)) field_fourier = np.fft.fftpack.rfftn(field,axes=(0,1,2))
div_fourier=np.zeros(field_fourier.shape[0:len(np.shape(field))-1],'c16') # size depends on whether tensor or vector div_fourier = np.zeros(field_fourier.shape[0:len(np.shape(field))-1],'c16') # size depends on whether tensor or vector
# differentiation in Fourier space # differentiation in Fourier space
k_s=np.zeros([3],'i') k_s=np.zeros([3],'i')
@@ -30,18 +32,13 @@ def divFFT(geomdim,field):
k_s[2] = k k_s[2] = k
if(k > grid[2]/2 ): k_s[2] = k_s[2] - grid[2] if(k > grid[2]/2 ): k_s[2] = k_s[2] - grid[2]
xi=np.array([k_s[2]/geomdim[2]+0.0j,k_s[1]/geomdim[1]+0.j,k_s[0]/geomdim[0]+0.j],'c16') xi=np.array([k_s[2]/geomdim[2]+0.0j,k_s[1]/geomdim[1]+0.j,k_s[0]/geomdim[0]+0.j],'c16')
if len(np.shape(field)) == 5: # tensor, 3x3 -> 3 if n == 9: # tensor, 3x3 -> 3
for l in xrange(3): for l in xrange(3):
div_fourier[i,j,k,l] = sum(field_fourier[i,j,k,l,0:3]*xi) *TWOPIIMG div_fourier[i,j,k,l] = sum(field_fourier[i,j,k,l,0:3]*xi) *TWOPIIMG
elif len(np.shape(field)) == 4: # vector, 3 -> 1 elif n == 3: # vector, 3 -> 1
div_fourier[i,j,k] = sum(field_fourier[i,j,k,0:3]*xi) *TWOPIIMG div_fourier[i,j,k] = sum(field_fourier[i,j,k,0:3]*xi) *TWOPIIMG
div=np.fft.fftpack.irfftn(div_fourier,axes=(0,1,2)) return np.fft.fftpack.irfftn(div_fourier,axes=(0,1,2)).reshape([N,n/3])
if len(np.shape(field)) == 5: # tensor, 3x3 -> 3
return div.reshape([grid.prod(),3])
elif len(np.shape(field)) == 4: # vector, 3 -> 1
return div.reshape([grid.prod(),1])
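The loop realises the standard identity that differentiation becomes multiplication in Fourier space: per frequency vector xi, FT(div f)(xi) = 2 pi i xi . FT(f)(xi). A vectorised numpy-only sketch of the same idea for a plain 3-vector field (no claim about the axis-ordering conventions used above):

import numpy as np

def div_fourier(size,field):                                                # size: physical edge lengths, field: g0 x g1 x g2 x 3, periodic
  g = field.shape[:3]
  f_hat = np.fft.fftn(field,axes=(0,1,2))
  freqs = np.meshgrid(*[np.fft.fftfreq(g[i],d = size[i]/g[i]) for i in range(3)],indexing='ij')
  div_hat = 2j*np.pi*sum(f_hat[...,l]*freqs[l] for l in range(3))           # 2 pi i xi . f_hat
  return np.real(np.fft.ifftn(div_hat,axes=(0,1,2)))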
# -------------------------------------------------------------------- # --------------------------------------------------------------------
@@ -55,113 +52,109 @@ Deals with both vector- and tensor-valued fields.
""", version = scriptID) """, version = scriptID)
parser.add_option('-c','--coordinates', dest='coords', metavar='string', parser.add_option('-c','--coordinates',
dest = 'coords',
type = 'string', metavar = 'string',
help = 'column heading for coordinates [%default]') help = 'column heading for coordinates [%default]')
parser.add_option('-v','--vector', dest='vector', action='extend', metavar='<string LIST>', parser.add_option('-v','--vector',
dest = 'vector',
action = 'extend', metavar = '<string LIST>',
help = 'heading of columns containing vector field values') help = 'heading of columns containing vector field values')
parser.add_option('-t','--tensor', dest='tensor', action='extend', metavar='<string LIST>', parser.add_option('-t','--tensor',
dest = 'tensor',
action = 'extend', metavar = '<string LIST>',
help = 'heading of columns containing tensor field values') help = 'heading of columns containing tensor field values')
parser.set_defaults(coords = 'ipinitialcoord')
parser.set_defaults(coords = 'ipinitialcoord',
)
(options,filenames) = parser.parse_args() (options,filenames) = parser.parse_args()
if options.vector == None and options.tensor == None: if options.vector == None and options.tensor == None:
parser.error('no data column specified...') parser.error('no data column specified.')
datainfo = { # list of requested labels per datatype
'vector': {'shape':[3],
'len':3,
'label':[]},
'tensor': {'shape':[3,3],
'len':9,
'label':[]},
}
if options.vector != None: datainfo['vector']['label'] = options.vector
if options.tensor != None: datainfo['tensor']['label'] = options.tensor
# ------------------------------------------ setup file handles ------------------------------------
files = []
if filenames == []:
files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
else:
for name in filenames:
if os.path.exists(name):
files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
# --- loop over input files ------------------------------------------------------------------------- # --- loop over input files -------------------------------------------------------------------------
for file in files:
if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
table = damask.ASCIItable(file['input'],file['output'],False) # make unbuffered ASCII_table if filenames == []: filenames = ['STDIN']
table.head_read() # read ASCII header info
table.data_readArray()
# --------------- figure out name of coordinate data (support for legacy .x notation) ------------- for name in filenames:
coordLabels=['%i_%s'%(i+1,options.coords) for i in xrange(3)] # store labels for column keys if not (name == 'STDIN' or os.path.exists(name)): continue
if not set(coordLabels).issubset(table.labels): table = damask.ASCIItable(name = name, outname = name+'_tmp',
directions = ['x','y','z'] buffered = False)
coordLabels=['%s.%s'%(options.coords,directions[i]) for i in xrange(3)] # store labels for column keys table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
if not set(coordLabels).issubset(table.labels):
file['croak'].write('no coordinate data (1_%s) found...\n'%options.coords)
continue
coordColumns = [table.labels.index(label) for label in coordLabels]
# --------------- figure out active columns ------------------------------------------------------- # ------------------------------------------ read header ------------------------------------------
active = defaultdict(list)
for datatype,info in datainfo.items(): table.head_read()
for label in info['label']:
key = '1_%s'%label # ------------------------------------------ sanity checks ----------------------------------------
if key not in table.labels:
file['croak'].write('column %s not found...\n'%key) items = {
'tensor': {'dim': 9, 'shape': [3,3], 'labels':options.tensor, 'active':[], 'column': []},
'vector': {'dim': 3, 'shape': [3], 'labels':options.vector, 'active':[], 'column': []},
}
errors = []
remarks = []
column = {}
if table.label_dimension(options.coords) != 3: errors.append('coordinates {} are not a vector.'.format(options.coords))
else: coordCol = table.label_index(options.coords)
for type, data in items.iteritems():
for what in data['labels']:
dim = table.label_dimension(what)
if dim != data['dim']: remarks.append('column {} is not a {}.'.format(what,type))
else: else:
active[datatype].append(label) items[type]['active'].append(what)
items[type]['column'].append(table.label_index(what))
if remarks != []: table.croak(remarks)
if errors != []:
table.croak(errors)
table.close(dismiss = True)
continue
# ------------------------------------------ assemble header --------------------------------------
# --------------- assemble new header (metadata and columns containing curl) ----------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
for datatype,labels in active.items(): # loop over vector,tensor for type, data in items.iteritems():
for label in labels: for label in data['active']:
table.labels_append(['divFFT(%s)'%(label) if datatype == 'vector' else table.labels_append(['divFFT({})'.format(label) if type == 'vector' else
'%i_divFFT(%s)'%(i+1,label) for i in xrange(datainfo[datatype]['len']//3)])# extend ASCII header with new labels '{}_divFFT({})'.format(i+1,label) for i in xrange(data['dim']//3)]) # extend ASCII header with new labels
table.head_write() table.head_write()
# --------------- figure out size and grid --------------------------------------------------------- # --------------- figure out size and grid ---------------------------------------------------------
table.data_readArray()
coords = [{},{},{}] coords = [{},{},{}]
for i in xrange(table.data.shape[0]): for i in xrange(len(table.data)):
for j in xrange(3): for j in xrange(3):
coords[j][str(table.data[i,coordColumns[j]])] = True coords[j][str(table.data[i,coordCol+j])] = True
grid = np.array(map(len,coords),'i') grid = np.array(map(len,coords),'i')
size = grid/np.maximum(np.ones(3,'d'),grid-1.0)* \ size = grid/np.maximum(np.ones(3,'d'),grid-1.0)* \
np.array([max(map(float,coords[0].keys()))-min(map(float,coords[0].keys())),\ np.array([max(map(float,coords[0].keys()))-min(map(float,coords[0].keys())),\
max(map(float,coords[1].keys()))-min(map(float,coords[1].keys())),\ max(map(float,coords[1].keys()))-min(map(float,coords[1].keys())),\
max(map(float,coords[2].keys()))-min(map(float,coords[2].keys())),\ max(map(float,coords[2].keys()))-min(map(float,coords[2].keys())),\
],'d') # size from bounding box, corrected for cell-centeredness ],'d') # size from bounding box, corrected for cell-centeredness
for i, points in enumerate(grid):
if points == 1: size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 equal to smallest among other spacings
mask = np.ones(3,dtype=bool)
mask[i]=0
size[i] = min(size[mask]/grid[mask]) # third spacing equal to smaller of other spacing
# ------------------------------------------ process value field ----------------------------------- # ------------------------------------------ process value field -----------------------------------
div = defaultdict(dict)
for datatype,labels in active.items(): # loop over vector,tensor
for label in labels: # loop over all requested curls
startColumn=table.labels.index('1_'+label)
div[datatype][label] = divFFT(size[::-1], # we need to reverse order here, because x is fastest,ie rightmost, but leftmost in our x,y,z notation
table.data[:,startColumn:startColumn+datainfo[datatype]['len']].\
reshape([grid[2],grid[1],grid[0]]+datainfo[datatype]['shape']))
# ------------------------------------------ add data ------------------------------------------ stack = [table.data]
for datatype,labels in active.items(): # loop over vector,tensor for type, data in items.iteritems():
for label in labels: # loop over all requested curls for i,label in enumerate(data['active']):
for c in xrange(div[datatype][label][0,:].shape[0]): # append column by column stack.append(divFFT(size[::-1], # we need to reverse order here, because x is fastest,ie rightmost, but leftmost in our x,y,z notation
lastRow = table.data.shape[1] table.data[:,data['column'][i]:data['column'][i]+data['dim']].\
table.data=np.insert(table.data,lastRow,div[datatype][label][:,c],1) reshape([grid[2],grid[1],grid[0]]+data['shape'])))
# ------------------------------------------ output result ----------------------------------------- # ------------------------------------------ output result -----------------------------------------
if len(stack) > 1: table.data = np.hstack(tuple(stack))
table.data_writeArray('%.12g') table.data_writeArray('%.12g')
table.input_close() # close input ASCII table (works for stdin)
table.output_close() # close output ASCII table (works for stdout) # ------------------------------------------ output finalization -----------------------------------
if file['name'] != 'STDIN':
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new table.close() # close input ASCII table (works for stdin)
if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new


@@ -36,73 +36,62 @@ Add column(s) containing directional stiffness based on given cubic stiffness va
""", version = scriptID) """, version = scriptID)
parser.add_option('-c','--stiffness', dest='vector', action='extend', metavar='<string LIST>', parser.add_option('-c','--stiffness',
dest = 'stiffness',
action = 'extend', metavar = '<string LIST>',
help = 'heading of column containing C11 (followed by C12, C44) field values') help = 'heading of column containing C11 (followed by C12, C44) field values')
parser.add_option('-d','--direction','--hkl', dest='hkl', type='int', nargs=3, metavar='int int int', parser.add_option('-d','--direction','--hkl',
dest = 'hkl',
type = 'int', nargs = 3, metavar = 'int int int',
help = 'direction of elastic modulus [%default]') help = 'direction of elastic modulus [%default]')
parser.set_defaults(hkl = (1,1,1)) parser.set_defaults(hkl = (1,1,1),
)
(options,filenames) = parser.parse_args() (options,filenames) = parser.parse_args()
if options.vector == None: if options.stiffness == None:
parser.error('no data column specified...') parser.error('no data column specified...')
datainfo = { # list of requested labels per datatype # --- loop over input files -------------------------------------------------------------------------
'vector': {'len':3,
'label':[]},
}
datainfo['vector']['label'] += options.vector if filenames == []: filenames = ['STDIN']
# ------------------------------------------ setup file handles ------------------------------------
files = []
if filenames == []:
files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
else:
for name in filenames: for name in filenames:
if os.path.exists(name): if not (name == 'STDIN' or os.path.exists(name)): continue
files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr}) table = damask.ASCIItable(name = name, outname = name+'_tmp',
buffered = False)
table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
# ------------------------------------------ loop over input files --------------------------------- # ------------------------------------------ read header ------------------------------------------
for file in files:
if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
table = damask.ASCIItable(file['input'],file['output'],False) # make unbuffered ASCII_table table.head_read()
table.head_read() # read ASCII header info
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
active = [] # ------------------------------------------ sanity checks ----------------------------------------
column = defaultdict(dict)
for label in datainfo['vector']['label']: remarks = []
key = '1_%s'%label columns = []
if key not in table.labels:
file['croak'].write('column %s not found...\n'%key) for i,column in enumerate(table.label_index(options.stiffness)):
if column < 0: remarks.append('column {} not found.'.format(options.stiffness[i]))
else: else:
active.append(label) columns.append(column)
column[label] = table.labels.index(key) # remember columns of requested data table.labels_append('E{}{}{}({})'.format(options.hkl[0],options.hkl[1],options.hkl[2],options.stiffness[i])) # extend ASCII header with new labels
# ------------------------------------------ assemble header --------------------------------------- if remarks != []: table.croak(remarks)
for label in active:
table.labels_append('E%i%i%i(%s)'%(options.hkl[0], # ------------------------------------------ assemble header --------------------------------------
options.hkl[1],
options.hkl[2],label)) # extend ASCII header with new labels table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.head_write() table.head_write()
# ------------------------------------------ process data ------------------------------------------ # ------------------------------------------ process data ------------------------------------------
outputAlive = True outputAlive = True
while outputAlive and table.data_read(): # read next data line of ASCII table while outputAlive and table.data_read(): # read next data line of ASCII table
for label in active: for column in columns:
table.data_append(E_hkl(map(float,table.data[column[label]:\ table.data_append(E_hkl(map(float,table.data[column:column+3]),options.hkl))
column[label]+datainfo['vector']['len']]),options.hkl))
outputAlive = table.data_write() # output processed line outputAlive = table.data_write() # output processed line
# ------------------------------------------ output result ----------------------------------------- # ------------------------------------------ output finalization -----------------------------------
outputAlive and table.output_flush() # just in case of buffered ASCII table
table.input_close() # close input ASCII table (works for stdin) table.close() # close ASCII tables
table.output_close() # close output ASCII table (works for stdout) if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new
if file['name'] != 'STDIN':
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new
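E_hkl is defined upstream of this hunk; for cubic symmetry the directional Young's modulus follows the textbook relation 1/E = S11 - 2(S11 - S12 - S44/2)(l1^2 l2^2 + l2^2 l3^2 + l3^2 l1^2) with direction cosines l. A self-contained sketch under that assumption (component order C11, C12, C44 as in the option help):

import numpy as np

def E_hkl(stiffness,hkl):                                                   # stiffness: [C11,C12,C44]
  C11,C12,C44 = map(float,stiffness)
  S11 = (C11+C12)/((C11-C12)*(C11+2.*C12))                                  # cubic compliances from stiffnesses
  S12 = -C12/((C11-C12)*(C11+2.*C12))
  S44 = 1./C44
  l = np.array(hkl,'d')/np.linalg.norm(hkl)                                 # direction cosines of [hkl]
  J = l[0]**2*l[1]**2 + l[1]**2*l[2]**2 + l[2]**2*l[0]**2
  return 1./(S11 - 2.*(S11-S12-S44/2.)*J)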


@@ -106,11 +106,11 @@ parser.set_defaults(scale = 1.0)
(options,filenames) = parser.parse_args() (options,filenames) = parser.parse_args()
if options.type == None: if options.type == None:
parser.error('no feature type selected...') parser.error('no feature type selected.')
if not set(options.type).issubset(set(list(itertools.chain(*map(lambda x: x['names'],features))))): if not set(options.type).issubset(set(list(itertools.chain(*map(lambda x: x['names'],features))))):
parser.error('type must be chosen from (%s)...'%(', '.join(map(lambda x:'|'.join(x['names']),features))) ) parser.error('type must be chosen from (%s).'%(', '.join(map(lambda x:'|'.join(x['names']),features))) )
if 'biplane' in options.type and 'boundary' in options.type: if 'biplane' in options.type and 'boundary' in options.type:
parser.error("both aliases 'biplane' and 'boundary' are selected...") parser.error("only one from aliases 'biplane' and 'boundary' possible.")
feature_list = [] feature_list = []
for i,feature in enumerate(features): for i,feature in enumerate(features):
@@ -120,55 +120,69 @@ for i,feature in enumerate(features):
feature_list.append(i) # remember valid features feature_list.append(i) # remember valid features
break break
files = [] # --- loop over input files -------------------------------------------------------------------------
if filenames == []: filenames = ['STDIN']
for name in filenames: for name in filenames:
if os.path.exists(name): if not (name == 'STDIN' or os.path.exists(name)): continue
files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr}) table = damask.ASCIItable(name = name, outname = name+'_tmp',
buffered = False)
table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
# ------------------------------------------ loop over input files --------------------------------- # ------------------------------------------ read header ------------------------------------------
for file in files:
file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
table = damask.ASCIItable(file['input'],file['output'],False) # make unbuffered ASCII_table table.head_read()
table.head_read() # read ASCII header info
table.data_readArray()
# --------------- figure out name of coordinate data (support for legacy .x notation) ------------ # ------------------------------------------ sanity checks ----------------------------------------
coordLabels=['%i_%s'%(i+1,options.coords) for i in xrange(3)] # store labels for column keys
if not set(coordLabels).issubset(table.labels):
directions = ['x','y','z']
coordLabels=['%s.%s'%(options.coords,directions[i]) for i in xrange(3)] # store labels for column keys
if not set(coordLabels).issubset(table.labels):
file['croak'].write('no coordinate data (1_%s) found...\n'%options.coords)
continue
coordColumns = [table.labels.index(label) for label in coordLabels]
# --------------- figure out active column -------------------------------------------------------- errors = []
if options.id not in table.labels: remarks = []
file['croak'].write('column %s not found...\n'%options.id) column = {}
if table.label_dimension(options.coords) != 3: errors.append('coordinates {} are not a vector.'.format(options.coords))
else: coordCol = table.label_index(options.coords)
if table.label_dimension(options.id) != 1: errors.append('grain identifier {} not found.'.format(options.id))
else: idCol = table.label_index(options.id)
if remarks != []: table.croak(remarks)
if errors != []:
table.croak(errors)
table.close(dismiss = True)
continue continue
# ------------------------------------------ assemble header --------------------------------------- # ------------------------------------------ assemble header ---------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
for feature in feature_list: for feature in feature_list:
table.labels_append('ED_%s(%s)'%(features[feature]['names'][0],options.id)) # extend ASCII header with new labels table.labels_append('ED_{}({})'.format(features[feature]['names'][0],options.id)) # extend ASCII header with new labels
table.head_write() table.head_write()
# --------------- figure out grid ----------------------------------------------------------------- # --------------- figure out size and grid ---------------------------------------------------------
table.data_readArray()
coords = [{},{},{}] coords = [{},{},{}]
for i in xrange(len(table.data)): for i in xrange(len(table.data)):
for j in xrange(3): for j in xrange(3):
coords[j][str(table.data[i,coordColumns[j]])] = True coords[j][str(table.data[i,coordCol+j])] = True
grid = np.array(map(len,coords),'i') grid = np.array(map(len,coords),'i')
size = grid/np.maximum(np.ones(3,'d'),grid-1.0)* \
np.array([max(map(float,coords[0].keys()))-min(map(float,coords[0].keys())),\
max(map(float,coords[1].keys()))-min(map(float,coords[1].keys())),\
max(map(float,coords[2].keys()))-min(map(float,coords[2].keys())),\
],'d') # size from bounding box, corrected for cell-centeredness
size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 equal to smallest among other spacings
# ------------------------------------------ process value field ----------------------------------- # ------------------------------------------ process value field -----------------------------------
unitlength = 0.0
for i,r in enumerate(grid): stack = [table.data]
if r > 1: unitlength = max(unitlength,(max(map(float,coords[i].keys()))-min(map(float,coords[i].keys())))/(r-1.0))
neighborhood = neighborhoods[options.neighborhood] neighborhood = neighborhoods[options.neighborhood]
convoluted = np.empty([len(neighborhood)]+list(grid+2),'i') convoluted = np.empty([len(neighborhood)]+list(grid+2),'i')
microstructure = periodic_3Dpad(np.array(table.data[:,table.labels.index(options.id)].reshape(grid),'i')) microstructure = periodic_3Dpad(np.array(table.data[:,idCol].reshape(grid),'i'))
for i,p in enumerate(neighborhood): for i,p in enumerate(neighborhood):
stencil = np.zeros((3,3,3),'i') stencil = np.zeros((3,3,3),'i')
@@ -191,19 +205,18 @@ for file in files:
for i,feature_id in enumerate(feature_list): for i,feature_id in enumerate(feature_list):
distance[i,:,:,:] = np.where(uniques >= features[feature_id]['aliens'],0.0,1.0) # seed with 0.0 when enough unique neighbor IDs are present distance[i,:,:,:] = np.where(uniques >= features[feature_id]['aliens'],0.0,1.0) # seed with 0.0 when enough unique neighbor IDs are present
for i in xrange(len(feature_list)):
distance[i,:,:,:] = ndimage.morphology.distance_transform_edt(distance[i,:,:,:])*[options.scale]*3 distance[i,:,:,:] = ndimage.morphology.distance_transform_edt(distance[i,:,:,:])*[options.scale]*3
distance.shape = (len(feature_list),grid.prod())
# ------------------------------------------ add data ------------------------------------------ distance.shape = (len(feature_list),grid.prod())
for i in xrange(len(feature_list)): for i in xrange(len(feature_list)):
lastRow = table.data.shape[1] stack.append(distance[i,:])
table.data=np.insert(table.data,lastRow,distance[i,:],1)
# ------------------------------------------ output result ----------------------------------------- # ------------------------------------------ output result -----------------------------------------
if len(stack) > 1: table.data = np.hstack(tuple(stack))
table.data_writeArray('%.12g') table.data_writeArray('%.12g')
table.input_close() # close input ASCII table (works for stdin)
table.output_close() # close output ASCII table (works for stdout) # ------------------------------------------ output finalization -----------------------------------
if file['name'] != 'STDIN':
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new table.close() # close input ASCII table (works for stdin)
if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new
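The heavy lifting of the feature distance is scipy's Euclidean distance transform on the seed mask assembled above; a minimal sketch (the single zeroed voxel stands in for the seeded feature locations):

import numpy as np
from scipy import ndimage

mask = np.ones((4,4,4))
mask[2,2,2] = 0.                                                            # hypothetical boundary/feature voxel
d = ndimage.morphology.distance_transform_edt(mask)                         # per-voxel distance to the nearest zero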


@@ -10,25 +10,6 @@ from optparse import OptionParser, OptionGroup, Option, SUPPRESS_HELP
scriptID = string.replace('$Id: addGrainID.py 2549 2013-07-10 09:13:21Z MPIE\p.eisenlohr $','\n','\\n') scriptID = string.replace('$Id: addGrainID.py 2549 2013-07-10 09:13:21Z MPIE\p.eisenlohr $','\n','\\n')
scriptName = os.path.splitext(scriptID.split()[1])[0] scriptName = os.path.splitext(scriptID.split()[1])[0]
#--------------------------------------------------------------------------------------------------
class extendedOption(Option):
#--------------------------------------------------------------------------------------------------
# used for definition of new option parser action 'extend', which enables to take multiple option arguments
# taken from online tutorial http://docs.python.org/library/optparse.html
ACTIONS = Option.ACTIONS + ("extend",)
STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)
def take_action(self, action, dest, opt, value, values, parser):
if action == "extend":
lvalue = value.split(",")
values.ensure_value(dest, []).extend(lvalue)
else:
Option.take_action(self, action, dest, opt, value, values, parser)
# ----------------------------- # -----------------------------
class backgroundMessage(threading.Thread): class backgroundMessage(threading.Thread):
# ----------------------------- # -----------------------------
@@ -67,117 +48,121 @@ class backgroundMessage(threading.Thread):
self.print_message() self.print_message()
parser = OptionParser(option_class=extendedOption, usage='%prog options [file[s]]', description = """ parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
Add grain index based on similarity of crystal lattice orientation. Add grain index based on similarity of crystal lattice orientation.
""" + string.replace(scriptID,'\n','\\n') """ + string.replace(scriptID,'\n','\\n')
) )
parser.add_option('-r', '--radius', dest='radius', type='float', parser.add_option('-r', '--radius',
dest = 'radius',
type = 'float', metavar = 'float',
help = 'search radius') help = 'search radius')
parser.add_option('-d', '--disorientation', dest='disorientation', type='float', metavar='ANGLE', parser.add_option('-d', '--disorientation',
dest = 'disorientation',
type = 'float', metavar = 'float',
help = 'disorientation threshold per grain [%default] (degrees)') help = 'disorientation threshold per grain [%default] (degrees)')
parser.add_option('-s', '--symmetry', dest='symmetry', type='string', parser.add_option('-s', '--symmetry',
dest = 'symmetry',
type = 'string', metavar = 'string',
help = 'crystal symmetry [%default]') help = 'crystal symmetry [%default]')
parser.add_option('-e', '--eulers', dest='eulers', type='string', metavar='LABEL', parser.add_option('-e', '--eulers',
dest = 'eulers',
type = 'string', metavar = 'string',
help = 'Euler angles') help = 'Euler angles')
parser.add_option( '--degrees', dest='degrees', action='store_true', parser.add_option( '--degrees',
dest = 'degrees',
action = 'store_true',
help = 'Euler angles are given in degrees [%default]') help = 'Euler angles are given in degrees [%default]')
parser.add_option('-m', '--matrix', dest='matrix', type='string', metavar='LABEL', parser.add_option('-m', '--matrix',
dest = 'matrix',
type = 'string', metavar = 'string',
help = 'orientation matrix') help = 'orientation matrix')
parser.add_option('-a', dest='a', type='string', metavar='LABEL', parser.add_option('-a',
dest = 'a',
type = 'string', metavar = 'string',
help = 'crystal frame a vector') help = 'crystal frame a vector')
parser.add_option('-b', dest='b', type='string', metavar='LABEL', parser.add_option('-b',
dest = 'b',
type = 'string', metavar = 'string',
help = 'crystal frame b vector') help = 'crystal frame b vector')
parser.add_option('-c', dest='c', type='string', metavar='LABEL', parser.add_option('-c',
dest = 'c',
type = 'string', metavar = 'string',
help = 'crystal frame c vector') help = 'crystal frame c vector')
parser.add_option('-q', '--quaternion', dest='quaternion', type='string', metavar='LABEL', parser.add_option('-q', '--quaternion',
dest = 'quaternion',
type = 'string', metavar = 'string',
help = 'quaternion') help = 'quaternion')
parser.add_option('-p', '--position', dest='position', type='string', metavar='LABEL', parser.add_option('-p', '--position',
dest = 'coords',
type = 'string', metavar = 'string',
help = 'spatial position of voxel [%default]') help = 'spatial position of voxel [%default]')
parser.set_defaults(symmetry = 'cubic') parser.set_defaults(symmetry = 'cubic',
parser.set_defaults(position = 'pos') coords = 'pos',
parser.set_defaults(degrees = False) degrees = False,
)
(options, filenames) = parser.parse_args() (options, filenames) = parser.parse_args()
if options.radius == None: if options.radius == None:
parser.error('no radius specified.') parser.error('no radius specified.')
datainfo = { # list of requested labels per datatype input = [options.eulers != None,
'tensor': {'len':9, options.a != None and \
'label':[]},
'vector': {'len':3,
'label':[]},
'quaternion': {'len':4,
'label':[]},
}
if options.eulers != None: datainfo['vector']['label'] += [options.eulers]; input = 'eulers'
if options.a != None and \
options.b != None and \ options.b != None and \
options.c != None: datainfo['vector']['label'] += [options.a,options.b,options.c]; input = 'frame' options.c != None,
if options.matrix != None: datainfo['tensor']['label'] += [options.matrix]; input = 'matrix' options.matrix != None,
if options.quaternion != None: datainfo['quaternion']['label'] += [options.quaternion]; input = 'quaternion' options.quaternion != None,
]
datainfo['vector']['label'] += [options.position] if np.sum(input) != 1: parser.error('needs exactly one input format.')
toRadians = np.pi/180.0 if options.degrees else 1.0 # rescale degrees to radians (label,dim,inputtype) = [(options.eulers,3,'eulers'),
cos_disorientation = np.cos(options.disorientation/2.0*toRadians) ([options.a,options.b,options.c],[3,3,3],'frame'),
(options.matrix,9,'matrix'),
(options.quaternion,4,'quaternion'),
][np.where(input)[0][0]] # select input label that was requested
toRadians = math.pi/180.0 if options.degrees else 1.0 # rescale degrees to radians
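np.where on the boolean input list returns the indices of all truthy entries, so indexing with [0][0] picks the parameter tuple of the single requested format, e.g.:

import numpy as np

input = [False,False,True,False]                                            # only --matrix given
np.where(input)[0][0]                                                       # --> 2, i.e. the 'matrix' entry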
# ------------------------------------------ setup file handles --------------------------------------- # --- loop over input files -------------------------------------------------------------------------
if filenames == []: filenames = ['STDIN']
files = []
if filenames == []:
files.append({'name':'STDIN',
'input':sys.stdin,
'output':sys.stdout,
'croak':sys.stderr})
else:
for name in filenames: for name in filenames:
if os.path.exists(name): if not (name == 'STDIN' or os.path.exists(name)): continue
files.append({'name':name, table = damask.ASCIItable(name = name, outname = name+'_tmp',
'input':open(name), buffered = False)
'output':open(name+'_tmp','w'), table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
'croak':sys.stderr})
#--- loop over input files ------------------------------------------------------------------------ # ------------------------------------------ read header -------------------------------------------
for file in files: table.head_read()
if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
table = damask.ASCIItable(file['input'],file['output'],buffered = False) # make unbuffered ASCII_table # ------------------------------------------ sanity checks -----------------------------------------
table.head_read() # read ASCII header info
# --------------- figure out columns to process errors = []
remarks = []
column = {} if table.label_dimension(options.coords) != 3: errors.append('coordinates {} are not a vector.'.format(options.coords))
missingColumns = False if not np.all(table.label_dimension(label) == dim): errors.append('input {} has wrong dimension {}.'.format(label,dim))
else: column = table.label_index(label)
for datatype,info in datainfo.items(): if remarks != []: table.croak(remarks)
for label in info['label']: if errors != []:
key = list(set([label, '1_'+label]) & set(table.labels)) # check for intersection with table labels table.croak(errors)
if key == []: table.close(dismiss = True)
file['croak'].write('column %s not found...\n'%label)
missingColumns = True # break if label not found
else:
column[label] = table.labels.index(key[0]) # remember columns of requested data
if missingColumns:
continue continue
table.labels_append('grainID_%g'%options.disorientation)
# ------------------------------------------ assemble header --------------------------------------- # ------------------------------------------ assemble header ---------------------------------------
table.info_append(string.replace(scriptID,'\n','\\n') + '\t' + ' '.join(sys.argv[1:])) table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.labels_append('grainID_{}@{}'.format(label if isinstance(label,str) else ','.join(label),options.disorientation/toRadians)) # report orientation source and disorientation in degrees
table.head_write() table.head_write()
# ------------------------------------------ process data --------------------------------------- # ------------------------------------------ process data ------------------------------------------
# ------------------------------------------ build KD tree ---------------------------------------
# ------------------------------------------ build KD tree -----------------------------------------
# --- start background messaging # --- start background messaging
@@ -186,36 +171,25 @@ for file in files:
bg.set_message('reading positions...') bg.set_message('reading positions...')
backup_readSize = table.__IO__['validReadSize'] # bad hack to circumvent overwriting by readArray... table.data_readArray(options.coords) # read position vectors
backup_labels = table.labels # bad hack...
table.data_rewind()
table.data_readArray(range(column[options.position],
column[options.position]+datainfo['vector']['len'])) # read position vectors
# file['croak'].write('%i\n'%(len(table.data)))
grainID = -np.ones(len(table.data),dtype=int) grainID = -np.ones(len(table.data),dtype=int)
start = tick = time.clock() start = tick = time.clock()
bg.set_message('building KD tree...') bg.set_message('building KD tree...')
kdtree = spatial.KDTree(copy.deepcopy(table.data)) kdtree = spatial.KDTree(copy.deepcopy(table.data))
# neighborhood = kdtree.query_ball_tree(kdtree,options.radius)
# file['croak'].write('%.2f seconds\n'%(time.clock()-tick))
# file['croak'].write('%i points\n'%(len(neighborhood)))
# ------------------------------------------ assign grain IDs --------------------------------------
# ------------------------------------------ assign grain IDs --------------------------------------- tick = time.clock()
orientations = [] # quaternions found for grain orientations = [] # quaternions found for grain
memberCounts = [] # number of voxels in grain memberCounts = [] # number of voxels in grain
table.data_rewind()
table.__IO__['validReadSize'] = backup_readSize # bad hack to circumvent overwriting by readArray...
table.labels = backup_labels # bad hack...
p = 0 # point counter p = 0 # point counter
g = 0 # grain counter g = 0 # grain counter
matchedID = -1 matchedID = -1
lastDistance = np.dot(kdtree.data[-1]-kdtree.data[0],kdtree.data[-1]-kdtree.data[0]) # (arbitrarily) use diagonal of cloud lastDistance = np.dot(kdtree.data[-1]-kdtree.data[0],kdtree.data[-1]-kdtree.data[0]) # (arbitrarily) use diagonal of cloud
tick = time.clock() table.data_rewind()
while table.data_read(): # read next data line of ASCII table while table.data_read(): # read next data line of ASCII table
if p > 0 and p % 1000 == 0: if p > 0 and p % 1000 == 0:
@@ -223,31 +197,19 @@ for file in files:
time_delta = (time.clock()-tick) * (len(grainID) - p) / p time_delta = (time.clock()-tick) * (len(grainID) - p) / p
bg.set_message('(%02i:%02i:%02i) processing point %i of %i (grain count %i)...'%(time_delta//3600,time_delta%3600//60,time_delta%60,p,len(grainID),len(orientations))) bg.set_message('(%02i:%02i:%02i) processing point %i of %i (grain count %i)...'%(time_delta//3600,time_delta%3600//60,time_delta%60,p,len(grainID),len(orientations)))
if input == 'eulers': if inputtype == 'eulers':
o = damask.Orientation(Eulers=toRadians*\ o = damask.Orientation(Eulers = np.array(map(float,table.data[column:column+3]))*toRadians,
np.array(map(float,table.data[column[options.eulers]:\
column[options.eulers]+datainfo['vector']['len']])),
symmetry = options.symmetry).reduced() symmetry = options.symmetry).reduced()
elif input == 'matrix': elif inputtype == 'matrix':
o = damask.Orientation(matrix=\ o = damask.Orientation(matrix = np.array(map(float,table.data[column:column+9])).reshape(3,3).transpose(),
np.array([map(float,table.data[column[options.matrix]:\
column[options.matrix]+datainfo['tensor']['len']])]).reshape(np.sqrt(datainfo['tensor']['len']),
np.sqrt(datainfo['tensor']['len'])).transpose(),
symmetry = options.symmetry).reduced() symmetry = options.symmetry).reduced()
elif input == 'frame': elif inputtype == 'frame':
o = damask.Orientation(matrix=\ o = damask.Orientation(matrix = np.array(map(float,table.data[column[0]:column[0]+3] + \
np.array([map(float,table.data[column[options.a]:\ table.data[column[1]:column[1]+3] + \
column[options.a]+datainfo['vector']['len']] + \ table.data[column[2]:column[2]+3])).reshape(3,3),
table.data[column[options.b]:\
column[options.b]+datainfo['vector']['len']] + \
table.data[column[options.c]:\
column[options.c]+datainfo['vector']['len']]
)]).reshape(3,3),
symmetry = options.symmetry).reduced() symmetry = options.symmetry).reduced()
elif input == 'quaternion': elif inputtype == 'quaternion':
o = damask.Orientation(quaternion=\ o = damask.Orientation(quaternion = np.array(map(float,table.data[column:column+4])),
np.array(map(float,table.data[column[options.quaternion]:\
column[options.quaternion]+datainfo['quaternion']['len']])),
symmetry = options.symmetry).reduced() symmetry = options.symmetry).reduced()
matched = False matched = False
@@ -257,17 +219,11 @@ for file in files:
if matchedID != -1: # has matched before? if matchedID != -1: # has matched before?
matched = (o.quaternion.conjugated() * orientations[matchedID].quaternion).w > cos_disorientation matched = (o.quaternion.conjugated() * orientations[matchedID].quaternion).w > cos_disorientation
# if matchedID > 0: # has matched before?
# thisDistance = np.dot(kdtree.data[p]-kdtree.data[p-1],kdtree.data[p]-kdtree.data[p-1],)
# if thisDistance < 4.*lastDistance: # about as close as last point pair?
# disorientation = o.disorientation(orientations[matchedID-1]).quaternion.w # check whether former grainID matches now again
# matched = disorientation > cos_disorientation
# lastDistance = thisDistance
#
if not matched: if not matched:
alreadyChecked = {} alreadyChecked = {}
bestDisorientation = damask.Orientation(quaternion=np.array([0,0,0,1]),symmetry = options.symmetry) # initialize to 180 deg rotation as worst case bestDisorientation = damask.Orientation(quaternion = np.array([0,0,0,1]),
symmetry = options.symmetry) # initialize to 180 deg rotation as worst case
for i in kdtree.query_ball_point(kdtree.data[p],options.radius): # check all neighboring points for i in kdtree.query_ball_point(kdtree.data[p],options.radius): # check all neighboring points
gID = grainID[i] gID = grainID[i]
if gID != -1 and gID not in alreadyChecked: # an already indexed point belonging to a grain not yet tested? if gID != -1 and gID not in alreadyChecked: # an already indexed point belonging to a grain not yet tested?
@@ -277,8 +233,6 @@ for file in files:
disorientation.quaternion.w >= bestDisorientation.quaternion.w: # within disorientation threshold and better than current best? disorientation.quaternion.w >= bestDisorientation.quaternion.w: # within disorientation threshold and better than current best?
matched = True matched = True
matchedID = gID # remember that grain matchedID = gID # remember that grain
# file['croak'].write('%i %f '%(matchedID,disorientation.quaternion.w))
bestDisorientation = disorientation bestDisorientation = disorientation
if not matched: # no match -> new grain found if not matched: # no match -> new grain found
@@ -286,27 +240,14 @@ for file in files:
orientations += [o] # initialize with current orientation orientations += [o] # initialize with current orientation
matchedID = g matchedID = g
g += 1 # increment grain counter g += 1 # increment grain counter
# file['croak'].write('+')
else: # did match existing grain else: # did match existing grain
memberCounts[matchedID] += 1 memberCounts[matchedID] += 1
# file['croak'].write('got back %s is close by %f to %s\n'%(np.degrees(bestQ.asEulers()),np.degrees(2*np.arccos(bestDisorientation.quaternion.w)),np.degrees(bestFormerQ.asEulers())))
# file['croak'].write('.%i %s'%(matchedID, orientations[matchedID-1].quaternion))
# M = (1. - 1./memberCounts[matchedID-1]) * bestFormerQ.asM() + 1./memberCounts[matchedID-1] * bestQ.asM() # 4x4 matrix holding weighted quaternion outer products per grain
# w,v = np.linalg.eigh(M)
# avgQ = damask.Orientation(quaternion=v[:,w.argmax()],symmetry=options.symmetry)
# file['croak'].write('new avg has misori of %f\n'%np.degrees(2*np.arccos(orientations[matchedID-1].disorientation(avgQ)[0].quaternion.w)))
# orientations[matchedID-1].quaternion = damask.Quaternion(v[:,w.argmax()])
# orientations[matchedID-1] = damask.Orientation(quaternion = bestDisorientation.quaternion**(1./memberCounts[matchedID-1]) \
# * orientations[matchedID-1].quaternion,
# symmetry = options.symmetry) # adjust average orientation taking newest member into account
# file['croak'].write(' stored --> %s\n'%(np.degrees(orientations[matchedID-1].quaternion.asEulers())))
# file['croak'].write('.')
grainID[p] = matchedID # remember grain index assigned to point grainID[p] = matchedID # remember grain index assigned to point
p += 1 # increment point p += 1 # increment point
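The match test above boils down to comparing the scalar part of the relative quaternion q1* q2 against cos(theta/2); a minimal sketch with plain (w,x,y,z) quaternions, ignoring the crystal symmetry that damask.Orientation handles on top:

import numpy as np

def quat_mul(p,q):                                                          # Hamilton product, (w,x,y,z) convention
  w1,x1,y1,z1 = p; w2,x2,y2,z2 = q
  return np.array([w1*w2 - x1*x2 - y1*y2 - z1*z2,
                   w1*x2 + x1*w2 + y1*z2 - z1*y2,
                   w1*y2 - x1*z2 + y1*w2 + z1*x2,
                   w1*z2 + x1*y2 - y1*x2 + z1*w2])

def matches(q1,q2,theta):                                                   # within disorientation angle theta (radians)?
  rel = quat_mul(np.array([q1[0],-q1[1],-q1[2],-q1[3]]),q2)                 # q1 conjugated times q2
  return abs(rel[0]) > np.cos(theta/2.)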
bg.set_message('identifying similar orientations among %i grains...'%(len(orientations))) bg.set_message('identifying similar orientations among {} grains...'.format(len(orientations)))
memberCounts = np.array(memberCounts) memberCounts = np.array(memberCounts)
similarOrientations = [[] for i in xrange(len(orientations))] similarOrientations = [[] for i in xrange(len(orientations))]
@@ -318,7 +259,7 @@ for file in files:
similarOrientations[j].append(i) # ...and lower triangle of matrix similarOrientations[j].append(i) # ...and lower triangle of matrix
if similarOrientations[i] != []: if similarOrientations[i] != []:
bg.set_message('grainID %i is as: %s'%(i,' '.join(map(lambda x:str(x),similarOrientations[i])))) bg.set_message('grainID {} is as: {}'.format(i,' '.join(map(lambda x:str(x),similarOrientations[i]))))
stillShifting = True stillShifting = True
while stillShifting: while stillShifting:
@@ -340,7 +281,7 @@ for file in files:
grainID[p] = candidates[np.argsort(memberCounts[candidates])[-1]] # adopt ID that is most frequent in overall dataset grainID[p] = candidates[np.argsort(memberCounts[candidates])[-1]] # adopt ID that is most frequent in overall dataset
memberCounts[gID] -= 1 # my former ID loses one fellow memberCounts[gID] -= 1 # my former ID loses one fellow
memberCounts[grainID[p]] += 1 # my new ID gains one fellow memberCounts[grainID[p]] += 1 # my new ID gains one fellow
bg.set_message('%i:%i --> %i'%(p,gID,grainID[p])) # report switch of grainID bg.set_message('{}:{} --> {}'.format(p,gID,grainID[p])) # report switch of grainID
stillShifting = True stillShifting = True
table.data_rewind() table.data_rewind()
@@ -350,14 +291,9 @@ for file in files:
table.data_write() # output processed line table.data_write() # output processed line
p += 1 p += 1
bg.set_message('done after %i seconds'%(time.clock()-start)) bg.set_message('done after {} seconds'.format(time.clock()-start))
# for i,o in enumerate(orientations): # croak about average grain orientations # ------------------------------------------ output finalization -----------------------------------
# file['croak'].write('%i: %s\n'%(i,' '.join(map(str,o.quaternion.asEulers()))))
# ------------------------------------------ output result ---------------------------------------
table.output_flush() # just in case of buffered ASCII table
table.close() # close ASCII tables table.close() # close ASCII tables
if file['name'] != 'STDIN': if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new
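The loop above assigns each point to a grain by querying a KD-tree for spatial neighbors within options.radius and keeping the already-indexed neighbor whose disorientation is smallest and within threshold, or opening a new grain otherwise. A minimal, runnable sketch of that control flow, with the disorientation test reduced to "any already-indexed neighbor" and all names (points, radius) made up for illustration:

import numpy as np
from scipy.spatial import cKDTree

points = np.random.rand(100, 3)                           # one position per data row
radius = 0.1                                              # stands in for options.radius
kdtree = cKDTree(points)

grainID = -np.ones(len(points), dtype=int)                # -1 = not yet indexed
g = 0                                                     # grain counter
for p in range(len(points)):
    matched = False
    for i in kdtree.query_ball_point(points[p], radius):  # all neighbors within radius
        if grainID[i] != -1:                              # neighbor already in a grain?
            grainID[p] = grainID[i]                       # real script additionally checks
            matched = True                                #   the disorientation threshold
            break
    if not matched:                                       # no compatible neighbor found
        grainID[p] = g                                    # open a new grain
        g += 1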
View File
@@ -18,28 +18,45 @@ Add RGB color value corresponding to TSL-OIM scheme for inverse pole figures.
""", version = scriptID) """, version = scriptID)
parser.add_option('-p', '--pole', dest='pole', type='float', nargs=3, metavar='float float float', parser.add_option('-p', '--pole',
dest = 'pole',
type = 'float', nargs = 3, metavar = 'float float float',
help = 'lab frame direction for inverse pole figure [%default]') help = 'lab frame direction for inverse pole figure [%default]')
parser.add_option('-s', '--symmetry', dest='symmetry', type='choice', parser.add_option('-s', '--symmetry',
choices=damask.Symmetry.lattices[1:], metavar='string', dest = 'symmetry',
help = 'crystal symmetry [cubic] {%s} '%(', '.join(damask.Symmetry.lattices[1:]))) type = 'choice', choices = damask.Symmetry.lattices[1:], metavar='string',
parser.add_option('-e', '--eulers', dest='eulers', metavar='string', help = 'crystal symmetry [%default] {{{}}} '.format(', '.join(damask.Symmetry.lattices[1:])))
parser.add_option('-e', '--eulers',
dest = 'eulers',
type = 'string', metavar = 'string',
help = 'Euler angles label') help = 'Euler angles label')
parser.add_option('-d', '--degrees', dest='degrees', action='store_true', parser.add_option('-d', '--degrees',
dest = 'degrees',
action = 'store_true',
help = 'Euler angles are given in degrees [%default]') help = 'Euler angles are given in degrees [%default]')
parser.add_option('-m', '--matrix', dest='matrix', metavar='string', parser.add_option('-m', '--matrix',
dest = 'matrix',
type = 'string', metavar = 'string',
help = 'orientation matrix label') help = 'orientation matrix label')
parser.add_option('-a', dest='a', metavar='string', parser.add_option('-a',
dest = 'a',
type = 'string', metavar = 'string',
help = 'crystal frame a vector label') help = 'crystal frame a vector label')
parser.add_option('-b', dest='b', metavar='string', parser.add_option('-b',
dest = 'b',
type = 'string', metavar = 'string',
help = 'crystal frame b vector label') help = 'crystal frame b vector label')
parser.add_option('-c', dest='c', metavar='string', parser.add_option('-c',
dest = 'c',
type = 'string', metavar = 'string',
help = 'crystal frame c vector label') help = 'crystal frame c vector label')
parser.add_option('-q', '--quaternion', dest='quaternion', metavar='string', parser.add_option('-q', '--quaternion',
dest = 'quaternion',
type = 'string', metavar = 'string',
help = 'quaternion label') help = 'quaternion label')
parser.set_defaults(pole = (0.0,0.0,1.0), parser.set_defaults(pole = (0.0,0.0,1.0),
symmetry = 'cubic', symmetry = damask.Symmetry.lattices[-1],
degrees = False, degrees = False,
) )
@@ -53,7 +70,7 @@ input = [options.eulers != None,
options.quaternion != None, options.quaternion != None,
] ]
if np.sum(input) != 1: parser.error('needs exactly one input format...') if np.sum(input) != 1: parser.error('needs exactly one input format.')
(label,dim,inputtype) = [(options.eulers,3,'eulers'), (label,dim,inputtype) = [(options.eulers,3,'eulers'),
([options.a,options.b,options.c],[3,3,3],'frame'), ([options.a,options.b,options.c],[3,3,3],'frame'),
@@ -64,64 +81,58 @@ toRadians = math.pi/180.0 if options.degrees else 1.0
pole = np.array(options.pole) pole = np.array(options.pole)
pole /= np.linalg.norm(pole) pole /= np.linalg.norm(pole)
# --- loop over input files ------------------------------------------------------------------------- # --- loop over input files ------------------------------------------------------------------------
if filenames == []:
filenames = ['STDIN'] if filenames == []: filenames = ['STDIN']
for name in filenames: for name in filenames:
if name == 'STDIN': if not (name == 'STDIN' or os.path.exists(name)): continue
file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr} table = damask.ASCIItable(name = name, outname = name+'_tmp',
file['croak'].write('\033[1m'+scriptName+'\033[0m\n') buffered = False)
else: table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
if not os.path.exists(name): continue
file = {'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr}
file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
table = damask.ASCIItable(file['input'],file['output'],buffered=False) # make unbuffered ASCII_table # ------------------------------------------ read header ------------------------------------------
table.head_read() # read ASCII header info
table.head_read()
# ------------------------------------------ sanity checks ----------------------------------------
if not np.all(table.label_dimension(label) == dim): if not np.all(table.label_dimension(label) == dim):
file['croak'].write('input %s has wrong dimension %i...\n'%(label,dim)) table.croak('input {} has wrong dimension {}.'.format(label,dim))
table.close(dismiss = True) # close ASCIItable and remove empty file table.close(dismiss = True) # close ASCIItable and remove empty file
continue continue
column = table.label_index(label) column = table.label_index(label)
# ------------------------------------------ assemble header --------------------------------------- # ------------------------------------------ assemble header ---------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.labels_append(['%i_IPF_%g%g%g_%s'%(i+1,options.pole[0],options.pole[1],options.pole[2],options.symmetry.lower()) for i in xrange(3)]) table.labels_append(['{}_IPF_{}{}{}_{sym}'.format(i+1,*options.pole,sym = options.symmetry.lower()) for i in xrange(3)])
table.head_write() table.head_write()
# ------------------------------------------ process data ------------------------------------------ # ------------------------------------------ process data ------------------------------------------
outputAlive = True outputAlive = True
while outputAlive and table.data_read(): # read next data line of ASCII table while outputAlive and table.data_read(): # read next data line of ASCII table
if inputtype == 'eulers': if inputtype == 'eulers':
o = damask.Orientation(Eulers=toRadians*\ o = damask.Orientation(Eulers = np.array(map(float,table.data[column:column+3]))*toRadians,
np.array(map(float,table.data[column:column+3])),
symmetry = options.symmetry).reduced() symmetry = options.symmetry).reduced()
elif inputtype == 'matrix': elif inputtype == 'matrix':
o = damask.Orientation(matrix=\ o = damask.Orientation(matrix = np.array(map(float,table.data[column:column+9])).reshape(3,3).transpose(),
np.array([map(float,table.data[column:column+9])]).reshape(3,3).transpose(),
symmetry = options.symmetry).reduced() symmetry = options.symmetry).reduced()
elif inputtype == 'frame': elif inputtype == 'frame':
o = damask.Orientation(matrix=\ o = damask.Orientation(matrix = np.array(map(float,table.data[column[0]:column[0]+3] + \
np.array([map(float,table.data[column[0]:column[0]+3] + \
table.data[column[1]:column[1]+3] + \ table.data[column[1]:column[1]+3] + \
table.data[column[2]:column[2]+3] table.data[column[2]:column[2]+3])).reshape(3,3),
)]).reshape(3,3),
symmetry = options.symmetry).reduced() symmetry = options.symmetry).reduced()
elif inputtype == 'quaternion': elif inputtype == 'quaternion':
o = damask.Orientation(quaternion=\ o = damask.Orientation(quaternion = np.array(map(float,table.data[column:column+4])),
np.array(map(float,table.data[column:column+4])),
symmetry = options.symmetry).reduced() symmetry = options.symmetry).reduced()
table.data_append(o.IPFcolor(pole)) table.data_append(o.IPFcolor(pole))
outputAlive = table.data_write() # output processed line outputAlive = table.data_write() # output processed line
# ------------------------------------------ output result ----------------------------------------- # ------------------------------------------ output finalization -----------------------------------
outputAlive and table.output_flush() # just in case of buffered ASCII table
table.input_close() # close input ASCII table (works for stdin) table.close() # close ASCII tables
table.output_close() # close output ASCII table (works for stdout) if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new
if file['name'] != 'STDIN':
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new
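The rewritten scripts share the one-of-N input selection idiom visible above: build a boolean mask over the supplied orientation options, demand exactly one, then pick the matching (label, dimension, type) tuple via np.where. A self-contained sketch, with a stand-in options object replacing the optparse result:

import numpy as np

class options:                                            # stand-in for parsed options
    eulers = 'euler'; a = b = c = None; matrix = None; quaternion = None

input = [options.eulers     != None,
         options.a != None and options.b != None and options.c != None,
         options.matrix     != None,
         options.quaternion != None,
        ]
if np.sum(input) != 1:
    raise SystemExit('needs exactly one input format.')

(label,dim,inputtype) = [(options.eulers,3,'eulers'),
                         ([options.a,options.b,options.c],[3,3,3],'frame'),
                         (options.matrix,9,'matrix'),
                         (options.quaternion,4,'quaternion'),
                        ][np.where(input)[0][0]]          # index of the single True entry
print(label, dim, inputtype)                              # -> euler 3 eulers

Shadowing the builtin input mirrors the scripts; a fresh name would be cleaner in new code.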
View File
@@ -17,102 +17,89 @@ Add data in column(s) of second ASCIItable selected from row that is given by th
""", version = scriptID) """, version = scriptID)
parser.add_option('-a','--asciitable', dest='asciitable', metavar='string', parser.add_option('-c','--map',
help='mapped ASCIItable') dest = 'map',
parser.add_option('-c','--map', dest='map', metavar='string', type = 'string', metavar = 'string',
help = 'heading of column containing row mapping') help = 'heading of column containing row mapping')
parser.add_option('-o','--offset', dest='offset', type='int', metavar='int', parser.add_option('-o','--offset',
help='offset between mapped column value and row [%default]') dest = 'offset',
parser.add_option('-v','--vector', dest='vector', action='extend', metavar='<string LIST>', type = 'int', metavar = 'int',
help='heading of columns containing vector field values') help = 'offset between mapping column value and actual row in mapped table [%default]')
parser.add_option('-t','--tensor', dest='tensor', action='extend', metavar='<string LIST>', parser.add_option('-l','--label',
help='heading of columns containing tensor field values') dest = 'label',
parser.add_option('-s','--special', dest='special', action='extend', metavar='<string LIST>', action = 'extend', metavar = '<string LIST>',
help='heading of columns containing field values of special dimension') help='heading of column(s) to be mapped')
parser.add_option('-d','--dimension', dest='N', type='int', metavar='int', parser.add_option('-a','--asciitable',
help='dimension of special field values [%default]') dest = 'asciitable',
parser.set_defaults(offset = 0) type = 'string', metavar = 'string',
parser.set_defaults(N = 1) help = 'mapped ASCIItable')
parser.set_defaults(offset = 0,
)
(options,filenames) = parser.parse_args() (options,filenames) = parser.parse_args()
if (not None) in [options.vector,options.tensor,options.special]: if options.label == None:
parser.error('no data column specified...') parser.error('no data columns specified.')
if options.map == None: if options.map == None:
parser.error('missing mapping column...') parser.error('no mapping column given.')
datainfo = { # list of requested labels per datatype # ------------------------------------------ process mapping ASCIItable ---------------------------
'vector': {'len':3,
'label':[]},
'tensor': {'len':9,
'label':[]},
'special': {'len':options.N,
'label':[]},
}
if options.vector != None: datainfo['vector']['label'] += options.vector
if options.tensor != None: datainfo['tensor']['label'] += options.tensor
if options.special != None: datainfo['special']['label'] += options.special
# ------------------------------------------ processing mapping ASCIItable -------------------------
if options.asciitable != None and os.path.isfile(options.asciitable): if options.asciitable != None and os.path.isfile(options.asciitable):
mappedTable = damask.ASCIItable(open(options.asciitable),None,False)
mappedTable = damask.ASCIItable(name = options.asciitable,buffered = False, readonly = True)
mappedTable.head_read() # read ASCII header info of mapped table mappedTable.head_read() # read ASCII header info of mapped table
missing_labels = mappedTable.data_readArray(options.label)
labels = [] if len(missing_labels) > 0:
for datatype,info in datainfo.items(): mappedTable.croak('column{} {} not found...'.format('s' if len(missing_labels) > 1 else '',', '.join(missing_labels)))
for label in info['label']:
keys = ['%i_'%(i+1)+label for i in xrange(info['len'])] if info['len'] > 1 else [label]
if set(keys).issubset(mappedTable.labels):
labels+=keys # extend labels
else:
sys.stderr.write('column %s not found...\n'%label)
break
mappedTable.data_readArray(labels)
mappedTable.input_close() # close mapped input ASCII table
else: else:
parser.error('missing mapped ASCIItable...') parser.error('no mapped ASCIItable given.')
# --- loop over input files -------------------------------------------------------------------------
if filenames == []: filenames = ['STDIN']
# ------------------------------------------ setup file handles ------------------------------------
files = []
if filenames == []:
files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
else:
for name in filenames: for name in filenames:
if os.path.exists(name): if not (name == 'STDIN' or os.path.exists(name)): continue
files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr}) table = damask.ASCIItable(name = name, outname = name+'_tmp',
buffered = False)
table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
# ------------------------------------------ loop over input files --------------------------------- # ------------------------------------------ read header ------------------------------------------
for file in files:
if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
table = damask.ASCIItable(file['input'],file['output'],False) # make unbuffered ASCII_table table.head_read()
table.head_read() # read ASCII header info
if options.map not in table.labels: # ------------------------------------------ sanity checks ----------------------------------------
file['croak'].write('column %s not found...\n'%options.map)
errors = []
mappedColumn = table.label_index(options.map)
if mappedColumn < 0: errors.append('mapping column {} not found.'.format(options.map))
if errors != []:
table.croak(errors)
table.close(dismiss = True)
continue continue
# ------------------------------------------ assemble header -------------------------------------- # ------------------------------------------ assemble header --------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
for label in mappedTable.labels: table.labels_append(mappedTable.labels) # extend ASCII header with new labels
table.labels_append(label)
table.head_write() table.head_write()
# ------------------------------------------ process data ------------------------------------------ # ------------------------------------------ process data ------------------------------------------
mappedColumn = table.labels.index(options.map)
outputAlive = True outputAlive = True
while outputAlive and table.data_read(): # read next data line of ASCII table while outputAlive and table.data_read(): # read next data line of ASCII table
table.data_append(mappedTable.data[int(table.data[mappedColumn])+options.offset-1]) # add all mapped data types table.data_append(mappedTable.data[int(table.data[mappedColumn])+options.offset-1]) # add all mapped data types
outputAlive = table.data_write() # output processed line outputAlive = table.data_write() # output processed line
# ------------------------------------------ output result ----------------------------------------- # ------------------------------------------ output finalization -----------------------------------
outputAlive and table.output_flush() # just in case of buffered ASCII table
table.input_close() # close input ASCII table (works for stdin) table.close() # close ASCII tables
table.output_close() # close output ASCII table (works for stdout) if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new
if file['name'] != 'STDIN':
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new mappedTable.close() # close mapped input ASCII table
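The mapping loop above reads the value of the --map column as a 1-based row index into the mapped table, shifted by --offset, and appends that row's columns to the current line. A sketch with made-up arrays:

import numpy as np

mapped = np.array([[10., 11.],                            # data_readArray result of the
                   [20., 21.],                            #   mapped ASCIItable
                   [30., 31.]])
offset = 0                                                # stands in for options.offset
mappedColumn = 2                                          # position of the --map column
for row in ([1.0, 'a', 3], [0.5, 'b', 1]):                # incoming data lines
    key = int(row[mappedColumn]) + offset - 1             # 1-based index -> 0-based row
    print(list(row) + list(mapped[key]))                  # line extended by mapped columns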
View File
@@ -29,76 +29,78 @@ Add vonMises equivalent values for symmetric part of requested strains and/or st
""", version = scriptID) """, version = scriptID)
parser.add_option('-e','--strain', dest='strain', action='extend', metavar='<string LIST>', parser.add_option('-e','--strain',
dest = 'strain',
action = 'extend', metavar = '<string LIST>',
help = 'heading(s) of columns containing strain tensors') help = 'heading(s) of columns containing strain tensors')
parser.add_option('-s','--stress', dest='stress', action='extend', metavar='<string LIST>', parser.add_option('-s','--stress',
dest = 'stress',
action = 'extend', metavar = '<string LIST>',
help = 'heading(s) of columns containing stress tensors') help = 'heading(s) of columns containing stress tensors')
parser.set_defaults(strain = [],
stress = [],
)
(options,filenames) = parser.parse_args() (options,filenames) = parser.parse_args()
if (not None) in [options.strain,options.stress]: if len(options.stress+options.strain) == 0:
parser.error('no data column specified...') parser.error('no data column specified...')
datainfo = { # list of requested labels per datatype # --- loop over input files -------------------------------------------------------------------------
'strain': {'len':9,
'label':[]},
'stress': {'len':9,
'label':[]},
}
if options.strain != None: datainfo['strain']['label'] += options.strain if filenames == []: filenames = ['STDIN']
if options.stress != None: datainfo['stress']['label'] += options.stress
# ------------------------------------------ setup file handles ------------------------------------
files = []
if filenames == []:
files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
else:
for name in filenames: for name in filenames:
if os.path.exists(name): if not (name == 'STDIN' or os.path.exists(name)): continue
files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr}) table = damask.ASCIItable(name = name, outname = name+'_tmp',
buffered = False)
table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
# ------------------------------------------ loop over input files --------------------------------- # ------------------------------------------ read header ------------------------------------------
for file in files:
if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
table = damask.ASCIItable(file['input'],file['output'],False) # make unbuffered ASCII_table table.head_read()
table.head_read() # read ASCII header info
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
active = defaultdict(list) # ------------------------------------------ sanity checks ----------------------------------------
column = defaultdict(dict)
for datatype,info in datainfo.items(): items = {
for label in info['label']: 'strain': {'dim': 9, 'shape': [3,3], 'labels':options.strain, 'active':[], 'column': []},
key = '1_%s'%label 'stress': {'dim': 9, 'shape': [3,3], 'labels':options.stress, 'active':[], 'column': []},
if key not in table.labels: }
file['croak'].write('column %s not found...\n'%key) errors = []
remarks = []
for type, data in items.iteritems():
for what in data['labels']:
dim = table.label_dimension(what)
if dim != data['dim']: remarks.append('column {} is not a {}...'.format(what,type))
else: else:
active[datatype].append(label) items[type]['active'].append(what)
column[datatype][label] = table.labels.index(key) # remember columns of requested data items[type]['column'].append(table.label_index(what))
table.labels_append('Mises({})'.format(what)) # extend ASCII header with new labels
# ------------------------------------------ assemble header --------------------------------------- if remarks != []: table.croak(remarks)
for datatype,labels in active.items(): # loop over vector,tensor if errors != []:
for label in labels: # loop over all requested determinants table.croak(errors)
table.labels_append('Mises(%s)'%label) # extend ASCII header with new labels table.close(dismiss = True)
continue
# ------------------------------------------ assemble header --------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.head_write() table.head_write()
# ------------------------------------------ process data ------------------------------------------ # ------------------------------------------ process data ------------------------------------------
outputAlive = True outputAlive = True
while outputAlive and table.data_read(): # read next data line of ASCII table while outputAlive and table.data_read(): # read next data line of ASCII table
for datatype,labels in active.items(): # loop over vector,tensor for type, data in items.iteritems():
for label in labels: # loop over all requested norms for column in data['column']:
table.data_append(Mises(datatype, table.data_append(Mises(type,
np.array(map(float,table.data[column[datatype][label]: np.array(table.data[column:column+data['dim']],'d').reshape(data['shape'])))
column[datatype][label]+datainfo[datatype]['len']]),'d').reshape(3,3)))
outputAlive = table.data_write() # output processed line outputAlive = table.data_write() # output processed line
# ------------------------------------------ output result ----------------------------------------- # ------------------------------------------ output finalization -----------------------------------
outputAlive and table.output_flush() # just in case of buffered ASCII table
table.input_close() # close input ASCII table (works for stdin) table.close() # close input ASCII table (works for stdin)
table.output_close() # close output ASCII table (works for stdout) if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new
if file['name'] != 'STDIN':
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new
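The Mises() helper sits outside this hunk; the standard definitions it evaluates are sqrt(3/2 s:s) for the stress deviator s and sqrt(2/3 e:e) for the strain deviator e. A sketch under that assumption:

import numpy as np

def Mises(what, tensor):
    dev    = tensor - np.trace(tensor)/3.0*np.eye(3)      # deviatoric part
    symdev = 0.5*(dev + dev.T)                            # symmetric part
    return np.sqrt({'stress': 3.0/2.0,
                    'strain': 2.0/3.0}[what]*np.sum(symdev*symdev))

print(Mises('stress', np.diag([100., 0., 0.])))           # uniaxial stress -> 100.0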
View File
@@ -10,13 +10,14 @@ scriptID = string.replace('$Id$','\n','\\n')
scriptName = os.path.splitext(scriptID.split()[1])[0] scriptName = os.path.splitext(scriptID.split()[1])[0]
# definition of element-wise p-norms for matrices # definition of element-wise p-norms for matrices
def normAbs(object): # p = 1
def norm(which,object):
if which == 'Abs': # p = 1
return sum(map(abs, object)) return sum(map(abs, object))
elif which == 'Frobenius': # p = 2
def normFrobenius(object): # p = 2
return math.sqrt(sum([x*x for x in object])) return math.sqrt(sum([x*x for x in object]))
elif which == 'Max': # p = inf
def normMax(object): # p = infinity
return max(map(abs, object)) return max(map(abs, object))
# -------------------------------------------------------------------- # --------------------------------------------------------------------
@@ -29,86 +30,73 @@ Add column(s) containing norm of requested column(s) being either vectors or ten
""", version = scriptID) """, version = scriptID)
normChoices = ['abs','frobenius','max'] normChoices = ['abs','frobenius','max']
parser.add_option('-n','--norm', dest='norm', type='choice', choices=normChoices, metavar='string', parser.add_option('-n','--norm',
dest = 'norm',
type = 'choice', choices = normChoices, metavar='string',
help = 'type of element-wise p-norm [frobenius] {%s}'%(','.join(map(str,normChoices)))) help = 'type of element-wise p-norm [frobenius] {%s}'%(','.join(map(str,normChoices))))
parser.add_option('-v','--vector', dest='vector', action='extend', metavar='<string LIST>', parser.add_option('-l','--label',
help='heading of columns containing vector field values') dest = 'label',
parser.add_option('-t','--tensor', dest='tensor', action='extend', metavar='<string LIST>', action = 'extend', metavar = '<string LIST>',
help='heading of columns containing tensor field values') help = 'heading of column(s) to calculate norm of')
parser.add_option('-s','--special', dest='special', action='extend', metavar='<string LIST>',
help='heading of columns containing field values of special dimension') parser.set_defaults(norm = 'frobenius',
parser.add_option('-d','--dimension', dest='N', type='int', metavar='int', )
help='dimension of special field values [%default]')
parser.set_defaults(norm = 'frobenius')
parser.set_defaults(N = 12)
(options,filenames) = parser.parse_args() (options,filenames) = parser.parse_args()
if (not None) in [options.vector,options.tensor,options.special]: if options.label == None:
parser.error('no data column specified...') parser.error('no data column specified.')
datainfo = { # list of requested labels per datatype
'vector': {'len':3,
'label':[]},
'tensor': {'len':9,
'label':[]},
'special': {'len':options.N,
'label':[]},
}
if options.vector != None: datainfo['vector']['label'] += options.vector
if options.tensor != None: datainfo['tensor']['label'] += options.tensor
if options.special != None: datainfo['special']['label'] += options.special
# ------------------------------------------ setup file handles ------------------------------------
files = []
if filenames == []:
files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
else:
for name in filenames:
if os.path.exists(name):
files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
# --- loop over input files ------------------------------------------------------------------------- # --- loop over input files -------------------------------------------------------------------------
for file in files:
if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
table = damask.ASCIItable(file['input'],file['output'],False) # make unbuffered ASCII_table if filenames == []: filenames = ['STDIN']
table.head_read() # read ASCII header info
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
active = defaultdict(list) for name in filenames:
column = defaultdict(dict) if not (name == 'STDIN' or os.path.exists(name)): continue
table = damask.ASCIItable(name = name, outname = name+'_tmp',
buffered = False)
table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
for datatype,info in datainfo.items(): # ------------------------------------------ read header ------------------------------------------
for label in info['label']:
key = '1_'+label if info['len'] > 1 else label # columns of non-scalars need to start with '1_' table.head_read()
if key not in table.labels:
file['croak'].write('column %s not found...\n'%key) # ------------------------------------------ sanity checks ----------------------------------------
errors = []
remarks = []
columns = []
dims = []
for what in options.label:
dim = table.label_dimension(what)
if dim < 0: remarks.append('column {} not found...'.format(what))
else: else:
active[datatype].append(label) dims.append(dim)
column[datatype][label] = table.labels.index(key) # remember columns of requested data columns.append(table.label_index(what))
table.labels_append('norm{}({})'.format(options.norm.capitalize(),what)) # extend ASCII header with new labels
# ------------------------------------------ assemble header --------------------------------------- if remarks != []: table.croak(remarks)
for datatype,labels in active.items(): # loop over vector,tensor if errors != []:
for label in labels: # loop over all requested determinants table.croak(errors)
table.labels_append('norm%s(%s)'%(options.norm.capitalize(),label)) # extend ASCII header with new labels table.close(dismiss = True)
continue
# ------------------------------------------ assemble header --------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.head_write() table.head_write()
# ------------------------------------------ process data ------------------------------------------ # ------------------------------------------ process data ------------------------------------------
outputAlive = True outputAlive = True
while outputAlive and table.data_read(): # read next data line of ASCII table while outputAlive and table.data_read(): # read next data line of ASCII table
for datatype,labels in active.items(): # loop over vector,tensor for column,dim in zip(columns,dims):
for label in labels: # loop over all requested norms table.data_append(norm(options.norm.capitalize(),
eval("table.data_append(norm%s(map(float,table.data[column[datatype][label]:"\ map(float,table.data[column:column+dim])))
"column[datatype][label]+datainfo[datatype]['len']])))"%options.norm.capitalize())
outputAlive = table.data_write() # output processed line outputAlive = table.data_write() # output processed line
# ------------------------------------------ output result ----------------------------------------- # ------------------------------------------ output finalization -----------------------------------
outputAlive and table.output_flush() # just in case of buffered ASCII table
table.input_close() # close input ASCII table (works for stdin) table.close() # close input ASCII table (works for stdin)
table.output_close() # close output ASCII table (works for stdout) if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new
if file['name'] != 'STDIN':
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new
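The single norm() dispatcher introduced above unifies the former normAbs/normFrobenius/normMax trio; restated here so the sketch runs standalone, the three p-norms give on a small vector:

import math

def norm(which, values):
    if   which == 'Abs':                                  # p = 1
        return sum(map(abs, values))
    elif which == 'Frobenius':                            # p = 2
        return math.sqrt(sum(x*x for x in values))
    elif which == 'Max':                                  # p = inf
        return max(map(abs, values))

v = [3.0, -4.0]
print(norm('Abs',v), norm('Frobenius',v), norm('Max',v))  # -> 7.0 5.0 4.0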
View File
@@ -20,151 +20,141 @@ Orientation is given by quaternion, Euler angles, rotation matrix, or crystal fr
""", version = scriptID) """, version = scriptID)
outputChoices = ['quaternion','eulers'] outputChoices = ['quaternion','eulers']
parser.add_option('-o', '--output', dest='output', action='extend', metavar='<string LIST>', parser.add_option('-o', '--output',
dest = 'output',
action = 'extend', metavar = '<string LIST>',
help = 'output orientation formats {%s}'%(','.join(outputChoices))) help = 'output orientation formats {%s}'%(','.join(outputChoices)))
parser.add_option('-s', '--symmetry', dest='symmetry', type='choice', parser.add_option('-r', '--rotation',
choices=damask.Symmetry.lattices[1:], metavar='string', dest='rotation',
help = 'crystal symmetry [cubic] {%s}'%(', '.join(damask.Symmetry.lattices[1:]))) type = 'float', nargs = 4, metavar = ' '.join(['float']*4),
parser.add_option('-r', '--rotation', dest='rotation', type='float', nargs=4, metavar='float float float float',
help = 'angle and axis to (pre)rotate orientation') help = 'angle and axis to (pre)rotate orientation')
parser.add_option('-e', '--eulers', dest='eulers', metavar='string',
parser.add_option('-s', '--symmetry',
dest = 'symmetry',
type = 'choice', choices = damask.Symmetry.lattices[1:], metavar='string',
help = 'crystal symmetry [%default] {{{}}} '.format(', '.join(damask.Symmetry.lattices[1:])))
parser.add_option('-e', '--eulers',
dest = 'eulers',
type = 'string', metavar = 'string',
help = 'Euler angles label') help = 'Euler angles label')
parser.add_option('-d', '--degrees', dest='degrees', action='store_true', parser.add_option('-d', '--degrees',
dest = 'degrees',
action = 'store_true',
help = 'Euler angles are given in degrees [%default]') help = 'Euler angles are given in degrees [%default]')
parser.add_option('-m', '--matrix', dest='matrix', metavar='string', parser.add_option('-m', '--matrix',
dest = 'matrix',
type = 'string', metavar = 'string',
help = 'orientation matrix label') help = 'orientation matrix label')
parser.add_option('-a', dest='a', metavar='string', parser.add_option('-a',
dest = 'a',
type = 'string', metavar = 'string',
help = 'crystal frame a vector label') help = 'crystal frame a vector label')
parser.add_option('-b', dest='b', metavar='string', parser.add_option('-b',
dest = 'b',
type = 'string', metavar = 'string',
help = 'crystal frame b vector label') help = 'crystal frame b vector label')
parser.add_option('-c', dest='c', metavar='string', parser.add_option('-c',
dest = 'c',
type = 'string', metavar = 'string',
help = 'crystal frame c vector label') help = 'crystal frame c vector label')
parser.add_option('-q', '--quaternion', dest='quaternion', metavar='string', parser.add_option('-q', '--quaternion',
dest = 'quaternion',
type = 'string', metavar = 'string',
help = 'quaternion label') help = 'quaternion label')
parser.set_defaults(symmetry = 'cubic')
parser.set_defaults(rotation = (0.,1.,1.,1.)) # no rotation about 1,1,1 parser.set_defaults(output = [],
parser.set_defaults(degrees = False) symmetry = damask.Symmetry.lattices[-1],
rotation = (0.,1.,1.,1.), # no rotation about 1,1,1
degrees = False,
)
(options, filenames) = parser.parse_args() (options, filenames) = parser.parse_args()
datainfo = { # list of requested labels per datatype
'tensor': {'len':9,
'label':[]},
'vector': {'len':3,
'label':[]},
'quaternion': {'len':4,
'label':[]},
}
if options.output == None or (not set(options.output).issubset(set(outputChoices))):
parser.error('output must be chosen from %s...'%(', '.join(outputChoices)))
input=[]
if options.eulers != None:
datainfo['vector']['label'] += [options.eulers]
input.append('eulers')
if options.a != None and \
options.b != None and \
options.c != None:
datainfo['vector']['label'] += [options.a,options.b,options.c]
input.append('frame')
if options.matrix != None:
datainfo['tensor']['label'] += [options.matrix]
input.append('matrix')
if options.quaternion != None:
datainfo['quaternion']['label'] += [options.quaternion]
input.append('quaternion')
if len(input) != 1: parser.error('needs exactly one input format...')
input = input[0]
toRadians = math.pi/180.0 if options.degrees else 1.0 # rescale degrees to radians
options.output = map(lambda x: x.lower(), options.output) options.output = map(lambda x: x.lower(), options.output)
if options.output == [] or (not set(options.output).issubset(set(outputChoices))):
parser.error('output must be chosen from {}.'.format(', '.join(outputChoices)))
r = damask.Quaternion().fromAngleAxis(toRadians*options.rotation[0],options.rotation[1:]) input = [options.eulers != None,
options.a != None and \
options.b != None and \
options.c != None,
options.matrix != None,
options.quaternion != None,
]
# --- loop over input files ------------------------------------------------------------------------- if np.sum(input) != 1: parser.error('needs exactly one input format.')
if filenames == []:
filenames = ['STDIN'] (label,dim,inputtype) = [(options.eulers,3,'eulers'),
([options.a,options.b,options.c],[3,3,3],'frame'),
(options.matrix,9,'matrix'),
(options.quaternion,4,'quaternion'),
][np.where(input)[0][0]] # select input label that was requested
toRadians = math.pi/180.0 if options.degrees else 1.0 # rescale degrees to radians
r = damask.Quaternion().fromAngleAxis(toRadians*options.rotation[0],options.rotation[1:]) # pre-rotation
# --- loop over input files ------------------------------------------------------------------------
if filenames == []: filenames = ['STDIN']
for name in filenames: for name in filenames:
if name == 'STDIN': if not (name == 'STDIN' or os.path.exists(name)): continue
file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr} table = damask.ASCIItable(name = name, outname = name+'_tmp',
file['croak'].write('\033[1m'+scriptName+'\033[0m\n') buffered = False)
else: table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
if not os.path.exists(name): continue
file = {'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr}
file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
table = damask.ASCIItable(file['input'],file['output'],buffered=False) # make unbuffered ASCII_table # ------------------------------------------ read header ------------------------------------------
table.head_read() # read ASCII header info
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
column = {} table.head_read()
missingColumns = False
for datatype,info in datainfo.items(): # ------------------------------------------ sanity checks -----------------------------------------
for label in info['label']:
key = '1_'+label if info['len'] > 1 else label # non-special labels have to start with '1_'
if key not in table.labels:
file['croak'].write('column %s not found...\n'%key)
missingColumns = True # break if label not found
else:
column[label] = table.labels.index(key) # remember columns of requested data
if missingColumns: errors = []
remarks = []
if not np.all(table.label_dimension(label) == dim): errors.append('input {} has wrong dimension {}.'.format(label,dim))
else: column = table.label_index(label)
if remarks != []: table.croak(remarks)
if errors != []:
table.croak(errors)
table.close(dismiss = True)
continue continue
# ------------------------------------------ assemble header --------------------------------------- # ------------------------------------------ assemble header ---------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
for output in options.output: for output in options.output:
if output == 'quaternion': if output == 'quaternion': table.labels_append(['{}_quat({})'.format( i+1,options.symmetry) for i in xrange(4)])
table.labels_append(['%i_quaternion_%s'%(i+1,options.symmetry) for i in xrange(4)]) if output == 'eulers': table.labels_append(['{}_eulers({})'.format(i+1,options.symmetry) for i in xrange(3)])
if output == 'eulers':
table.labels_append(['%i_eulers_%s'%(i+1,options.symmetry) for i in xrange(3)])
table.head_write() table.head_write()
# ------------------------------------------ process data ------------------------------------------ # ------------------------------------------ process data ------------------------------------------
outputAlive = True outputAlive = True
while outputAlive and table.data_read(): # read next data line of ASCII table while outputAlive and table.data_read(): # read next data line of ASCII table
if input == 'eulers': if inputtype == 'eulers':
o = damask.Orientation(Eulers=toRadians*\ o = damask.Orientation(Eulers = np.array(map(float,table.data[column:column+3]))*toRadians,
np.array(map(float,table.data[column[options.eulers]:\
column[options.eulers]+datainfo['vector']['len']])),
symmetry = options.symmetry).reduced() symmetry = options.symmetry).reduced()
elif input == 'matrix': elif inputtype == 'matrix':
o = damask.Orientation(matrix=\ o = damask.Orientation(matrix = np.array(map(float,table.data[column:column+9])).reshape(3,3).transpose(),
np.array([map(float,table.data[column[options.matrix]:\
column[options.matrix]+datainfo['tensor']['len']])]).reshape(3,3),
symmetry = options.symmetry).reduced() symmetry = options.symmetry).reduced()
elif input == 'frame': elif inputtype == 'frame':
o = damask.Orientation(matrix=\ o = damask.Orientation(matrix = np.array(map(float,table.data[column[0]:column[0]+3] + \
np.array([map(float,table.data[column[options.a]:\ table.data[column[1]:column[1]+3] + \
column[options.a]+datainfo['vector']['len']] + \ table.data[column[2]:column[2]+3])).reshape(3,3),
table.data[column[options.b]:\
column[options.b]+datainfo['vector']['len']] + \
table.data[column[options.c]:\
column[options.c]+datainfo['vector']['len']]
)]).reshape(3,3),
symmetry = options.symmetry).reduced() symmetry = options.symmetry).reduced()
elif input == 'quaternion': elif inputtype == 'quaternion':
o = damask.Orientation(quaternion=\ o = damask.Orientation(quaternion = np.array(map(float,table.data[column:column+4])),
np.array(map(float,table.data[column[options.quaternion]:\
column[options.quaternion]+datainfo['quaternion']['len']])),
symmetry = options.symmetry).reduced() symmetry = options.symmetry).reduced()
o.quaternion = r*o.quaternion o.quaternion = r*o.quaternion
for output in options.output: for output in options.output:
if output == 'quaternion': if output == 'quaternion': table.data_append(o.asQuaternion())
table.data_append(o.asQuaternion()) if output == 'eulers': table.data_append(o.asEulers('Bunge'))
if output == 'eulers':
table.data_append(o.asEulers('Bunge'))
outputAlive = table.data_write() # output processed line outputAlive = table.data_write() # output processed line
# ------------------------------------------ output result ----------------------------------------- # ------------------------------------------ output finalization -----------------------------------
outputAlive and table.output_flush() # just in case of buffered ASCII table
table.input_close() # close input ASCII table (works for stdin) table.close() # close ASCII tables
table.output_close() # close output ASCII table (works for stdout) if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new
if file['name'] != 'STDIN':
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new
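The pre-rotation above is plain quaternion composition: r is built from the --rotation angle/axis and left-multiplied onto every orientation. A self-contained numpy sketch without the damask classes, assuming the usual (w,x,y,z) Hamilton convention:

import math
import numpy as np

def from_angle_axis(angle, axis):                         # unit quaternion from angle/axis
    axis = np.asarray(axis)/np.linalg.norm(axis)
    return np.r_[math.cos(angle/2.0), math.sin(angle/2.0)*axis]

def q_mul(p, q):                                          # Hamilton product p*q
    pw,px,py,pz = p
    qw,qx,qy,qz = q
    return np.array([pw*qw - px*qx - py*qy - pz*qz,
                     pw*qx + px*qw + py*qz - pz*qy,
                     pw*qy - px*qz + py*qw + pz*qx,
                     pw*qz + px*qy - py*qx + pz*qw])

r = from_angle_axis(math.radians(0.0), [1.,1.,1.])        # script default: zero rotation
o = np.array([1., 0., 0., 0.])                            # identity orientation
print(q_mul(r, o))                                        # -> [1. 0. 0. 0.], unchanged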
View File
@@ -19,59 +19,67 @@ Add column(s) containing Second Piola--Kirchhoff stress based on given column(s)
""", version = scriptID) """, version = scriptID)
parser.add_option('-f','--defgrad', dest='defgrad', metavar='string', parser.add_option('-f','--defgrad',
dest = 'defgrad',
type = 'string', metavar = 'string',
help = 'heading of columns containing deformation gradient [%default]') help = 'heading of columns containing deformation gradient [%default]')
parser.add_option('-p','--stress', dest='stress', metavar='string', parser.add_option('-p','--stress',
dest = 'stress',
type = 'string', metavar = 'string',
help = 'heading of columns containing first Piola--Kirchhoff stress [%default]') help = 'heading of columns containing first Piola--Kirchhoff stress [%default]')
parser.set_defaults(defgrad = 'f')
parser.set_defaults(stress = 'p') parser.set_defaults(defgrad = 'f',
stress = 'p',
)
(options,filenames) = parser.parse_args() (options,filenames) = parser.parse_args()
# ------------------------------------------ setup file handles ------------------------------------ # --- loop over input files -------------------------------------------------------------------------
files = []
if filenames == []: if filenames == []: filenames = ['STDIN']
files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
else:
for name in filenames: for name in filenames:
if os.path.exists(name): if not (name == 'STDIN' or os.path.exists(name)): continue
files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr}) table = damask.ASCIItable(name = name, outname = name+'_tmp',
buffered = False)
table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
# ------------------------------------------ loop over input files --------------------------------- # ------------------------------------------ read header ------------------------------------------
for file in files:
if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
table = damask.ASCIItable(file['input'],file['output'],False) # make unbuffered ASCII_table table.head_read()
table.head_read() # read ASCII header info
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
# --------------- figure out columns to process --------------------------------------------------- # ------------------------------------------ sanity checks ----------------------------------------
missingColumns = False
column={ 'defgrad': table.labels.index('1_'+options.defgrad), errors = []
'stress': table.labels.index('1_'+options.stress)} column = {}
for key in column:
if column[key]<1: for tensor in [options.defgrad,options.stress]:
file['croak'].write('column %s not found...\n'%key) dim = table.label_dimension(tensor)
missingColumns=True if dim < 0: errors.append('column {} not found.'.format(tensor))
if missingColumns: continue elif dim != 9: errors.append('column {} is not a tensor.'.format(tensor))
else:
column[tensor] = table.label_index(tensor)
if errors != []:
table.croak(errors)
table.close(dismiss = True)
continue
# ------------------------------------------ assemble header -------------------------------------- # ------------------------------------------ assemble header --------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.labels_append(['%i_S'%(i+1) for i in xrange(9)]) # extend ASCII header with new labels table.labels_append(['%i_S'%(i+1) for i in xrange(9)]) # extend ASCII header with new labels
table.head_write() table.head_write()
# ------------------------------------------ process data ------------------------------------------ # ------------------------------------------ process data ------------------------------------------
outputAlive = True outputAlive = True
while outputAlive and table.data_read(): # read next data line of ASCII table while outputAlive and table.data_read(): # read next data line of ASCII table
F = np.array(map(float,table.data[column['defgrad']:column['defgrad']+9]),'d').reshape(3,3) F = np.array(map(float,table.data[column[options.defgrad]:column[options.defgrad]+9]),'d').reshape(3,3)
P = np.array(map(float,table.data[column['stress'] :column['stress']+9]),'d').reshape(3,3) P = np.array(map(float,table.data[column[options.stress ]:column[options.stress ]+9]),'d').reshape(3,3)
table.data_append(list(np.dot(np.linalg.inv(F),P).reshape(9))) # [S] = [F^-1].[P] table.data_append(list(np.dot(np.linalg.inv(F),P).reshape(9))) # [S] = [F^-1].[P]
outputAlive = table.data_write() # output processed line outputAlive = table.data_write() # output processed line
# ------------------------------------------ output result ----------------------------------------- # ------------------------------------------ output finalization -----------------------------------
outputAlive and table.output_flush() # just in case of buffered ASCII table
table.input_close() # close input ASCII table (works for stdin) table.close() # close input ASCII table (works for stdin)
table.output_close() # close output ASCII table (works for stdout) if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new
if file['name'] != 'STDIN':
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new
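The core line above needs only one inversion: since P = F.S relates the two Piola--Kirchhoff stresses, S = F^-1 . P. A quick numerical round trip with made-up values:

import numpy as np

F = np.eye(3) + 0.01*np.random.rand(3,3)                  # deformation gradient (made up)
S = np.random.rand(3,3)                                   # assumed 2nd Piola--Kirchhoff
P = np.dot(F, S)                                          # corresponding 1st PK stress
print(np.allclose(np.dot(np.linalg.inv(F), P), S))        # -> True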
View File
@@ -18,127 +18,116 @@ Add x,y coordinates of stereographic projection of given direction (pole) in cry
""", version = scriptID) """, version = scriptID)
parser.add_option('-p', '--pole', dest='pole', type='float', nargs=3, metavar='float float float', parser.add_option('-p', '--pole',
dest = 'pole',
type = 'float', nargs = 3, metavar = 'float float float',
help = 'crystal frame direction for pole figure [%default]') help = 'crystal frame direction for pole figure [%default]')
parser.add_option('--polar', dest='polar', action='store_true', parser.add_option('--polar',
dest = 'polar',
action = 'store_true',
help = 'output polar coordinates r,phi [%default]') help = 'output polar coordinates r,phi [%default]')
parser.add_option('-e', '--eulers', dest='eulers', metavar='string', parser.add_option('-e', '--eulers',
dest = 'eulers',
type = 'string', metavar = 'string',
help = 'Euler angles label') help = 'Euler angles label')
parser.add_option('-d', '--degrees', dest='degrees', action='store_true', parser.add_option('-d', '--degrees',
dest = 'degrees',
action = 'store_true',
help = 'Euler angles are given in degrees [%default]') help = 'Euler angles are given in degrees [%default]')
parser.add_option('-m', '--matrix', dest='matrix', metavar='string', parser.add_option('-m', '--matrix',
dest = 'matrix',
type = 'string', metavar = 'string',
help = 'orientation matrix label') help = 'orientation matrix label')
parser.add_option('-a', dest='a', metavar='string', parser.add_option('-a',
dest = 'a',
type = 'string', metavar = 'string',
help = 'crystal frame a vector label') help = 'crystal frame a vector label')
parser.add_option('-b', dest='b', metavar='string', parser.add_option('-b',
dest = 'b',
type = 'string', metavar = 'string',
help = 'crystal frame b vector label') help = 'crystal frame b vector label')
parser.add_option('-c', dest='c', metavar='string', parser.add_option('-c',
dest = 'c',
type = 'string', metavar = 'string',
help = 'crystal frame c vector label') help = 'crystal frame c vector label')
parser.add_option('-q', '--quaternion', dest='quaternion', metavar='string', parser.add_option('-q', '--quaternion',
dest = 'quaternion',
type = 'string', metavar = 'string',
help = 'quaternion label') help = 'quaternion label')
parser.set_defaults(pole = (1.0,0.0,0.0))
parser.set_defaults(degrees = False) parser.set_defaults(pole = (1.0,0.0,0.0),
parser.set_defaults(polar = False) degrees = False,
polar = False,
)
(options, filenames) = parser.parse_args() (options, filenames) = parser.parse_args()
datainfo = { # list of requested labels per datatype input = [options.eulers != None,
'tensor': {'len':9, options.a != None and \
'label':[]},
'vector': {'len':3,
'label':[]},
'quaternion': {'len':4,
'label':[]},
}
input = []
if options.eulers != None:
datainfo['vector']['label'] += [options.eulers]
input += ['eulers']
if options.a != None and \
options.b != None and \ options.b != None and \
options.c != None: options.c != None,
datainfo['vector']['label'] += [options.a,options.b,options.c] options.matrix != None,
input += ['frame'] options.quaternion != None,
if options.matrix != None: ]
datainfo['tensor']['label'] += [options.matrix]
input += ['matrix']
if options.quaternion != None:
datainfo['quaternion']['label'] += [options.quaternion]
input += ['quaternion']
if len(input) != 1: parser.error('needs exactly one input format...') if np.sum(input) != 1: parser.error('needs exactly one input format.')
input = input[0]
toRadians = np.pi/180.0 if options.degrees else 1.0 # rescale degrees to radians (label,dim,inputtype) = [(options.eulers,3,'eulers'),
([options.a,options.b,options.c],[3,3,3],'frame'),
(options.matrix,9,'matrix'),
(options.quaternion,4,'quaternion'),
][np.where(input)[0][0]] # select input label that was requested
toRadians = math.pi/180.0 if options.degrees else 1.0 # rescale degrees to radians
pole = np.array(options.pole) pole = np.array(options.pole)
pole /= np.linalg.norm(pole) pole /= np.linalg.norm(pole)
# --- loop over input files ------------------------------------------------------------------------- # --- loop over input files ------------------------------------------------------------------------
if filenames == []:
filenames = ['STDIN'] if filenames == []: filenames = ['STDIN']
for name in filenames: for name in filenames:
if name == 'STDIN': if not (name == 'STDIN' or os.path.exists(name)): continue
file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr} table = damask.ASCIItable(name = name, outname = name+'_tmp',
file['croak'].write('\033[1m'+scriptName+'\033[0m\n') buffered = False)
else: table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
if not os.path.exists(name): continue
file = {'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr}
file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
table = damask.ASCIItable(file['input'],file['output'],buffered = False) # make unbuffered ASCII_table # ------------------------------------------ read header ------------------------------------------
table.head_read() # read ASCII header info
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
column = {} table.head_read()
missingColumns = False
for datatype,info in datainfo.items(): # ------------------------------------------ sanity checks ----------------------------------------
for label in info['label']:
key = list(set([label, '1_'+label]) & set(table.labels))
if key == []:
file['croak'].write('column %s not found...\n'%label)
missingColumns = True # break if label not found
else:
column[label] = table.labels.index(key[0]) # remember columns of requested data
if missingColumns: errors = []
remarks = []
if not np.all(table.label_dimension(label) == dim): errors.append('input {} has wrong dimension {}.'.format(label,dim))
else: column = table.label_index(label)
if remarks != []: table.croak(remarks)
if errors != []:
table.croak(errors)
table.close(dismiss = True)
continue continue
# ------------------------------------------ assemble header --------------------------------------- # ------------------------------------------ assemble header ---------------------------------------
table.labels_append(['%i_pole_%g%g%g'%(i+1,options.pole[0],options.pole[1],options.pole[2]) for i in xrange(2)])
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.labels_append(['{}_pole_{}{}{}'.format(i+1,*options.pole) for i in xrange(2)])
table.head_write() table.head_write()
# ------------------------------------------ process data ------------------------------------------ # ------------------------------------------ process data ------------------------------------------
outputAlive = True outputAlive = True
while outputAlive and table.data_read(): # read next data line of ASCII table while outputAlive and table.data_read(): # read next data line of ASCII table
if input == 'eulers': if inputtype == 'eulers':
o = damask.Orientation(Eulers=toRadians*\ o = damask.Orientation(Eulers = np.array(map(float,table.data[column:column+3]))*toRadians)
np.array(map(float,table.data[column[options.eulers]:\ elif inputtype == 'matrix':
column[options.eulers]+datainfo['vector']['len']])), o = damask.Orientation(matrix = np.array(map(float,table.data[column:column+9])).reshape(3,3).transpose())
) elif inputtype == 'frame':
elif input == 'matrix': o = damask.Orientation(matrix = np.array(map(float,table.data[column[0]:column[0]+3] + \
o = damask.Orientation(matrix=\ table.data[column[1]:column[1]+3] + \
np.array([map(float,table.data[column[options.matrix]:\ table.data[column[2]:column[2]+3])).reshape(3,3))
column[options.matrix]+datainfo['tensor']['len']])]).reshape(np.sqrt(datainfo['tensor']['len']), elif inputtype == 'quaternion':
np.sqrt(datainfo['tensor']['len'])).transpose(), o = damask.Orientation(quaternion = np.array(map(float,table.data[column:column+4])))
)
elif input == 'frame':
o = damask.Orientation(matrix=\
np.array([map(float,table.data[column[options.a]:\
column[options.a]+datainfo['vector']['len']] + \
table.data[column[options.b]:\
column[options.b]+datainfo['vector']['len']] + \
table.data[column[options.c]:\
column[options.c]+datainfo['vector']['len']]
)]).reshape(3,3),
)
elif input == 'quaternion':
o = damask.Orientation(quaternion=\
np.array(map(float,table.data[column[options.quaternion]:\
column[options.quaternion]+datainfo['quaternion']['len']])),
)
rotatedPole = o.quaternion*pole # rotate pole according to crystal orientation rotatedPole = o.quaternion*pole # rotate pole according to crystal orientation
(x,y) = rotatedPole[0:2]/(1.+abs(rotatedPole[2])) # stereographic projection (x,y) = rotatedPole[0:2]/(1.+abs(rotatedPole[2])) # stereographic projection
@@ -147,10 +136,7 @@ for name in filenames:
outputAlive = table.data_write() # output processed line outputAlive = table.data_write() # output processed line
# ------------------------------------------ output result ----------------------------------------- # ------------------------------------------ output finalization -----------------------------------
outputAlive and table.output_flush() # just in case of buffered ASCII table
table.input_close() # close input ASCII table (works for stdin) table.close() # close ASCII tables
table.output_close() # close output ASCII table (works for stdout) if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new
if file['name'] != 'STDIN':
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new
View File
@@ -19,67 +19,70 @@ Add column(s) containing eigenvalues and eigenvectors of requested tensor column
 """, version = scriptID)

-parser.add_option('-t','--tensor', dest='tensor', action='extend', metavar='<string LIST>',
+parser.add_option('-t','--tensor',
+                  dest = 'tensor',
+                  action = 'extend', metavar = '<string LIST>',
                   help = 'heading of columns containing tensor field values')

 (options,filenames) = parser.parse_args()

 if options.tensor == None:
-  parser.error('no data column specified...')
+  parser.error('no data column specified.')
-datainfo = {                                                                                        # list of requested labels per datatype
-             'tensor':     {'len':9,
-                            'label':[]},
-           }
-
-datainfo['tensor']['label'] += options.tensor
-
-# ------------------------------------------ setup file handles ------------------------------------
-files = []
-for name in filenames:
-  if os.path.exists(name):
-    files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
-
 # --- loop over input files -------------------------------------------------------------------------

-for file in files:
-  file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
-
-  table = damask.ASCIItable(file['input'],file['output'],True)                                      # make unbuffered ASCII_table
-  table.head_read()                                                                                 # read ASCII header info
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-
-  active = []
-  column = defaultdict(dict)
-
-  for label in datainfo['tensor']['label']:
-    key = '1_%s'%label
-    if key not in table.labels:
-      file['croak'].write('column %s not found...\n'%key)
+if filenames == []: filenames = ['STDIN']
+
+for name in filenames:
+  if not (name == 'STDIN' or os.path.exists(name)): continue
+  table = damask.ASCIItable(name = name, outname = name+'_tmp',
+                            buffered = False)
+  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
+
+# ------------------------------------------ read header ------------------------------------------
+
+  table.head_read()
+# ------------------------------------------ sanity checks ----------------------------------------
+
+  items = {
+            'tensor': {'dim': 9, 'shape': [3,3], 'labels':options.tensor, 'column': []},
+          }
+  errors  = []
+  remarks = []
+
+  for type, data in items.iteritems():
+    for what in data['labels']:
+      dim = table.label_dimension(what)
+      if dim != data['dim']: remarks.append('column {} is not a {}...'.format(what,type))
-    else:
-      active.append(label)
-      column[label] = table.labels.index(key)                                                       # remember columns of requested data
-
-# ------------------------------------------ assemble header ---------------------------------------
-  for label in active:
-    table.labels_append(['%i_eigval(%s)'%(i+1,label) for i in xrange(3)])                           # extend ASCII header with new labels
-    table.labels_append(['%i_eigvec(%s)'%(i+1,label) for i in xrange(9)])                           # extend ASCII header with new labels
+      else:
+        items[type]['column'].append(table.label_index(what))
+        table.labels_append(['{}_eigval({})'.format(i+1,what) for i in xrange(3)])                  # extend ASCII header with new labels
+        table.labels_append(['{}_eigvec({})'.format(i+1,what) for i in xrange(9)])                  # extend ASCII header with new labels
+
+  if remarks != []: table.croak(remarks)
+  if errors  != []:
+    table.croak(errors)
+    table.close(dismiss = True)
+    continue
+
+# ------------------------------------------ assemble header --------------------------------------
+
+  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
   table.head_write()
 # ------------------------------------------ process data ------------------------------------------

   outputAlive = True
   while outputAlive and table.data_read():                                                          # read next data line of ASCII table
-    for label in active:                                                                            # loop over requested data
-      tensor = np.array(map(float,table.data[column[label]:column[label]+datainfo['tensor']['len']])).\
-                        reshape((datainfo['tensor']['len']//3,3))
-      (u,v) = np.linalg.eig(tensor)
-      table.data_append(list(u))
-      table.data_append(list(v.transpose().reshape(datainfo['tensor']['len'])))
+    for type, data in items.iteritems():
+      for column in data['column']:
+        (u,v) = np.linalg.eig(np.array(map(float,table.data[column:column+data['dim']])).reshape(data['shape']))
+        table.data_append(list(u))
+        table.data_append(list(v.transpose().reshape(data['dim'])))
     outputAlive = table.data_write()                                                                # output processed line

-# ------------------------------------------ output result -----------------------------------------
-  outputAlive and table.output_flush()                                                              # just in case of buffered ASCII table
-  table.input_close()                                                                               # close input ASCII table (works for stdin)
-  table.output_close()                                                                              # close output ASCII table (works for stdout)
-  if file['name'] != 'STDIN':
-    os.rename(file['name']+'_tmp',file['name'])                                                     # overwrite old one with tmp new
+# ------------------------------------------ output finalization -----------------------------------
+  table.close()                                                                                     # close ASCII tables
+  if name != 'STDIN': os.rename(name+'_tmp',name)                                                   # overwrite old one with tmp new
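The per-row step above reduces to a single numpy call; a toy check, not from this commit (tensor values made up), relying on np.linalg.eig returning eigenvalues u and column eigenvectors v:

import numpy as np

tensor = np.array([ 2.,1.,0., 1.,2.,0., 0.,0.,3. ]).reshape(3,3)      # symmetric 3x3 built from nine columns
(u,v) = np.linalg.eig(tensor)                                         # eigenvalues, column eigenvectors
print(np.allclose(np.dot(tensor,v[:,0]),u[0]*v[:,0]))                 # True: tensor.v = u*v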
View File
@@ -11,12 +11,13 @@ scriptID = string.replace('$Id$','\n','\\n')
 scriptName = os.path.splitext(scriptID.split()[1])[0]

 def operator(stretch,strain,eigenvalues):
+  ''' Albrecht Bertram: Elasticity and Plasticity of Large Deformations An Introduction (3rd Edition, 2012), p. 102 '''
   return {
     'V#ln':    np.log(eigenvalues)                                 ,
     'U#ln':    np.log(eigenvalues)                                 ,
     'V#Biot':  ( np.ones(3,'d') - 1.0/eigenvalues )                ,
     'U#Biot':  ( eigenvalues - np.ones(3,'d') )                    ,
-    'V#Green': ( np.ones(3,'d') - 1.0/eigenvalues*eigenvalues) *0.5,
+    'V#Green': ( np.ones(3,'d') - 1.0/eigenvalues/eigenvalues) *0.5,
     'U#Green': ( eigenvalues*eigenvalues - np.ones(3,'d'))     *0.5,
   }[stretch+'#'+strain]
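The V#Green correction matters: by operator precedence, the old expression 1.0/eigenvalues*eigenvalues is (1/λ)·λ = 1, so the old V#Green was identically zero. In terms of a principal stretch λ the entries are the usual strain measures (my notation, not from the source):

\varepsilon_{\ln} = \ln\lambda, \qquad
\varepsilon^{(U)}_{Biot} = \lambda - 1, \quad \varepsilon^{(V)}_{Biot} = 1 - \lambda^{-1}, \qquad
\varepsilon^{(U)}_{Green} = \tfrac{1}{2}\,(\lambda^{2} - 1), \quad \varepsilon^{(V)}_{Green} = \tfrac{1}{2}\,(1 - \lambda^{-2})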
@@ -30,29 +31,43 @@ Add column(s) containing given strains based on given stretches of requested def
 """, version = scriptID)

-parser.add_option('-u','--right', dest='right', action='store_true',
+parser.add_option('-u','--right',
+                  dest = 'right',
+                  action = 'store_true',
                   help = 'material strains based on right Cauchy--Green deformation, i.e., C and U')
-parser.add_option('-v','--left', dest='left', action='store_true',
+parser.add_option('-v','--left',
+                  dest = 'left',
+                  action = 'store_true',
                   help = 'spatial strains based on left Cauchy--Green deformation, i.e., B and V')
-parser.add_option('-0','--logarithmic', dest='logarithmic', action='store_true',
+parser.add_option('-0','--logarithmic',
+                  dest = 'logarithmic',
+                  action = 'store_true',
                   help = 'calculate logarithmic strain tensor')
-parser.add_option('-1','--biot', dest='biot', action='store_true',
+parser.add_option('-1','--biot',
+                  dest = 'biot',
+                  action = 'store_true',
                   help = 'calculate biot strain tensor')
-parser.add_option('-2','--green', dest='green', action='store_true',
+parser.add_option('-2','--green',
+                  dest = 'green',
+                  action = 'store_true',
                   help = 'calculate green strain tensor')
-parser.add_option('-f','--defgrad', dest='defgrad', action='extend', metavar = '<string LIST>',
+parser.add_option('-f','--defgrad',
+                  dest = 'defgrad',
+                  action = 'extend',
+                  metavar = '<string LIST>',
                   help = 'heading(s) of columns containing deformation tensor values [%default]')

-parser.set_defaults(right       = False)
-parser.set_defaults(left        = False)
-parser.set_defaults(logarithmic = False)
-parser.set_defaults(biot        = False)
-parser.set_defaults(green       = False)
-parser.set_defaults(defgrad     = ['f'])
+parser.set_defaults(right       = False,
+                    left        = False,
+                    logarithmic = False,
+                    biot        = False,
+                    green       = False,
+                    defgrad     = ['f'],
+                   )

 (options,filenames) = parser.parse_args()

 stretches = []
-stretch = {}
 strains = []

 if options.right: stretches.append('U')
@@ -61,68 +76,75 @@ if options.logarithmic: strains.append('ln')
 if options.biot:  strains.append('Biot')
 if options.green: strains.append('Green')

-# ------------------------------------------ setup file handles ------------------------------------
-files = []
-if filenames == []:
-  files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
-else:
-  for name in filenames:
-    if os.path.exists(name):
-      files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
-
-# ------------------------------------------ loop over input files ---------------------------------
-for file in files:
-  if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
-  else:                       file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
-
-  table = damask.ASCIItable(file['input'],file['output'],False)                                     # make unbuffered ASCII_table
-  table.head_read()                                                                                 # read ASCII header info
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
+if options.defgrad == None:
+  parser.error('no data column specified.')
+
+# --- loop over input files -------------------------------------------------------------------------
+
+if filenames == []: filenames = ['STDIN']
+
+for name in filenames:
+  if not (name == 'STDIN' or os.path.exists(name)): continue
+  table = damask.ASCIItable(name = name, outname = name+'_tmp',
+                            buffered = False)
+  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
+
+# ------------------------------------------ read header ------------------------------------------
+
+  table.head_read()
-# --------------- figure out columns to process ---------------------------------------------------
+# ------------------------------------------ sanity checks ----------------------------------------
+
+  items = {
+            'tensor': {'dim': 9, 'shape': [3,3], 'labels':options.defgrad, 'column': []},
+          }
   errors  = []
-  active = []
-  for i,length in enumerate(table.label_dimension(options.defgrad)):
-    if length == 9:
-      active.append(options.defgrad[i])
-    else:
-      errors.append('no deformation gradient tensor (1..9_%s) found...'%options.defgrad[i])
+  remarks = []
+
+  for type, data in items.iteritems():
+    for what in data['labels']:
+      dim = table.label_dimension(what)
+      if dim != data['dim']: remarks.append('column {} is not a {}...'.format(what,type))
+      else:
+        items[type]['column'].append(table.label_index(what))
+        for theStretch in stretches:
+          for theStrain in strains:
+            table.labels_append(['{}_{}({}){}'.format(i+1,                                          # extend ASCII header with new labels
+                                                      theStrain,
+                                                      theStretch,
+                                                      what if what != 'f' else '') for i in xrange(9)])

+  if remarks != []: table.croak(remarks)
   if errors  != []:
-    file['croak'].write('\n'.join(errors)+'\n')
+    table.croak(errors)
     table.close(dismiss = True)
     continue

 # ------------------------------------------ assemble header --------------------------------------
-  for label in active:
-    for theStretch in stretches:
-      for theStrain in strains:
-        table.labels_append(['%i_%s(%s)%s'%(i+1,
-                                            theStrain,
-                                            theStretch,
-                                            label if label != 'f' else '') for i in xrange(9)])     # extend ASCII header with new labels
+
+  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
   table.head_write()
 # ------------------------------------------ process data ------------------------------------------

+  stretch = {}
   outputAlive = True
   while outputAlive and table.data_read():                                                          # read next data line of ASCII table
-    for column in table.label_index(active):                                                        # loop over all requested norms
-      F = np.array(map(float,table.data[column:column+9]),'d').reshape(3,3)
-      (U,S,Vh) = np.linalg.svd(F)
-      R = np.dot(U,Vh)
-      stretch['U'] = np.dot(np.linalg.inv(R),F)
-      stretch['V'] = np.dot(F,np.linalg.inv(R))
+    for column in items['tensor']['column']:                                                        # loop over all requested defgrads
+      F = np.array(map(float,table.data[column:column+items['tensor']['dim']]),'d').reshape(items['tensor']['shape'])
+      (U,S,Vh) = np.linalg.svd(F)                                                                   # singular value decomposition
+      R = np.dot(U,Vh)                                                                              # rotation of polar decomposition
+      stretch['U'] = np.dot(np.linalg.inv(R),F)                                                     # F = RU
+      stretch['V'] = np.dot(F,np.linalg.inv(R))                                                     # F = VR

       for theStretch in stretches:
-        for i in xrange(9):
-          if abs(stretch[theStretch][i%3,i//3]) < 1e-12:                                            # kill nasty noisy data
-            stretch[theStretch][i%3,i//3] = 0.0
+        stretch[theStretch] = np.where(abs(stretch[theStretch]) < 1e-12, 0, stretch[theStretch])    # kill nasty noisy data
         (D,V) = np.linalg.eig(stretch[theStretch])                                                  # eigen decomposition (of symmetric matrix)
+        neg = np.where(D < 0.0)                                                                     # find negative eigenvalues ...
+        D[neg]   *= -1.                                                                             # ... flip value ...
+        V[:,neg] *= -1.                                                                             # ... and vector
         for i,eigval in enumerate(D):
-          if eigval < 0.0:                                                                          # flip negative eigenvalues
-            D[i] = -D[i]
-            V[:,i] = -V[:,i]
           if np.dot(V[:,i],V[:,(i+1)%3]) != 0.0:                                                    # check each vector for orthogonality
             V[:,(i+1)%3] = np.cross(V[:,(i+2)%3],V[:,i])                                            # correct next vector
             V[:,(i+1)%3] /= np.sqrt(np.dot(V[:,(i+1)%3],V[:,(i+1)%3].conj()))                       # and renormalize (hyperphobic?)
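For reference, the polar split via SVD used above can be checked in isolation; a minimal sketch, not part of this commit, assuming a non-singular deformation gradient F (values made up):

import numpy as np

F = np.array([[1.1,0.2,0.],[0.,0.9,0.],[0.,0.,1.]])       # some deformation gradient
U_,S,Vh = np.linalg.svd(F)                                # singular value decomposition
R = np.dot(U_,Vh)                                         # rotation of the polar decomposition
U = np.dot(np.linalg.inv(R),F)                            # right stretch: F = R.U
V = np.dot(F,np.linalg.inv(R))                            # left  stretch: F = V.R
print(np.allclose(F,np.dot(R,U)))                         # True
print(np.allclose(F,np.dot(V,R)))                         # True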
@@ -131,11 +153,12 @@ for file in files:
         eps = (np.dot(V,np.dot(np.diag(d),V.T)).real).reshape(9)                                    # build tensor back from eigenvalue/vector basis

         table.data_append(list(eps))
-      outputAlive = table.data_write()                                                              # output processed line

-# ------------------------------------------ output result -----------------------------------------
-  outputAlive and table.output_flush()                                                              # just in case of buffered ASCII table
-  table.close()                                                                                     # close ASCII table
-  if file['name'] != 'STDIN':
-    os.rename(file['name']+'_tmp',file['name'])                                                     # overwrite old one with tmp new
+    outputAlive = table.data_write()                                                                # output processed line
+
+# ------------------------------------------ output finalization -----------------------------------
+
+  table.close()                                                                                     # close ASCII tables
+  if name != 'STDIN': os.rename(name+'_tmp',name)                                                   # overwrite old one with tmp new
View File
@@ -21,41 +21,39 @@ Examples:
 For grain averaged values, replace all rows of particular 'texture' with a single row containing their average.
 """, version = scriptID)

-parser.add_option('-l','--label', dest='label', type="string", metavar='string',
+parser.add_option('-l','--label',
+                  dest = 'label',
+                  type = 'string', metavar = 'string',
                   help = 'column label for grouping rows')

 (options,filenames) = parser.parse_args()

 if options.label == None:
-  parser.error('No sorting column specified.')
+  parser.error('no grouping column specified.')
 # --- loop over input files -------------------------------------------------------------------------

-if filenames == []:
-  filenames = ['STDIN']
+if filenames == []: filenames = ['STDIN']

 for name in filenames:
-  if name == 'STDIN':
-    file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr}
-    file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
-  else:
-    if not os.path.exists(name): continue
-    file = {'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr}
-    file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
+  if not (name == 'STDIN' or os.path.exists(name)): continue
+  table = damask.ASCIItable(name = name,
+                            outname = options.label+'_averaged_'+name,
+                            buffered = False)
+  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

-  table = damask.ASCIItable(file['input'],file['output'],buffered=False)                            # make unbuffered ASCII_table
-  table.head_read()                                                                                 # read ASCII header info
+# ------------------------------------------ sanity checks ---------------------------------------
+
+  table.head_read()

   if table.label_dimension(options.label) != 1:
-    file['croak'].write('column {0} is not of scalar dimension...\n'.format(options.label))
+    table.croak('column {} is not of scalar dimension.'.format(options.label))
     table.close(dismiss = True)                                                                     # close ASCIItable and remove empty file
     continue

-# ------------------------------------------ assemble header -----------------------------
-  table.info_append(string.replace(scriptID,'\n','\\n') + \
-                    '\t' + ' '.join(sys.argv[1:]))
+# ------------------------------------------ assemble info ---------------------------------------
+
+  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
   table.head_write()

 # ------------------------------------------ process data --------------------------------
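The grouping step itself is elided between these hunks; a hypothetical stand-in sketch of the idea, not the script's actual code, averaging all rows that share the same scalar value in column col:

import numpy as np

def group_average(data,col):
  values = np.unique(data[:,col])                         # one group per distinct key
  return np.array([data[data[:,col] == v].mean(axis=0) for v in values])

data = np.array([[1.,10.],[1.,20.],[2.,30.]])
print(group_average(data,0))                              # rows keyed 1. collapse to [1. 15.]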
@@ -78,8 +76,4 @@ for name in filenames:
 # ------------------------------------------ output result -------------------------------

   table.data_writeArray()
-  table.output_flush()                                                                              # just in case of buffered ASCII table
   table.close()                                                                                     # close ASCII table
-  if file['name'] != 'STDIN':
-    os.rename(file['name']+'_tmp',options.label+'_averaged_'+file['name'])                          # overwrite old one with tmp new
View File
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: UTF-8 no BOM -*-

-import os,re,sys,string,fnmatch,numpy as np
+import os,re,sys,string,fnmatch,math,random,numpy as np
 from optparse import OptionParser
 import damask
@@ -21,32 +21,39 @@ All rows where label 'foo' equals 'bar' -- " #foo# == \"bar\" "
 """, version = scriptID)

-parser.add_option('-w','--white', dest='whitelist', action='extend', metavar='<string LIST>',
+parser.add_option('-w','--white',
+                  dest = 'whitelist',
+                  action = 'extend', metavar = '<string LIST>',
                   help = 'whitelist of column labels (a,b,c,...)')
-parser.add_option('-b','--black', dest='blacklist', action='extend', metavar='<string LIST>',
+parser.add_option('-b','--black',
+                  dest = 'blacklist',
+                  action = 'extend', metavar = '<string LIST>',
                   help = 'blacklist of column labels (a,b,c,...)')
-parser.add_option('-c','--condition', dest='condition', metavar='string',
+parser.add_option('-c','--condition',
+                  dest = 'condition', metavar = 'string',
                   help = 'condition to filter rows')

-parser.set_defaults(condition = '')
+parser.set_defaults(condition = '',
+                   )

 (options,filenames) = parser.parse_args()
-
-if filenames == []:
-  filenames = ['STDIN']
 # --- loop over input files -------------------------------------------------------------------------

-for name in filenames:
-  if name == 'STDIN':
-    file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr}
-    file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
-  else:
-    if not os.path.exists(name): continue
-    file = {'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr}
-    file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
-
-  table = damask.ASCIItable(file['input'],file['output'],False)                                     # make unbuffered ASCII_table
-  table.head_read()                                                                                 # read ASCII header info
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
+if filenames == []: filenames = ['STDIN']
+
+for name in filenames:
+  if not (name == 'STDIN' or os.path.exists(name)): continue
+  table = damask.ASCIItable(name = name, outname = name+'_tmp',
+                            buffered = False)
+  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
+
+# ------------------------------------------ assemble info ---------------------------------------
+
+  table.head_read()                                                                                 # read ASCII header info
+  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
+
+# ------------------------------------------ process data ---------------------------------------

   specials = { \
               '_row_': 0,
@@ -55,18 +62,21 @@ for name in filenames:
   positions = []

   for position,label in enumerate(table.labels):
-    if (options.whitelist == None or      any([fnmatch.fnmatch(label,needle) for needle in options.whitelist])) \
-    and (options.blacklist == None or not any([fnmatch.fnmatch(label,needle) for needle in options.blacklist])):   # a label to keep?
+    if (options.whitelist == None or      any([    position in table.label_indexrange(needle) \
+                                                or fnmatch.fnmatch(label,needle) for needle in options.whitelist])) \
+    and (options.blacklist == None or not any([    position in table.label_indexrange(needle) \
+                                                or fnmatch.fnmatch(label,needle) for needle in options.blacklist])):   # a label to keep?
       labels.append(label)                                                                          # remember name...
       positions.append(position)                                                                    # ...and position

-  if options.whitelist != None and options.blacklist == None:                                       # check whether reordering is possible
+  if len(labels) > 0 and options.whitelist != None and options.blacklist == None:                   # check whether reordering is possible
     position = np.zeros(len(labels))
     for i,label in enumerate(labels):                                                               # check each selected label
-      match = [fnmatch.fnmatch(label,needle) for needle in options.whitelist]                       # which whitelist items do match it
+      match = [    positions[i] in table.label_indexrange(needle) \
+                or fnmatch.fnmatch(label,needle) for needle in options.whitelist]                   # which whitelist items do match it
       position[i] = match.index(True) if np.sum(match) == 1 else -1                                 # unique match --> store which
-    sorted = np.argsort(position)
+    sorted = np.lexsort((labels,position))
     order = range(len(labels)) if sorted[0] < 0 else sorted                                         # skip reordering if non-unique, i.e. first sorted is "-1"
   else:
     order = range(len(labels))                                                                      # maintain original order of labels
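A toy illustration of that unique-match bookkeeping, not from the script (labels and patterns are made up): each kept label is assigned the index of the single whitelist pattern it matches, and columns are then ordered by that index.

import fnmatch,numpy as np

labels    = ['x','y','1_F','2_F']
whitelist = ['[xy]','*_F']
position  = np.zeros(len(labels))
for i,label in enumerate(labels):
  match       = [fnmatch.fnmatch(label,needle) for needle in whitelist]
  position[i] = match.index(True) if np.sum(match) == 1 else -1         # -1 if ambiguous or unmatched
order = np.argsort(position)                              # columns grouped by matching pattern
print([labels[i] for i in order])                         # ['x', 'y', '1_F', '2_F']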
@@ -90,10 +100,13 @@ for name in filenames:
   evaluator = "'" + condition + "'.format(" + ','.join(interpolator) + ")"

 # ------------------------------------------ assemble header ---------------------------------------
-  table.labels = np.array(labels)[order]                                                            # update with new label set
+
+  table.labels_clear()
+  table.labels_append(np.array(labels)[order])                                                      # update with new label set
   table.head_write()

-# ------------------------------------------ process data ------------------------------------------
+# ------------------------------------------ process and output data ------------------------------------------
+
   positions = np.array(positions)[order]
   outputAlive = True
   while outputAlive and table.data_read():                                                          # read next data line of ASCII table
@@ -102,10 +115,8 @@ for name in filenames:
       table.data = [table.data[position] for position in positions]                                 # retain filtered columns
       outputAlive = table.data_write()                                                              # output processed line

-# ------------------------------------------ output result -----------------------------------------
-  outputAlive and table.output_flush()                                                              # just in case of buffered ASCII table
-  table.input_close()                                                                               # close input ASCII table (works for stdin)
-  table.output_close()                                                                              # close output ASCII table (works for stdout)
-  if file['name'] != 'STDIN':
-    os.rename(file['name']+'_tmp',file['name'])                                                     # overwrite old one with tmp new
+# ------------------------------------------ finalize output -----------------------------------------
+  table.close()                                                                                     # close input ASCII table (works for stdin)
+  if name != 'STDIN': os.rename(name+'_tmp',name)                                                   # overwrite old one with tmp new
View File
@@ -19,36 +19,66 @@ Generate PNG image from data in given column (or 2D data of overall table).
 """, version = scriptID)

-parser.add_option('-l','--label', dest='label', type='string',
-                  help='column containing data [all])')
-parser.add_option('-r','--range', dest='range', type='float', nargs=2,
-                  help='data range (min max) [auto]')
-parser.add_option('--gap', '--transparent', dest='gap', type='float',
-                  help='value to treat as transparent [%default]')
-parser.add_option('-d','--dimension', dest='dimension', type='int', nargs=2,
-                  help='data dimension (width height) [native]')
-parser.add_option('--abs', dest='abs', action='store_true',
-                  help='magnitude of values')
-parser.add_option('--log', dest='log', action='store_true',
-                  help='log10 of values')
-parser.add_option('--fliplr', dest='flipLR', action='store_true',
-                  help='flip around vertical axis')
-parser.add_option('--flipud', dest='flipUD', action='store_true',
-                  help='flip around horizontal axis')
-parser.add_option('--color', dest='color', type='string',
-                  help='color scheme')
-parser.add_option('--invert', dest='invert', action='store_true',
-                  help='invert color scheme')
-parser.add_option('--crop', dest='crop', type='int', nargs=4, metavar='LEFT RIGHT TOP BOTTOM',
-                  help='pixels cropped on left, right, top, bottom')
-parser.add_option('--show', dest='show', action='store_true',
-                  help='show resulting image')
-parser.add_option('-N','--pixelsize', dest='pixelsize', type='int',
-                  help='pixel per data point')
-parser.add_option('-x','--pixelsizex', dest='pixelsizex', type='int',
-                  help='pixel per data point along x')
-parser.add_option('-y','--pixelsizey', dest='pixelsizey', type='int',
-                  help='pixel per data point along y')
+parser.add_option('-l','--label',
+                  dest = 'label',
+                  type = 'string', metavar = 'string',
+                  help = 'column containing data [all]')
+parser.add_option('-r','--range',
+                  dest = 'range',
+                  type = 'float', nargs = 2, metavar = 'float float',
+                  help = 'data range (min max) [auto]')
+parser.add_option('--gap', '--transparent',
+                  dest = 'gap',
+                  type = 'float', metavar = 'float',
+                  help = 'value to treat as transparent [%default]')
+parser.add_option('-d','--dimension',
+                  dest = 'dimension',
+                  type = 'int', nargs = 2, metavar = 'int int',
+                  help = 'data dimension (width height) [native]')
+parser.add_option('--color',
+                  dest = 'color',
+                  type = 'string', metavar = 'string',
+                  help = 'color scheme [%default]')
+parser.add_option('--invert',
+                  dest = 'invert',
+                  action = 'store_true',
+                  help = 'invert color scheme')
+parser.add_option('--abs',
+                  dest = 'abs',
+                  action = 'store_true',
+                  help = 'magnitude of values')
+parser.add_option('--log',
+                  dest = 'log',
+                  action = 'store_true',
+                  help = 'log10 of values')
+parser.add_option('--fliplr',
+                  dest = 'flipLR',
+                  action = 'store_true',
+                  help = 'flip around vertical axis')
+parser.add_option('--flipud',
+                  dest = 'flipUD',
+                  action = 'store_true',
+                  help = 'flip around horizontal axis')
+parser.add_option('--crop',
+                  dest = 'crop',
+                  type = 'int', nargs = 4, metavar = 'int int int int',
+                  help = 'pixels cropped on left, right, top, bottom')
+parser.add_option('-N','--pixelsize',
+                  dest = 'pixelsize',
+                  type = 'int', metavar = 'int',
+                  help = 'pixel per data point')
+parser.add_option('-x','--pixelsizex',
+                  dest = 'pixelsizex',
+                  type = 'int', metavar = 'int',
+                  help = 'pixel per data point along x')
+parser.add_option('-y','--pixelsizey',
+                  dest = 'pixelsizey',
+                  type = 'int', metavar = 'int',
+                  help = 'pixel per data point along y')
+parser.add_option('--show',
+                  dest = 'show',
+                  action = 'store_true',
+                  help = 'show resulting image')

 parser.set_defaults(label = None,
                     range = [0.0,0.0],
@@ -78,33 +108,27 @@ if options.invert: theMap = theMap.invert()
 theColors = np.uint8(np.array(theMap.export(format = 'list',steps = 256))*255)

 # --- loop over input files -------------------------------------------------------------------------

-if filenames == []:
-  filenames = ['STDIN']
+if filenames == []: filenames = ['STDIN']

 for name in filenames:
-  if name == 'STDIN':
-    file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr}
-    file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
-  else:
-    if not os.path.exists(name): continue
-    file = {'name':name,
-            'input':open(name),
-            'output':open(os.path.splitext(name)[0]+\
-                          ('_%s'%(options.label) if options.label != None else '')+\
-                          '.png','w'),
-            'croak':sys.stderr}
-    file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
+  if not (name == 'STDIN' or os.path.exists(name)): continue
+  table = damask.ASCIItable(name = name,
+                            outname = None,
+                            buffered = False,
+                            labeled = options.label != None,
+                            readonly = True)
+  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

-  table = damask.ASCIItable(file['input'],file['output'],
-                            buffered = False,                                                       # make unbuffered ASCII_table
-                            labels = options.label != None)                                         # no labels when taking 2D dataset
-  table.head_read()                                                                                 # read ASCII header info
+# ------------------------------------------ read header ------------------------------------------
+
+  table.head_read()

 # ------------------------------------------ process data ------------------------------------------

   missing_labels = table.data_readArray(options.label)
   if len(missing_labels) > 0:
-    file['croak'].write('column %s not found...\n'%options.label)
+    table.croak('column {} not found.'.format(options.label))
     table.close(dismiss = True)                                                                     # close ASCIItable and remove empty file
     continue
@@ -119,7 +143,7 @@ for name in filenames:
   if np.all(np.array(options.range) == 0.0):
     options.range = [table.data[mask].min(),
                      table.data[mask].max()]
-    file['croak'].write('data range: {0} {1}\n'.format(*options.range))
+    table.croak('data range: {0} {1}'.format(*options.range))

   delta = max(options.range) - min(options.range)
   avg   = 0.5*(max(options.range) + min(options.range))
@@ -139,7 +163,7 @@ for name in filenames:
                        repeat(options.pixelsizey,axis = 0)

   (height,width) = table.data.shape
-  file['croak'].write('image dimension: {0} x {1}\n'.format(width,height))
+  table.croak('image dimension: {0} x {1}'.format(width,height))

   im = Image.fromarray(np.dstack((theColors[np.array(255*table.data,dtype = np.uint8)],
                                   255*mask.astype(np.uint8))), 'RGBA').\
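The coloring works by indexing a 256-entry lookup table with the scalar field scaled to [0,1], with the mask supplying the alpha channel; a minimal sketch, not from this commit (the gray LUT and values are made up):

import numpy as np

theColors = np.uint8(np.outer(np.linspace(0.,1.,256),[255,255,255]))   # toy gray LUT, shape (256,3)
data = np.array([[0.,0.5],[0.75,1.]])                                  # already normalized to [0,1]
mask = data != 0.5                                                     # transparent where masked out
rgba = np.dstack((theColors[np.uint8(255*data)],255*mask.astype(np.uint8)))
print(rgba.shape)                                                      # (2, 2, 4): RGB + alpha per pixel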
@@ -149,8 +173,12 @@ for name in filenames:
                              height-options.crop[3]))

 # ------------------------------------------ output result -----------------------------------------

-  im.save(file['output'],format = "PNG")
-  if options.show: im.show()
-  table.input_close()                                                                               # close input ASCII table
-  table.output_close()                                                                              # close output
+  im.save(sys.stdout if name == 'STDIN' else
+          os.path.splitext(name)[0]+ \
+          ('' if options.label == None else '_'+options.label)+ \
+          '.png',
+          format = "PNG")
+
+  table.close()                                                                                     # close ASCII table
+  if options.show: im.show()
View File
@@ -19,38 +19,61 @@ Generate PNG image from scalar data on grid deformed by (periodic) deformation g
 """, version = scriptID)

-parser.add_option('-l','--label', dest='label', type='string', metavar='string',
-                  help='column containing data)')
-parser.add_option('-r','--range', dest='range', type='float', nargs=2, metavar='float float',
-                  help='data range (min max) [auto]')
-parser.add_option('--color', dest='color', type='string', metavar='string',
-                  help='color scheme')
-parser.add_option('--invert', dest='invert', action='store_true',
-                  help='invert color scheme')
-parser.add_option('--abs', dest='abs', action='store_true',
-                  help='magnitude of values')
-parser.add_option('--log', dest='log', action='store_true',
-                  help='log10 of values')
-parser.add_option('-d','--dimension', dest='dimension', type='int', nargs=3, metavar=' '.join(['int']*3),
-                  help='data dimension (x/y/z)')
-parser.add_option('-s','--size', dest='size', type='float', nargs=3, metavar=' '.join(['float']*3),
-                  help='box size (x/y/z)')
-parser.add_option('-f','--defgrad', dest='defgrad', metavar='string',
-                  help='column label of deformation gradient [%default]')
-parser.add_option('--scaling', dest='scaling', type='float', nargs=3, metavar=' '.join(['float']*3),
-                  help='x/y/z scaling of displacment fluctuation [%default]')
-parser.add_option('-z','--layer', dest='z', type='int', metavar='int',
-                  help='index of z plane to plot [%default]')
-parser.add_option('--fliplr', dest='flipLR', action='store_true',
-                  help='flip around vertical axis')
-parser.add_option('--flipud', dest='flipUD', action='store_true',
-                  help='flip around horizontal axis')
-parser.add_option('--crop', dest='crop', type='int', nargs=4, metavar=' '.join(['int']*3),
-                  help='pixels cropped on left, right, top, bottom')
-parser.add_option('--show', dest='show', action='store_true',
-                  help='show resulting image')
-parser.add_option('-N','--pixelsize', dest='pixelsize', type='int', metavar='int',
-                  help='pixels per cell edge')
+parser.add_option('-l','--label',
+                  dest = 'label',
+                  type = 'string', metavar = 'string',
+                  help = 'column containing data [all]')
+parser.add_option('-r','--range',
+                  dest = 'range',
+                  type = 'float', nargs = 2, metavar = 'float float',
+                  help = 'data range (min max) [auto]')
+parser.add_option('--gap', '--transparent',
+                  dest = 'gap',
+                  type = 'float', metavar = 'float',
+                  help = 'value to treat as transparent [%default]')
+parser.add_option('-d','--dimension',
+                  dest = 'dimension',
+                  type = 'int', nargs = 3, metavar = ' '.join(['int']*3),
+                  help = 'data dimension (x/y/z)')
+parser.add_option('-s','--size',
+                  dest = 'size',
+                  type = 'float', nargs = 3, metavar = ' '.join(['float']*3),
+                  help = 'box size (x/y/z)')
+parser.add_option('-f','--defgrad',
+                  dest = 'defgrad', metavar = 'string',
+                  help = 'column label of deformation gradient [%default]')
+parser.add_option('--scaling',
+                  dest = 'scaling',
+                  type = 'float', nargs = 3, metavar = ' '.join(['float']*3),
+                  help = 'x/y/z scaling of displacement fluctuation [%default]')
+parser.add_option('-z','--layer',
+                  dest = 'z',
+                  type = 'int', metavar = 'int',
+                  help = 'index of z plane to plot [%default]')
+parser.add_option('--color',
+                  dest = 'color',
+                  type = 'string', metavar = 'string',
+                  help = 'color scheme')
+parser.add_option('--invert',
+                  dest = 'invert',
+                  action = 'store_true',
+                  help = 'invert color scheme')
+parser.add_option('--abs',
+                  dest = 'abs',
+                  action = 'store_true',
+                  help = 'magnitude of values')
+parser.add_option('--log',
+                  dest = 'log',
+                  action = 'store_true',
+                  help = 'log10 of values')
+parser.add_option('-N','--pixelsize',
+                  dest = 'pixelsize',
+                  type = 'int', metavar = 'int',
+                  help = 'pixels per cell edge')
+parser.add_option('--show',
+                  dest = 'show',
+                  action = 'store_true',
+                  help = 'show resulting image')

 parser.set_defaults(label = None,
                     range = [0.0,0.0],
@@ -61,11 +84,8 @@ parser.set_defaults(label = None,
                     log = False,
                     defgrad = 'f',
                     scaling = [1.,1.,1.],
-                    flipLR = False,
-                    flipUD = False,
                     color = "gray",
                     invert = False,
-                    crop = [0,0,0,0],
                     pixelsize = 1,
                     show = False,
                    )
@@ -86,35 +106,28 @@ theColors = np.uint8(np.array(theMap.export(format='list',steps=256))*255)
 # --- loop over input files -------------------------------------------------------------------------

-if filenames == []:
-  filenames = ['STDIN']
+if filenames == []: filenames = ['STDIN']

 for name in filenames:
-  if name == 'STDIN':
-    file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr}
-    file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
-  else:
-    if not os.path.exists(name): continue
-    file = {'name':name,
-            'input':open(name),
-            'output':open(os.path.splitext(name)[0]+ \
-                          ('' if options.label == None else '_'+options.label)+ \
-                          '.png','w'),
-            'croak':sys.stderr}
-    file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
+  if not (name == 'STDIN' or os.path.exists(name)): continue
+  table = damask.ASCIItable(name = name,
+                            outname = None,
+                            buffered = False,
+                            labeled = options.label != None,
+                            readonly = True)
+  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

-  table = damask.ASCIItable(file['input'],file['output'],
-                            buffered = False,                                                       # make unbuffered ASCII_table
-                            labels = options.label != None)                                         # no labels when taking 2D dataset
-  table.head_read()                                                                                 # read ASCII header info
+# ------------------------------------------ read header ------------------------------------------
+
+  table.head_read()

 # --------------- figure out columns to process ---------------------------------------------------

   errors = []
   if table.label_dimension(options.label) != 1:
-    errors.append('no scalar data (%s) found...'%options.label)
+    errors.append('no scalar data ({}) found.'.format(options.label))
   if table.label_dimension(options.defgrad) != 9:
-    errors.append('no deformation gradient tensor (1..9_%s) found...'%options.defgrad)
+    errors.append('no deformation gradient tensor (1..9_{}) found.'.format(options.defgrad))

   if errors != []:
     file['croak'].write('\n'.join(errors)+'\n')
@@ -123,8 +136,8 @@ for name in filenames:
   table.data_readArray([options.label,options.defgrad])

-  F    = table.data[:,1:10].transpose().reshape([3,3]+list(options.dimension),order='F')
   data = table.data[:,0   ].transpose().reshape(      list(options.dimension),order='F')
+  F    = table.data[:,1:10].transpose().reshape([3,3]+list(options.dimension),order='F')

   if options.abs: data = np.abs(data)
   if options.log: data = np.log10(data)
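The order='F' in those reshapes matters: table rows enumerate grid points with x varying fastest, i.e. Fortran order. A toy illustration, not from this commit (2x2x1 grid, made-up values):

import numpy as np

flat = np.array([0.,1.,2.,3.])                            # values at (x0,y0),(x1,y0),(x0,y1),(x1,y1)
grid = flat.reshape((2,2,1),order='F')                    # first index (x) varies fastest
print(grid[1,0,0], grid[0,1,0])                           # 1.0 2.0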
@@ -165,24 +178,17 @@ for name in filenames:
                        nodes[0,x  ,y+1,options.z],
                        nodes[1,x  ,y+1,options.z],
                      ],
-                     fill =    tuple(theColors[int(255*data[x,y,options.z])]),
+                     fill =    tuple(theColors[int(255*data[x,y,options.z])],
+                                     0 if data[x,y,options.z] == options.gap else 255),
                      outline = None)

-#  if options.flipLR:    table.data = np.fliplr(table.data)
-#  if options.flipUD:    table.data = np.flipud(table.data)
-
-#  (height,width,bands) = table.data.shape
-
-#  im = Image.fromarray(table.data.astype('uint8'), 'RGB').\
-#       crop((       options.crop[0],
-#                    options.crop[2],
-#             width -options.crop[1],
-#             height-options.crop[3]))
 # ------------------------------------------ output result -----------------------------------------

-  im.save(file['output'],format = "PNG")
-  if options.show: im.show()
+  im.save(sys.stdout if name == 'STDIN' else
+          os.path.splitext(name)[0]+ \
+          ('' if options.label == None else '_'+options.label)+ \
+          '.png',
+          format = "PNG")

-  table.close()                                                                                     # close ASCII table file handles
+  table.close()                                                                                     # close ASCII table
+  if options.show: im.show()
View File
@@ -19,24 +19,42 @@ Generate PNG image from data in given column vector containing RGB tuples.
 """, version = scriptID)

-parser.add_option('-l','--label', dest='label', type='string',
+parser.add_option('-l','--label',
+                  dest = 'label',
+                  type = 'string', metavar = 'string',
                   help = 'column containing RGB triplet')
-parser.add_option('-d','--dimension', dest='dimension', type='int', nargs=2,
+parser.add_option('-d','--dimension',
+                  dest = 'dimension',
+                  type = 'int', nargs = 2, metavar = 'int int',
                   help = 'data dimension (width height)')
-parser.add_option('--fliplr', dest='flipLR', action='store_true',
+parser.add_option('--fliplr',
+                  dest = 'flipLR',
+                  action = 'store_true',
                   help = 'flip around vertical axis')
-parser.add_option('--flipud', dest='flipUD', action='store_true',
+parser.add_option('--flipud',
+                  dest = 'flipUD',
+                  action = 'store_true',
                   help = 'flip around horizontal axis')
-parser.add_option('--crop', dest='crop', type='int', nargs=4, metavar=' '.join(['int']*4),
+parser.add_option('--crop',
+                  dest = 'crop',
+                  type = 'int', nargs = 4, metavar = ' '.join(['int']*4),
                   help = 'pixels cropped on left, right, top, bottom')
-parser.add_option('--show', dest='show', action='store_true',
-                  help='show resulting image')
-parser.add_option('-N','--pixelsize', dest='pixelsize', type='int',
+parser.add_option('-N','--pixelsize',
+                  dest = 'pixelsize',
+                  type = 'int', metavar = 'int',
                   help = 'pixels per data point')
-parser.add_option('-x','--pixelsizex', dest='pixelsizex', type='int',
+parser.add_option('-x','--pixelsizex',
+                  dest = 'pixelsizex',
+                  type = 'int', metavar = 'int',
                   help = 'pixels per data point along x')
-parser.add_option('-y','--pixelsizey', dest='pixelsizey', type='int',
+parser.add_option('-y','--pixelsizey',
+                  dest = 'pixelsizey',
+                  type = 'int', metavar = 'int',
                   help = 'pixels per data point along y')
+parser.add_option('--show',
+                  dest = 'show',
+                  action = 'store_true',
+                  help = 'show resulting image')

 parser.set_defaults(label = None,
                     dimension = [],
@@ -55,27 +73,21 @@ if options.dimension == []: parser.error('dimension of data array missing')
 if options.pixelsize > 1: (options.pixelsizex,options.pixelsizey) = [options.pixelsize]*2

 # --- loop over input files -------------------------------------------------------------------------

-if filenames == []:
-  filenames = ['STDIN']
+if filenames == []: filenames = ['STDIN']

 for name in filenames:
-  if name == 'STDIN':
-    file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr}
-    file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
-  else:
-    if not os.path.exists(name): continue
-    file = {'name':name,
-            'input':open(name),
-            'output':open(os.path.splitext(name)[0]+ \
-                          ('' if options.label == None else '_'+options.label)+ \
-                          '.png','w'),
-            'croak':sys.stderr}
-    file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
+  if not (name == 'STDIN' or os.path.exists(name)): continue
+  table = damask.ASCIItable(name = name,
+                            outname = None,
+                            buffered = False,
+                            labeled = options.label != None,
+                            readonly = True)
+  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

-  table = damask.ASCIItable(file['input'],file['output'],
-                            buffered = False,                                                       # make unbuffered ASCII_table
-                            labels = True)
-  table.head_read()                                                                                 # read ASCII header info
+# ------------------------------------------ read header ------------------------------------------
+
+  table.head_read()

 # ------------------------------------------ process data ------------------------------------------
@@ -83,13 +95,13 @@ for name in filenames:
   missing_labels = table.data_readArray(options.label)

   if len(missing_labels) > 0:
-    errors.append('column%s %s not found'%('s' if len(missing_labels) > 1 else '',
-                                           ', '.join(missing_labels)))
+    errors.append('column{} {} not found'.format('s' if len(missing_labels) > 1 else '',
+                                                 ', '.join(missing_labels)))
   if table.label_dimension(options.label) != 3:
-    errors.append('column %s has wrong dimension'%options.label)
+    errors.append('column {} has wrong dimension'.format(options.label))

   if errors != []:
-    file['croak'].write('\n'.join(errors))
+    table.croak(errors)
     table.close(dismiss = True)                                                                     # close ASCII table file handles and delete output file
     continue
@@ -98,13 +110,13 @@ for name in filenames:
   if options.flipLR: table.data = np.fliplr(table.data)
   if options.flipUD: table.data = np.flipud(table.data)

-  table.data = table.data.\
-               repeat(options.pixelsizex,axis=1).\
+  table.data = table.data.repeat(options.pixelsizex,axis=1).\
                           repeat(options.pixelsizey,axis=0)

   table.data *= 1. if np.any(table.data > 1.0) else 255.0                                           # ensure 8 bit data range

   (height,width,bands) = table.data.shape
+  table.croak('image dimension: {0} x {1}'.format(width,height))

   im = Image.fromarray(table.data.astype('uint8'), 'RGB').\
        crop((       options.crop[0],
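The pixel upscaling is plain nearest-neighbour replication along each image axis; a toy example, not from this commit (array values made up):

import numpy as np

img = np.arange(4).reshape(2,2)                           # 2 x 2 "image"
big = img.repeat(3,axis=1).repeat(2,axis=0)               # 3x wider, 2x taller, blocky pixels
print(big.shape)                                          # (4, 6)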
@@ -114,7 +126,11 @@ for name in filenames:
 # ------------------------------------------ output result -----------------------------------------

-  im.save(file['output'],format = "PNG")
-  if options.show: im.show()
+  im.save(sys.stdout if name == 'STDIN' else
+          os.path.splitext(name)[0]+ \
+          ('' if options.label == None else '_'+options.label)+ \
+          '.png',
+          format = "PNG")

-  table.close()                                                                                     # close ASCII table file handles
+  table.close()                                                                                     # close ASCII table
+  if options.show: im.show()
View File
@@ -18,64 +18,80 @@ Permute all values in given column(s).
 """, version = scriptID)

-parser.add_option('-l','--label', dest='label', action='extend', metavar='<string LIST>',
-                  help='heading(s) of column to permute')
-parser.add_option('-r', '--rnd', dest='randomSeed', type='int', metavar='int',
+parser.add_option('-l','--label',
+                  dest = 'label',
+                  action = 'extend', metavar = '<string LIST>',
+                  help = 'column(s) to permute')
+parser.add_option('-r', '--rnd',
+                  dest = 'randomSeed',
+                  type = 'int', metavar = 'int',
                   help = 'seed of random number generator [%default]')

-parser.set_defaults(randomSeed = None)
+parser.set_defaults(label = [],
+                    randomSeed = None,
+                   )

 (options,filenames) = parser.parse_args()

-if options.label == None:
-  parser.error('no data column specified...')
+if len(options.label) == 0:
+  parser.error('no labels specified.')
 # --- loop over input files -------------------------------------------------------------------------

+if filenames == []: filenames = ['STDIN']
+
 for name in filenames:
-  if not os.path.exists(name): continue
-  file = {'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr}
-  file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
+  if not (name == 'STDIN' or os.path.exists(name)): continue
+  table = damask.ASCIItable(name = name, outname = name+'_tmp',
+                            buffered = False)
+  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

-  randomSeed = int(os.urandom(4).encode('hex'), 16) if options.randomSeed == None else options.randomSeed  # radom seed per file for second phase
-  np.random.seed(randomSeed)
+# ------------------------------------------ read header ------------------------------------------

-  table = damask.ASCIItable(file['input'],file['output'],buffered=False)                            # make unbuffered ASCII_table
-  table.head_read()                                                                                 # read ASCII header info
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-  table.info_append('random seed %i'%randomSeed)
+  table.head_read()

-# --------------- figure out columns to process ---------------------------------------------------
-  active = []
-  column = {}
+# ------------------------------------------ process labels ---------------------------------------

-  for label in options.label:
-    if label in table.labels:
-      active.append(label)
-      column[label] = table.labels.index(label)                                                     # remember columns of requested data
-    else:
-      file['croak'].write('column %s not found...\n'%label)
+  errors  = []
+  remarks = []
+  columns = []
+  dims    = []
+
+  indices    = table.label_index    (options.label)
+  dimensions = table.label_dimension(options.label)
+  for i,index in enumerate(indices):
+    if index == -1: remarks.append('label {} not present...'.format(options.label[i]))
+    else:
+      columns.append(index)
+      dims.append(dimensions[i])
+
+  if remarks != []: table.croak(remarks)
+  if errors  != []:
+    table.croak(errors)
+    table.close(dismiss = True)
+    continue
 # ------------------------------------------ assemble header ---------------------------------------

+  randomSeed = int(os.urandom(4).encode('hex'), 16) if options.randomSeed == None else options.randomSeed  # random seed per file
+  np.random.seed(randomSeed)
+
+  table.info_append([scriptID + '\t' + ' '.join(sys.argv[1:]),
+                     'random seed {}'.format(randomSeed),
+                    ])
   table.head_write()
 # ------------------------------------------ process data ------------------------------------------

-  permutation = {}
-  table.data_readArray(active)
-  for i,label in enumerate(active):
-    unique = list(set(table.data[:,i]))
-    permutated = np.random.permutation(unique)
-    permutation[label] = dict(zip(unique,permutated))
-
-  table.data_rewind()
-  table.head_read()                                                                                 # read ASCII header info again to get the completed data
-  outputAlive = True
-  while outputAlive and table.data_read():                                                          # read next data line of ASCII table
-    for label in active:                                                                            # loop over all requested stiffnesses
-      table.data[column[label]] = permutation[label][float(table.data[column[label]])]              # apply permutation
-    outputAlive = table.data_write()                                                                # output processed line
+  table.data_readArray()                                                                            # read all data at once
+  for col,dim in zip(columns,dims):
+    table.data[:,col:col+dim] = np.random.permutation(table.data[:,col:col+dim])

 # ------------------------------------------ output result -----------------------------------------

-  outputAlive and table.output_flush()                                                              # just in case of buffered ASCII table
-  table.input_close()                                                                               # close input ASCII table
-  table.output_close()                                                                              # close output ASCII table
-  os.rename(file['name']+'_tmp',file['name'])                                                       # overwrite old one with tmp new
+  table.data_writeArray()
+
+# ------------------------------------------ output finalization -----------------------------------
+
+  table.close()                                                                                     # close ASCII tables
+  if name != 'STDIN': os.rename(name+'_tmp',name)                                                   # overwrite old one with tmp new
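The new one-liner works because np.random.permutation applied to a 2D array shuffles whole rows (the first axis) only, so multi-component columns stay intact within each row; a toy example, not from this commit:

import numpy as np

np.random.seed(0)
data = np.arange(12.).reshape(6,2)                        # six rows, one 2-vector per row
data[:,0:2] = np.random.permutation(data[:,0:2])          # rows reordered, vectors kept together
print(data)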
View File
@@ -17,71 +17,72 @@ Rename scalar, vectorial, and/or tensorial data header labels.
 """, version = scriptID)

-parser.add_option('-l','--label', dest='label', action='extend', metavar='<string LIST>',
+parser.add_option('-l','--label',
+                  dest = 'label',
+                  action = 'extend', metavar = '<string LIST>',
                   help = 'column(s) to rename')
-parser.add_option('-s','--substitute', dest='substitute', action='extend', metavar='<string LIST>',
-                  help='new column label')
+parser.add_option('-s','--substitute',
+                  dest = 'substitute',
+                  action = 'extend', metavar = '<string LIST>',
+                  help = 'new column label(s)')

-parser.set_defaults(label = [])
-parser.set_defaults(substitute = [])
+parser.set_defaults(label = [],
+                    substitute = [],
+                   )

 (options,filenames) = parser.parse_args()
 # --- loop over input files -------------------------------------------------------------------------

-if filenames == []:
-  filenames = ['STDIN']
+if filenames == []: filenames = ['STDIN']

 for name in filenames:
-  if name == 'STDIN':
-    file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr}
-    file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
-  else:
-    if not os.path.exists(name): continue
-    file = {'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr}
-    file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
+  if not (name == 'STDIN' or os.path.exists(name)): continue
+  table = damask.ASCIItable(name = name, outname = name+'_tmp',
+                            buffered = False)
+  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

-  table = damask.ASCIItable(file['input'],file['output'],buffered=False)                            # make unbuffered ASCII_table
-  table.head_read()
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))                                       # read ASCII header info
+# ------------------------------------------ read header ------------------------------------------
+
+  table.head_read()
 # ------------------------------------------ process labels ---------------------------------------

   errors  = []
+  remarks = []

-  if options.label == []:
-    errors.append('no labels specified...')
+  if len(options.label) == 0:
+    errors.append('no labels specified.')
   elif len(options.label) != len(options.substitute):
-    errors.append('mismatch between number of labels ({0}) and substitutes ({1})...'.format(len(options.label),
-                                                                                            len(options.substitute)))
-  else:                                                                                             # tag individual candidates
+    errors.append('mismatch between number of labels ({}) and substitutes ({}).'.format(len(options.label),
+                                                                                        len(options.substitute)))
+  else:
     indices    = table.label_index    (options.label)
     dimensions = table.label_dimension(options.label)
     for i,index in enumerate(indices):
-      if index == -1:
-        errors.append('label %s not present...\n'%options.label[i])
+      if index == -1: remarks.append('label {} not present...'.format(options.label[i]))
       else:
         for j in xrange(dimensions[i]):
           table.labels[index+j] = table.labels[index+j].replace(options.label[i],options.substitute[i])

+  if remarks != []: table.croak(remarks)
   if errors != []:
-    file['croak'].write('\n'.join(errors)+'\n')
+    table.croak(errors)
     table.close(dismiss = True)
     continue

 # ------------------------------------------ assemble header ---------------------------------------
+
+  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
   table.head_write()
# ------------------------------------------ process data --------------------------------------- # ------------------------------------------ process data ------------------------------------------
outputAlive = True outputAlive = True
while outputAlive and table.data_read(): # read next data line of ASCII table while outputAlive and table.data_read(): # read next data line of ASCII table
outputAlive = table.data_write() # output processed line outputAlive = table.data_write() # output processed line
# ------------------------------------------ output result --------------------------------------- # ------------------------------------------ output finalization -----------------------------------
outputAlive and table.output_flush() # just in case of buffered ASCII table
table.close() # close ASCII tables table.close() # close ASCII tables
if file['name'] != 'STDIN': if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new
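Aside: multi-dimensional columns are stored as component labels '1_x','2_x',..., so renaming substitutes the base name inside each component label. A small sketch of that loop (label list and names are made up):

  labels = ['1_v','2_v','3_v','x']                       # hypothetical header labels: 3-vector 'v', scalar 'x'
  index,dim,old,new = 0,3,'v','u'
  for j in xrange(dim):
    labels[index+j] = labels[index+j].replace(old,new)   # -> ['1_u','2_u','3_u','x']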

View File

@@ -19,64 +19,77 @@ Rotate vector and/or tensor column data by given angle around given axis.
 """, version = scriptID)

-parser.add_option('-v','--vector', dest = 'vector', action = 'extend', metavar = '<string LIST>',
-                  help = 'column heading of vector to rotate')
-parser.add_option('-t','--tensor', dest = 'tensor', action = 'extend', metavar = '<string LIST>',
-                  help = 'column heading of tensor to rotate')
-parser.add_option('-r', '--rotation', dest = 'rotation', type = 'float', nargs = 4, metavar = ' '.join(['float']*4),
+parser.add_option('-v','--vector',
+                  dest = 'vector',
+                  action = 'extend', metavar = '<string LIST>',
+                  help = 'column heading of vector(s) to rotate')
+parser.add_option('-t','--tensor',
+                  dest = 'tensor',
+                  action = 'extend', metavar = '<string LIST>',
+                  help = 'column heading of tensor(s) to rotate')
+parser.add_option('-r', '--rotation',
+                  dest = 'rotation',
+                  type = 'float', nargs = 4, metavar = ' '.join(['float']*4),
                   help = 'angle and axis to rotate data [%default]')
-parser.add_option('-d', '--degrees', dest = 'degrees', action = 'store_true',
+parser.add_option('-d', '--degrees',
+                  dest = 'degrees',
+                  action = 'store_true',
                   help = 'angles are given in degrees [%default]')

-parser.set_defaults(rotation = (0.,1.,1.,1.))                                       # no rotation about 1,1,1
-parser.set_defaults(degrees = False)
+parser.set_defaults(rotation = (0.,1.,1.,1.),                                       # no rotation about 1,1,1
+                    degrees = False,
+                   )

 (options,filenames) = parser.parse_args()

-datainfo = {                                                                        # list of requested labels per datatype
-             'vector':     {'len':3,
-                            'label':[]},
-             'tensor':     {'len':9,
-                            'label':[]},
-           }
-
-if options.vector != None: datainfo['vector']['label'] += options.vector
-if options.tensor != None: datainfo['tensor']['label'] += options.tensor
+if options.vector == None and options.tensor == None:
+  parser.error('no data column specified.')

 toRadians = math.pi/180.0 if options.degrees else 1.0                               # rescale degrees to radians
-r = damask.Quaternion().fromAngleAxis(toRadians*options.rotation[0],options.rotation[1:])
-R = r.asMatrix()
+q = damask.Quaternion().fromAngleAxis(toRadians*options.rotation[0],options.rotation[1:])
+R = q.asMatrix()

 # --- loop over input files -------------------------------------------------------------------------

-if filenames == []:
-  filenames = ['STDIN']
+if filenames == []: filenames = ['STDIN']

 for name in filenames:
-  if name == 'STDIN':
-    file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr}
-    file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
-  else:
-    if not os.path.exists(name): continue
-    file = {'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr}
-    file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
+  if not (name == 'STDIN' or os.path.exists(name)): continue
+  table = damask.ASCIItable(name = name, outname = name+'_tmp',
+                            buffered = False)
+  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

+# ------------------------------------------ read header ------------------------------------------
+
+  table.head_read()
+
+# ------------------------------------------ sanity checks ----------------------------------------
+
+  items = {
+           'tensor': {'dim': 9, 'shape': [3,3], 'labels':options.tensor, 'active':[], 'column': []},
+           'vector': {'dim': 3, 'shape': [3],   'labels':options.vector, 'active':[], 'column': []},
+          }
+  errors  = []
+  remarks = []
+  column  = {}
+
+  for type, data in items.iteritems():
+    for what in data['labels']:
+      dim = table.label_dimension(what)
+      if dim != data['dim']: remarks.append('column {} is not a {}.'.format(what,type))
+      else:
+        items[type]['active'].append(what)
+        items[type]['column'].append(table.label_index(what))
+
+  if remarks != []: table.croak(remarks)
+  if errors  != []:
+    table.croak(errors)
+    table.close(dismiss = True)
+    continue

 # ------------------------------------------ assemble header --------------------------------------

-  table = damask.ASCIItable(file['input'],file['output'],buffered=False)            # make unbuffered ASCII_table
-  table.head_read()                                                                 # read ASCII header info
   table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-
-# --------------- figure out columns to process ---------------------------------------------------
-  active = defaultdict(list)
-  column = defaultdict(dict)
-
-  for datatype,info in datainfo.items():
-    for label in info['label']:
-      key = '1_'+label
-      if key in table.labels:
-        active[datatype].append(label)
-        column[datatype][label] = table.labels.index(key)                           # remember columns of requested data
-      else:
-        file['croak'].write('column %s not found...\n'%label)
-
-# ------------------------------------------ assemble header ---------------------------------------
   table.head_write()

 # ------------------------------------------ process data ------------------------------------------
@@ -85,28 +98,21 @@ for name in filenames:

     datatype = 'vector'

-    for label in active[datatype] if datatype in active else []:                    # loop over all requested labels
-      table.data[column[datatype][label]:column[datatype][label]+datainfo[datatype]['len']] = \
-        r * np.array(map(float,
-                         table.data[column[datatype][label]:\
-                                    column[datatype][label]+datainfo[datatype]['len']]))
+    for column in items[datatype]['column']:                                        # loop over all requested labels
+      table.data[column:column+items[datatype]['dim']] = \
+        q * np.array(map(float,table.data[column:column+items[datatype]['dim']]))

     datatype = 'tensor'

-    for label in active[datatype] if datatype in active else []:                    # loop over all requested labels
-      A = np.array(map(float,table.data[column[datatype][label]:\
-                              column[datatype][label]+datainfo[datatype]['len']])).\
-                              reshape(np.sqrt(datainfo[datatype]['len']),
-                                      np.sqrt(datainfo[datatype]['len']))
-      table.data[column[datatype][label]:\
-                 column[datatype][label]+datainfo[datatype]['len']] = \
-        np.dot(R,np.dot(A,R.transpose())).reshape(datainfo[datatype]['len'])
+    for column in items[datatype]['column']:                                        # loop over all requested labels
+      table.data[column:column+items[datatype]['dim']] = \
+        np.dot(R,np.dot(np.array(map(float,table.data[column:column+items[datatype]['dim']])).\
+                        reshape(items[datatype]['shape']),R.transpose())
+              ).reshape(items[datatype]['dim'])

     outputAlive = table.data_write()                                                # output processed line

-# ------------------------------------------ output result -----------------------------------------
-
-  outputAlive and table.output_flush()
-
-  table.input_close()                                                               # close input ASCII table (works for stdin)
-  table.output_close()                                                              # close output ASCII table (works for stdout)
-  if file['name'] != 'STDIN':
-    os.rename(file['name']+'_tmp',file['name'])                                     # overwrite old one with tmp new
+# ------------------------------------------ output finalization -----------------------------------
+
+  table.close()                                                                     # close ASCII tables
+  if name != 'STDIN': os.rename(name+'_tmp',name)                                   # overwrite old one with tmp new
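Aside: the tensor branch applies the similarity transform R A Rᵀ to each 3x3 tensor stored as nine table columns. A standalone numpy sketch of that step (rotation angle and tensor values are made up):

  import numpy as np

  theta = np.radians(30.)
  R = np.array([[np.cos(theta),-np.sin(theta),0.],
                [np.sin(theta), np.cos(theta),0.],
                [0.,            0.,           1.]])      # rotation about z
  A = np.diag([1.,2.,3.])                                # tensor as 3x3 matrix
  rotated = np.dot(R,np.dot(A,R.T))                      # R A R^T
  flat = rotated.reshape(9)                              # back to nine table columns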

View File

@@ -15,94 +15,75 @@ scriptName = os.path.splitext(scriptID.split()[1])[0]
 # --------------------------------------------------------------------

 parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
-Uniformly scale values in scalar/special, vector, or tensor columns by given factor.
+Uniformly scale column values by given factor.

 """, version = scriptID)

-parser.add_option('-s','--special', dest='special', action='extend', metavar='<string LIST>',
-                  help='heading of columns containing field values of special dimension')
-parser.add_option('-d','--dimension', dest='N', type='int', metavar='int',
-                  help='dimension of special field values [%default]')
-parser.add_option('-v','--vector', dest='vector', action='extend', metavar='<string LIST>',
-                  help='column heading of vector to scale')
-parser.add_option('-t','--tensor', dest='tensor', action='extend', metavar='<string LIST>',
-                  help='column heading of tensor to scale')
-parser.add_option('-f','--factor', dest='factor', action='extend', metavar='<float LIST>',
-                  help='list of scalar/special, vector, and tensor scaling factors (in this order!)')
+parser.add_option('-l','--label',
+                  dest = 'label',
+                  action = 'extend', metavar = '<string LIST>',
+                  help = 'column(s) to scale')
+parser.add_option('-f','--factor',
+                  dest = 'factor',
+                  action = 'extend', metavar = '<float LIST>',
+                  help = 'factor(s) per column')

-parser.set_defaults(special = [])
-parser.set_defaults(vector = [])
-parser.set_defaults(tensor = [])
-parser.set_defaults(factor = [])
-parser.set_defaults(N = 1)
+parser.set_defaults(label = [],
+                    factor = [],
+                   )

 (options,filenames) = parser.parse_args()

-options.factor = np.array(options.factor,'d')
-
-datainfo = {                                                                        # list of requested labels per datatype
-             'special':    {'len':options.N,
-                            'label':[]},
-             'vector':     {'len':3,
-                            'label':[]},
-             'tensor':     {'len':9,
-                            'label':[]},
-           }
-
-length = 0
-if options.special != []: datainfo['special']['label'] += options.special; length += len(options.special)
-if options.vector  != []: datainfo['vector']['label']  += options.vector;  length += len(options.vector)
-if options.tensor  != []: datainfo['tensor']['label']  += options.tensor;  length += len(options.tensor)
-if len(options.factor) != length:
-  parser.error('length of scaling vector does not match column count...')
+if len(options.label) != len(options.factor):
+  parser.error('number of column labels and factors do not match.')

 # --- loop over input files -------------------------------------------------------------------------

-if filenames == []:
-  filenames = ['STDIN']
+if filenames == []: filenames = ['STDIN']

 for name in filenames:
-  if name == 'STDIN':
-    file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr}
-    file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
-  else:
-    if not os.path.exists(name): continue
-    file = {'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr}
-    file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
-
-  table = damask.ASCIItable(file['input'],file['output'],buffered=False)            # make unbuffered ASCII_table
-  table.head_read()                                                                 # read ASCII header info
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-
-# --------------- figure out columns to process ---------------------------------------------------
-  active = defaultdict(list)
-  column = defaultdict(dict)
-
-  for datatype,info in datainfo.items():
-    for label in info['label']:
-      key = '1_'+label if info['len'] > 1 else label
-      if key in table.labels:
-        active[datatype].append(label)
-        column[datatype][label] = table.labels.index(key)                           # remember columns of requested data
-      else:
-        file['croak'].write('column %s not found...\n'%label)
+  if not (name == 'STDIN' or os.path.exists(name)): continue
+  table = damask.ASCIItable(name = name, outname = name+'_tmp',
+                            buffered = False)
+  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
+
+# ------------------------------------------ read header ------------------------------------------
+
+  table.head_read()
+
+  errors  = []
+  remarks = []
+  columns = []
+  dims    = []
+  factors = []
+
+  for what,factor in zip(options.label,options.factor):
+    col = table.label_index(what)
+    if col < 0: remarks.append('column {} not found.'.format(what))
+    else:
+      columns.append(col)
+      factors.append(float(factor))
+      dims.append(table.label_dimension(what))
+
+  if remarks != []: table.croak(remarks)
+  if errors  != []:
+    table.croak(errors)
+    table.close(dismiss = True)
+    continue

 # ------------------------------------------ assemble header ---------------------------------------

+  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
   table.head_write()

 # ------------------------------------------ process data ------------------------------------------

   outputAlive = True
   while outputAlive and table.data_read():                                          # read next data line of ASCII table
-    i = 0
-    for datatype,labels in sorted(active.items(),key=lambda x:datainfo[x[0]]['len']): # loop over special,vector,tensor
-      for label in labels:                                                          # loop over all requested labels
-        for j in xrange(datainfo[datatype]['len']):                                 # loop over entity elements
-          table.data[column[datatype][label]+j] = float(table.data[column[datatype][label]+j]) * options.factor[i]
-        i += 1
+    for col,dim,factor in zip(columns,dims,factors):                                # loop over items
+      table.data[col:col+dim] = factor * np.array(table.data[col:col+dim],'d')
     outputAlive = table.data_write()                                                # output processed line

-# ------------------------------------------ output result -----------------------------------------
-
-  outputAlive and table.output_flush()                                              # just in case of buffered ASCII table
-
-  table.input_close()                                                               # close input ASCII table (works for stdin)
-  table.output_close()                                                              # close output ASCII table (works for stdout)
-  if file['name'] != 'STDIN':
-    os.rename(file['name']+'_tmp',file['name'])                                     # overwrite old one with tmp new
+# ------------------------------------------ output finalization -----------------------------------
+
+  table.close()                                                                     # close ASCII tables
+  if name != 'STDIN': os.rename(name+'_tmp',name)                                   # overwrite old one with tmp new
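Aside: the new per-row loop scales a slice of the string-valued data row in place; assigning a numpy array to a list slice writes the converted floats back. A tiny sketch (row contents made up):

  import numpy as np

  row = ['1.0','2.0','3.0','4.0']                        # one data row as read from the table
  col,dim,factor = 1,2,10.0
  row[col:col+dim] = factor * np.array(row[col:col+dim],'d')
  # row -> ['1.0', 10.0, 20.0, '4.0']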

View File

@@ -7,7 +7,7 @@ from collections import defaultdict
 from optparse import OptionParser
 import damask

-scriptID   = string.replace('$Id$','\n','\\n')
+scriptID   = '$Id$'
 scriptName = os.path.splitext(scriptID.split()[1])[0]

 # --------------------------------------------------------------------
@@ -15,94 +15,75 @@ scriptName = os.path.splitext(scriptID.split()[1])[0]
 # --------------------------------------------------------------------

 parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
-Shift values of scalar/special, vector, or tensor columns by given offset.
+Uniformly shift column values by given offset.

 """, version = scriptID)

-parser.add_option('-s','--special', dest='special', action='extend', metavar='<string LIST>',
-                  help='heading of columns containing field values of special dimension')
-parser.add_option('-d','--dimension', dest='N', type='int', metavar='int',
-                  help='dimension of special field values [%default]')
-parser.add_option('-v','--vector', dest='vector', action='extend', metavar='<string LIST>',
-                  help='column heading to shift by vector')
-parser.add_option('-t','--tensor', dest='tensor', action='extend', metavar='<string LIST>',
-                  help='column heading to shift by tensor')
-parser.add_option('-o','--offset', dest='delta', action='extend', metavar='<float LIST>',
-                  help='list of scalar/special, vector, and tensor shifts (in this order!)')
+parser.add_option('-l','--label',
+                  dest = 'label',
+                  action = 'extend', metavar = '<string LIST>',
+                  help = 'column(s) to shift')
+parser.add_option('-o','--offset',
+                  dest = 'offset',
+                  action = 'extend', metavar = '<float LIST>',
+                  help = 'offset(s) per column')

-parser.set_defaults(special = [])
-parser.set_defaults(vector = [])
-parser.set_defaults(tensor = [])
-parser.set_defaults(delta = [])
-parser.set_defaults(N = 1)
+parser.set_defaults(label = [],
+                    offset = [],
+                   )

 (options,filenames) = parser.parse_args()

-options.delta = np.array(options.delta,'d')
-
-datainfo = {                                                                        # list of requested labels per datatype
-             'special':    {'len':options.N,
-                            'label':[]},
-             'vector':     {'len':3,
-                            'label':[]},
-             'tensor':     {'len':9,
-                            'label':[]},
-           }
-
-length = 0
-if options.special != []: datainfo['special']['label'] += options.special; length += len(options.special)
-if options.vector  != []: datainfo['vector']['label']  += options.vector;  length += len(options.vector)
-if options.tensor  != []: datainfo['tensor']['label']  += options.tensor;  length += len(options.tensor)
-if len(options.delta) != length:
-  parser.error('length of offset vector does not match column types...')
+if len(options.label) != len(options.offset):
+  parser.error('number of column labels and offsets do not match.')

 # --- loop over input files -------------------------------------------------------------------------

-if filenames == []:
-  filenames = ['STDIN']
+if filenames == []: filenames = ['STDIN']

 for name in filenames:
-  if name == 'STDIN':
-    file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr}
-    file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
-  else:
-    if not os.path.exists(name): continue
-    file = {'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr}
-    file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
-
-  table = damask.ASCIItable(file['input'],file['output'],buffered=False)            # make unbuffered ASCII_table
-  table.head_read()                                                                 # read ASCII header info
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-
-# --------------- figure out columns to process ---------------------------------------------------
-  active = defaultdict(list)
-  column = defaultdict(dict)
-
-  for datatype,info in datainfo.items():
-    for label in info['label']:
-      key = '1_'+label if info['len'] > 1 else label                                # non-special labels have to start with '1_'
-      if key in table.labels:
-        active[datatype].append(label)
-        column[datatype][label] = table.labels.index(key)                           # remember columns of requested data
-      else:
-        file['croak'].write('column %s not found...\n'%label)
+  if not (name == 'STDIN' or os.path.exists(name)): continue
+  table = damask.ASCIItable(name = name, outname = name+'_tmp',
+                            buffered = False)
+  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
+
+# ------------------------------------------ read header ------------------------------------------
+
+  table.head_read()
+
+  errors  = []
+  remarks = []
+  columns = []
+  dims    = []
+  offsets = []
+
+  for what,offset in zip(options.label,options.offset):
+    col = table.label_index(what)
+    if col < 0: remarks.append('column {} not found.'.format(what))
+    else:
+      columns.append(col)
+      offsets.append(float(offset))
+      dims.append(table.label_dimension(what))
+
+  if remarks != []: table.croak(remarks)
+  if errors  != []:
+    table.croak(errors)
+    table.close(dismiss = True)
+    continue

 # ------------------------------------------ assemble header ---------------------------------------

+  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
   table.head_write()

 # ------------------------------------------ process data ------------------------------------------

   outputAlive = True
   while outputAlive and table.data_read():                                          # read next data line of ASCII table
-    i = 0
-    for datatype,labels in sorted(active.items(),key=lambda x:datainfo[x[0]]['len']): # loop over scalar,vector,tensor
-      for label in labels:                                                          # loop over all requested labels
-        for j in xrange(datainfo[datatype]['len']):                                 # loop over entity elements
-          table.data[column[datatype][label]+j] = float(table.data[column[datatype][label]+j]) + options.delta[i]
-        i += 1
+    for col,dim,offset in zip(columns,dims,offsets):                                # loop over items
+      table.data[col:col+dim] = offset + np.array(table.data[col:col+dim],'d')
     outputAlive = table.data_write()                                                # output processed line

-# ------------------------------------------ output result -----------------------------------------
-
-  outputAlive and table.output_flush()                                              # just in case of buffered ASCII table
-
-  table.input_close()                                                               # close input ASCII table (works for stdin)
-  table.output_close()                                                              # close output ASCII table (works for stdout)
-  if file['name'] != 'STDIN':
-    os.rename(file['name']+'_tmp',file['name'])                                     # overwrite old one with tmp new
+# ------------------------------------------ output finalization -----------------------------------
+
+  table.close()                                                                     # close ASCII tables
+  if name != 'STDIN': os.rename(name+'_tmp',name)                                   # overwrite old one with tmp new
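Aside: a hypothetical invocation of the reworked interface (script and file names are made up; assuming the extendable option accepts comma-separated lists, labels and offsets pair one-to-one):

  shiftData.py --label f,p --offset 1.5,-2.0 data.txt    # shifts column 'f' by 1.5 and 'p' by -2.0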

View File

@@ -18,43 +18,62 @@ Show components of given ASCIItable(s).
 """, version = scriptID)

-parser.add_option('-a','--head', dest='head', action='store_true',
-                  help='output all heading (info + labels)')
-parser.add_option('-i','--info', dest='info', action='store_true',
-                  help='output info lines')
-parser.add_option('-l','--labels', dest='labels', action='store_true',
-                  help='output labels')
-parser.add_option('-d','--data', dest='data', action='store_true',
+parser.add_option('-d','--data',
+                  dest = 'data',
+                  action = 'store_true',
                   help = 'output data')
-parser.add_option('-c','--column', dest='col', action='store_true',
-                  help='switch to label column format')
-parser.add_option('--nolabels', dest='nolabels', action='store_true',
+parser.add_option('-a','--head',
+                  dest = 'head',
+                  action = 'store_true',
+                  help = 'output all heading (info + labels)')
+parser.add_option('-i','--info',
+                  dest = 'info',
+                  action = 'store_true',
+                  help = 'output info lines')
+parser.add_option('-l','--labels',
+                  dest = 'labels',
+                  action = 'store_true',
+                  help = 'output labels')
+parser.add_option('-c','--column',
+                  dest = 'col',
+                  action = 'store_true',
+                  help = 'print labels as one column')
+parser.add_option('--nolabels',
+                  dest = 'labeled',
+                  action = 'store_false',
                   help = 'table has no labels')
-parser.set_defaults(col = False)
-parser.set_defaults(nolabels = False)
+parser.add_option('-t','--table',
+                  dest = 'table',
+                  action = 'store_true',
+                  help = 'output heading line for proper ASCIItable format')
+parser.set_defaults(head = False,
+                    info = False,
+                    labels = False,
+                    data = False,
+                    col = False,
+                    labeled = True,
+                    table = False,
+                   )

 (options,filenames) = parser.parse_args()

-# ------------------------------------------ setup file handles ---------------------------------------
-
-files = []
-if filenames == []:
-  files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
-else:
-  for name in filenames:
-    if os.path.exists(name):
-      files.append({'name':name, 'input':open(name), 'output':sys.stdout, 'croak':sys.stderr})
-
-# ------------------------------------------ extract labels ---------------------------------------
-
-for file in files:
-  if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
-  else:                       file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
-
-  table = damask.ASCIItable(file['input'],file['output'],buffered=False,labels=not options.nolabels) # make unbuffered ASCII_table
-  table.head_read()                                                                 # read ASCII header info
-  if options.head or options.info:   file['output'].write('\n'.join(table.info)+'\n')
-  if options.head or options.labels: file['output'].write({True:'\n',False:'\t'}[options.col].join(table.labels)+'\n')
+# --- loop over input files -------------------------------------------------------------------------
+
+if filenames == []: filenames = ['STDIN']
+
+for name in filenames:
+  if not (name == 'STDIN' or os.path.exists(name)): continue
+  table = damask.ASCIItable(name = name, outname = None,
+                            buffered = False, labeled = options.labeled, readonly = True)
+  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
+
+# ------------------------------------------ output head ---------------------------------------
+
+  table.head_read()
+  if not (options.head or options.info):                          table.info_clear()
+  if not (options.head or (options.labels and options.labeled)):  table.labels_clear()
+  table.head_write(header = options.table)

 # ------------------------------------------ output data ---------------------------------------
@@ -62,7 +81,4 @@ for file in files:

   outputAlive = True
   while outputAlive and table.data_read():                                          # read next data line of ASCII table
     outputAlive = table.data_write()                                                # output line

-  outputAlive and table.output_flush()
-  if file['name'] != 'STDIN':
-    table.input_close()
+  table.close()
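Aside: a hedged sketch of the new read-only mode for inspecting a table programmatically (file name is made up; keyword behavior as introduced by this commit):

  import damask

  table = damask.ASCIItable(name = 'data.txt', outname = None,
                            buffered = False, readonly = True)
  table.head_read()                                      # parse info lines and labels
  print table.labels                                     # inspect column labels
  table.close()                                          # nothing was written, so no temp file to rename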

View File

@@ -21,13 +21,18 @@ With coordinates in columns "x", "y", and "z"; sorting with x slowest and z fastest.
 """, version = scriptID)

-parser.add_option('-l','--label', dest='keys', action='extend', metavar='<string LIST>',
+parser.add_option('-l','--label',
+                  dest = 'keys',
+                  action = 'extend', metavar = '<string LIST>',
                   help = 'list of column labels (a,b,c,...)')
-parser.add_option('-r','--reverse', dest='reverse', action='store_true',
-                  help='reverse sorting')
+parser.add_option('-r','--reverse',
+                  dest = 'reverse',
+                  action = 'store_true',
+                  help = 'sort in reverse')

-parser.set_defaults(key = [])
-parser.set_defaults(reverse = False)
+parser.set_defaults(key = [],
+                    reverse = False,
+                   )

 (options,filenames) = parser.parse_args()

@@ -36,50 +41,41 @@ if options.keys == None:
 options.keys.reverse()                                                              # numpy sorts with most significant column as last

-# ------------------------------------------ setup file handles ---------------------------------------
-
-files = []
-if filenames == []:
-  files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
-else:
-  for name in filenames:
-    if os.path.exists(name):
-      files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
-
-# ------------------------------------------ loop over input files ---------------------------------------
-
-for file in files:
-  if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
-  else:                       file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
-
-  table = damask.ASCIItable(file['input'],file['output'],False)                     # make unbuffered ASCII_table
-  table.head_read()                                                                 # read ASCII header info
-  table.info_append(string.replace(scriptID,'\n','\\n') + \
-                    '\t' + ' '.join(sys.argv[1:]))
+# --- loop over input files -------------------------------------------------------------------------
+
+if filenames == []: filenames = ['STDIN']
+
+for name in filenames:
+  if not (name == 'STDIN' or os.path.exists(name)): continue
+  table = damask.ASCIItable(name = name, outname = name+'_tmp',
+                            buffered = False)
+  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

 # ------------------------------------------ assemble header ---------------------------------------

+  table.head_read()
+  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
   table.head_write()

 # ------------------------------------------ process data ---------------------------------------

   table.data_readArray()

   cols    = []
+  remarks = []
-  for column in table.label_index(options.keys):
-    cols += [table.data[:,column]]
+  for i,column in enumerate(table.label_index(options.keys)):
+    if column < 0:
+      remarks.append("label {0} not present.".format(options.keys[i]))
+    else:
+      cols += [table.data[:,column]]
+  if remarks != []: table.croak(remarks)

-  ind = np.lexsort(cols)
-  if options.reverse:
-    ind = ind[::-1]
-
-  table.data = table.data[ind]
-  table.data_writeArray()
+  ind = np.lexsort(cols) if cols != [] else np.arange(table.data.shape[0])
+  if options.reverse: ind = ind[::-1]

 # ------------------------------------------ output result ---------------------------------------

-  table.output_flush()                                                              # just in case of buffered ASCII table
-
-  table.input_close()                                                               # close input ASCII table
-  if file['name'] != 'STDIN':
-    table.output_close()                                                            # close output ASCII table
-    os.rename(file['name']+'_tmp',file['name'])                                     # overwrite old one with tmp new
+  table.data = table.data[ind]
+  table.data_writeArray()
+
+  table.close()                                                                     # close ASCII table
+  if name != 'STDIN': os.rename(name+'_tmp',name)                                   # overwrite old one with tmp new
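Aside: `np.lexsort` treats its last key as most significant, which is why the key list is reversed beforehand. A small illustration (data made up):

  import numpy as np

  data = np.array([[2.,1.],
                   [1.,2.],
                   [2.,0.]])
  ind  = np.lexsort((data[:,1],data[:,0]))               # sort by column 0, ties broken by column 1
  data = data[ind]                                       # -> [[1.,2.],[2.,0.],[2.,1.]]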

View File

@@ -12,125 +12,80 @@ scriptName = os.path.splitext(scriptID.split()[1])[0]
 # --------------------------------------------------------------------
 # MAIN
 # --------------------------------------------------------------------

-identifiers = {
-        'grid':   ['a','b','c'],
-        'size':   ['x','y','z'],
-        'origin': ['x','y','z'],
-          }
-mappings = {
-        'grid':            lambda x: int(x),
-        'size':            lambda x: float(x),
-        'origin':          lambda x: float(x),
-        'homogenization':  lambda x: int(x),
-        'microstructures': lambda x: int(x),
-          }
-
 parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
 Changes the (three-dimensional) canvas of a spectral geometry description.

 """, version = scriptID)

-parser.add_option('-g', '--grid', dest='grid', nargs = 3, metavar=' '.join(['string']*3),
+parser.add_option('-g', '--grid',
+                  dest = 'grid',
+                  type = 'string', nargs = 3, metavar = ' '.join(['string']*3),
                   help = 'a,b,c grid of hexahedral box [unchanged]')
-parser.add_option('-o', '--offset', dest='offset', type='int', nargs = 3, metavar=' '.join(['int']*3),
+parser.add_option('-o', '--offset',
+                  dest = 'offset',
+                  type = 'int', nargs = 3, metavar = ' '.join(['int']*3),
                   help = 'a,b,c offset from old to new origin of grid %default')
-parser.add_option('-f', '--fill', dest='fill', type='int', metavar = 'int',
+parser.add_option('-f', '--fill',
+                  dest = 'fill',
+                  type = 'int', metavar = 'int',
                   help = '(background) canvas grain index. "0" selects maximum microstructure index + 1 [%default]')

-parser.set_defaults(grid = ['0','0','0'])
-parser.set_defaults(offset = (0,0,0))
-parser.set_defaults(fill = 0)
+parser.set_defaults(grid   = ['0','0','0'],
+                    offset = (0,0,0),
+                    fill   = 0,
+                   )

 (options, filenames) = parser.parse_args()

-#--- setup file handles --------------------------------------------------------------------------
-
-files = []
-if filenames == []:
-  files.append({'name':'STDIN',
-                'input':sys.stdin,
-                'output':sys.stdout,
-                'croak':sys.stderr,
-               })
-else:
-  for name in filenames:
-    if os.path.exists(name):
-      files.append({'name':name,
-                    'input':open(name),
-                    'output':open(name+'_tmp','w'),
-                    'croak':sys.stdout,
-                   })
-
-#--- loop over input files ------------------------------------------------------------------------
-
-for file in files:
-  file['croak'].write('\033[1m' + scriptName + '\033[0m: ' + (file['name'] if file['name'] != 'STDIN' else '') + '\n')
-
-  table = damask.ASCIItable(file['input'],file['output'],labels = False)
-  table.head_read()
+# --- loop over input files -------------------------------------------------------------------------
+
+if filenames == []: filenames = ['STDIN']
+
+for name in filenames:
+  if not (name == 'STDIN' or os.path.exists(name)): continue
+  table = damask.ASCIItable(name = name, outname = name+'_tmp',
+                            buffered = False, labeled = False)
+  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

 # --- interpret header ----------------------------------------------------------------------------

-  info = {
-          'grid':    np.zeros(3,'i'),
-          'size':    np.zeros(3,'d'),
-          'origin':  np.zeros(3,'d'),
-          'homogenization': 0,
-          'microstructures': 0,
-         }
-
-  extra_header = []
-  for header in table.info:
-    headitems = map(str.lower,header.split())
-    if len(headitems) == 0: continue                                                # skip blank lines
-    if headitems[0] in mappings.keys():
-      if headitems[0] in identifiers.keys():
-        for i in xrange(len(identifiers[headitems[0]])):
-          info[headitems[0]][i] = \
-            mappings[headitems[0]](headitems[headitems.index(identifiers[headitems[0]][i])+1])
-      else:
-        info[headitems[0]] = mappings[headitems[0]](headitems[1])
-    else:
-      extra_header.append(header)
-
-  file['croak'].write('grid     a b c:  %s\n'%(' x '.join(map(str,info['grid']))) + \
-                      'size     x y z:  %s\n'%(' x '.join(map(str,info['size']))) + \
-                      'origin   x y z:  %s\n'%(' : '.join(map(str,info['origin']))) + \
-                      'homogenization:  %i\n'%info['homogenization'] + \
-                      'microstructures: %i\n'%info['microstructures'])
-
-  if np.any(info['grid'] < 1):
-    file['croak'].write('invalid grid a b c.\n')
-    continue
-  if np.any(info['size'] <= 0.0):
-    file['croak'].write('invalid size x y z.\n')
-    continue
-
-#--- read data ------------------------------------------------------------------------------------
-
-  microstructure = np.zeros(info['grid'].prod(),'i')                                # initialize as flat array
-  i = 0
-  while table.data_read():
-    items = table.data
-    if len(items) > 2:
-      if   items[1].lower() == 'of': items = [int(items[2])]*int(items[0])
-      elif items[1].lower() == 'to': items = xrange(int(items[0]),1+int(items[2]))
-      else:                          items = map(int,items)
-    else:                            items = map(int,items)
-    s = len(items)
-    microstructure[i:i+s] = items
-    i += s
-
-#--- do work ------------------------------------------------------------------------------------
-
+  table.head_read()
+  info,extra_header = table.head_getGeom()
+
+  table.croak(['grid     a b c:  %s'%(' x '.join(map(str,info['grid']))),
+               'size     x y z:  %s'%(' x '.join(map(str,info['size']))),
+               'origin   x y z:  %s'%(' : '.join(map(str,info['origin']))),
+               'homogenization:  %i'%info['homogenization'],
+               'microstructures: %i'%info['microstructures'],
+              ])
+
+  errors = []
+  if np.any(info['grid'] < 1):    errors.append('invalid grid a b c.')
+  if np.any(info['size'] <= 0.0): errors.append('invalid size x y z.')
+  if errors != []:
+    table.croak(errors)
+    table.close(dismiss = True)
+    continue
+
+# --- read data ------------------------------------------------------------------------------------
+
+  microstructure = table.microstructure_read(info['grid']).reshape(info['grid'],order='F') # read microstructure
+
+# --- do work ------------------------------------------------------------------------------------
+
   newInfo = {
              'grid':    np.zeros(3,'i'),
              'origin':  np.zeros(3,'d'),
             'microstructures': 0,
            }

   newInfo['grid'] = np.array([{True: int(o*float(n.translate(None,'xX'))),
                                False: int(n.translate(None,'xX'))}[n[-1].lower() == 'x'] for o,n in zip(info['grid'],options.grid)],'i')
   newInfo['grid'] = np.where(newInfo['grid'] <= 0, info['grid'],newInfo['grid'])

-  microstructure = microstructure.reshape(info['grid'],order='F')
   microstructure_cropped = np.zeros(newInfo['grid'],'i')
-  microstructure_cropped.fill({True:options.fill,False:microstructure.max()+1}[options.fill>0])
+  microstructure_cropped.fill(options.fill if options.fill > 0 else microstructure.max()+1)
   xindex = list(set(xrange(options.offset[0],options.offset[0]+newInfo['grid'][0])) & \
                 set(xrange(info['grid'][0])))
   yindex = list(set(xrange(options.offset[1],options.offset[1]+newInfo['grid'][1])) & \
@@ -152,43 +107,46 @@ for file in files:

   newInfo['microstructures'] = microstructure_cropped.max()

 # --- report ---------------------------------------------------------------------------------------

-  if (any(newInfo['grid'] != info['grid'])):
-    file['croak'].write('--> grid     a b c:  %s\n'%(' x '.join(map(str,newInfo['grid']))))
-  if (any(newInfo['size'] != info['size'])):
-    file['croak'].write('--> size     x y z:  %s\n'%(' x '.join(map(str,newInfo['size']))))
-  if (any(newInfo['origin'] != info['origin'])):
-    file['croak'].write('--> origin   x y z:  %s\n'%(' : '.join(map(str,newInfo['origin']))))
-  if (newInfo['microstructures'] != info['microstructures']):
-    file['croak'].write('--> microstructures: %i\n'%newInfo['microstructures'])
-
-  if np.any(newInfo['grid'] < 1):
-    file['croak'].write('invalid new grid a b c.\n')
-    continue
-  if np.any(newInfo['size'] <= 0.0):
-    file['croak'].write('invalid new size x y z.\n')
+  remarks = []
+  errors  = []
+
+  if (any(newInfo['grid']   != info['grid'])):   remarks.append('--> grid     a b c:  %s'%(' x '.join(map(str,newInfo['grid']))))
+  if (any(newInfo['size']   != info['size'])):   remarks.append('--> size     x y z:  %s'%(' x '.join(map(str,newInfo['size']))))
+  if (any(newInfo['origin'] != info['origin'])): remarks.append('--> origin   x y z:  %s'%(' : '.join(map(str,newInfo['origin']))))
+  if (    newInfo['microstructures'] != info['microstructures']): remarks.append('--> microstructures: %i'%newInfo['microstructures'])
+
+  if np.any(newInfo['grid'] < 1):    errors.append('invalid new grid a b c.')
+  if np.any(newInfo['size'] <= 0.0): errors.append('invalid new size x y z.')
+
+  if remarks != []: table.croak(remarks)
+  if errors != []:
+    table.croak(errors)
+    table.close(dismiss = True)
     continue

 # --- write header ---------------------------------------------------------------------------------

-  table.labels_clear()
   table.info_clear()
   table.info_append(extra_header+[
     scriptID + ' ' + ' '.join(sys.argv[1:]),
-    "grid\ta %i\tb %i\tc %i"%(newInfo['grid'][0],newInfo['grid'][1],newInfo['grid'][2],),
-    "size\tx %f\ty %f\tz %f"%(newInfo['size'][0],newInfo['size'][1],newInfo['size'][2],),
-    "origin\tx %f\ty %f\tz %f"%(newInfo['origin'][0],newInfo['origin'][1],newInfo['origin'][2],),
-    "homogenization\t%i"%info['homogenization'],
-    "microstructures\t%i"%(newInfo['microstructures']),
+    "grid\ta {grid[0]}\tb {grid[1]}\tc {grid[2]}".format(grid=newInfo['grid']),
+    "size\tx {size[0]}\ty {size[1]}\tz {size[2]}".format(size=newInfo['size']),
+    "origin\tx {origin[0]}\ty {origin[1]}\tz {origin[2]}".format(origin=newInfo['origin']),
+    "homogenization\t{homog}".format(homog=info['homogenization']),
+    "microstructures\t{microstructures}".format(microstructures=newInfo['microstructures']),
     ])
+  table.labels_clear()
   table.head_write()
   table.output_flush()

 # --- write microstructure information ------------------------------------------------------------

   formatwidth = int(math.floor(math.log10(microstructure_cropped.max())+1))
   table.data = microstructure_cropped.reshape((newInfo['grid'][0],newInfo['grid'][1]*newInfo['grid'][2]),order='F').transpose()
   table.data_writeArray('%%%ii'%(formatwidth),delimiter=' ')

 # --- output finalization --------------------------------------------------------------------------

-  if file['name'] != 'STDIN':
-    table.input_close()
-    table.output_close()
-    os.rename(file['name']+'_tmp',file['name'])
+  table.close()                                                                     # close ASCII table
+  if name != 'STDIN': os.rename(name+'_tmp',name)                                   # overwrite old one with tmp new
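Aside: the --grid option accepts absolute values or multiples of the current grid (suffix 'x'); Python 2's str.translate(None,...) deletes the listed characters. A sketch of that parsing in isolation (values made up):

  old = 32
  for n in ['64','0.5x']:
    new = int(old*float(n.translate(None,'xX'))) if n[-1].lower() == 'x' \
          else int(n.translate(None,'xX'))
    # '64' -> 64, '0.5x' -> 16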

View File

@@ -12,82 +12,54 @@ scriptName = os.path.splitext(scriptID.split()[1])[0]
 #--------------------------------------------------------------------------------------------------
 # MAIN
 #--------------------------------------------------------------------------------------------------

-identifiers = {
-        'grid':   ['a','b','c'],
-        'size':   ['x','y','z'],
-        'origin': ['x','y','z'],
-          }
-mappings = {
-        'grid':            lambda x: int(x),
-        'size':            lambda x: float(x),
-        'origin':          lambda x: float(x),
-        'homogenization':  lambda x: int(x),
-        'microstructures': lambda x: int(x),
-          }
-
 parser = OptionParser(option_class=damask.extendableOption, usage='%prog [file[s]]', description = """
 Produce VTK rectilinear mesh of structure data from geom description

 """, version = scriptID)

-parser.add_option('-n','--nodata', dest='data', action='store_false',
-                  help='omit microstructure data, just generate mesh')
+parser.add_option('-m','--nodata',
+                  dest = 'data',
+                  action = 'store_false',
+                  help = 'generate mesh without microstructure index data')

-parser.set_defaults(data = True)
+parser.set_defaults(data = True,
+                   )

 (options, filenames) = parser.parse_args()

-#--- setup file handles --------------------------------------------------------------------------
-
-files = []
-if filenames == []:
-  files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr, })
-else:
-  for name in filenames:
-    if os.path.exists(name):
-      files.append({'name':name, 'input':open(name), 'output':sys.stdout, 'croak':sys.stdout, })
-
-#--- loop over input files ------------------------------------------------------------------------
-
-for file in files:
-  file['croak'].write('\033[1m' + scriptName + '\033[0m: ' + (file['name'] if file['name'] != 'STDIN' else '') + '\n')
-
-  theTable = damask.ASCIItable(file['input'],file['output'],labels=False)
-  theTable.head_read()
+# --- loop over input files -------------------------------------------------------------------------
+
+if filenames == []: filenames = ['STDIN']
+
+for name in filenames:
+  if not (name == 'STDIN' or os.path.exists(name)): continue
+  table = damask.ASCIItable(name = name, outname = None,
+                            buffered = False, labeled = False, readonly = True)
+  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

 # --- interpret header ----------------------------------------------------------------------------

-  info = {
-          'grid':    np.zeros(3,'i'),
-          'size':    np.zeros(3,'d'),
-          'origin':  np.zeros(3,'d'),
-          'homogenization': 0,
-          'microstructures': 0,
-         }
-
-  for header in theTable.info:
-    headitems = map(str.lower,header.split())
-    if len(headitems) == 0: continue
-    if headitems[0] in mappings.keys():
-      if headitems[0] in identifiers.keys():
-        for i in xrange(len(identifiers[headitems[0]])):
-          info[headitems[0]][i] = \
-            mappings[headitems[0]](headitems[headitems.index(identifiers[headitems[0]][i])+1])
-      else:
-        info[headitems[0]] = mappings[headitems[0]](headitems[1])
-
-  file['croak'].write('grid     a b c:  %s\n'%(' x '.join(map(str,info['grid']))) + \
-                      'size     x y z:  %s\n'%(' x '.join(map(str,info['size']))) + \
-                      'origin   x y z:  %s\n'%(' : '.join(map(str,info['origin']))) + \
-                      'homogenization:  %i\n'%info['homogenization'] + \
-                      'microstructures: %i\n'%info['microstructures'])
-
-  if np.any(info['grid'] < 1):
-    file['croak'].write('invalid grid a b c.\n')
-    continue
-  if np.any(info['size'] <= 0.0):
-    file['croak'].write('invalid size x y z.\n')
-    continue
+  table.head_read()
+  info,extra_header = table.head_getGeom()
+
+  table.croak(['grid     a b c:  %s'%(' x '.join(map(str,info['grid']))),
+               'size     x y z:  %s'%(' x '.join(map(str,info['size']))),
+               'origin   x y z:  %s'%(' : '.join(map(str,info['origin']))),
+               'homogenization:  %i'%info['homogenization'],
+               'microstructures: %i'%info['microstructures'],
+              ])
+
+  errors = []
+  if np.any(info['grid'] < 1):    errors.append('invalid grid a b c.')
+  if np.any(info['size'] <= 0.0): errors.append('invalid size x y z.')
+  if errors != []:
+    table.croak(errors)
+    table.close(dismiss = True)
+    continue

 # --- generate VTK rectilinear grid ---------------------------------------------------------------

   grid = vtk.vtkRectilinearGrid()
   grid.SetDimensions([x+1 for x in info['grid']])
   for i in xrange(3):
@@ -102,43 +74,36 @@ for name in filenames:

 #--- read microstructure information --------------------------------------------------------------

   if options.data:
+    microstructure = table.microstructure_read(info['grid'])                        # read microstructure
+
     structure = vtk.vtkIntArray()
     structure.SetName('Microstructures')

-    while theTable.data_read():
-      items = theTable.data
-      if len(items) > 2:
-        if   items[1].lower() == 'of': items = [int(items[2])]*int(items[0])
-        elif items[1].lower() == 'to': items = xrange(int(items[0]),1+int(items[2]))
-        else:                          items = map(int,items)
-      else:                            items = map(int,items)
-
-      for item in items:
-        structure.InsertNextValue(item)
+    for idx in microstructure:
+      structure.InsertNextValue(idx)

     grid.GetCellData().AddArray(structure)

 # --- write data -----------------------------------------------------------------------------------

-  if file['name'] == 'STDIN':
+  if name == 'STDIN':
     writer = vtk.vtkRectilinearGridWriter()
     writer.WriteToOutputStringOn()
     writer.SetFileTypeToASCII()
     writer.SetHeader('# powered by '+scriptID)
-    if vtk.VTK_MAJOR_VERSION <= 5:
-      writer.SetInput(grid)
-    else:
-      writer.SetInputData(grid)
+    if vtk.VTK_MAJOR_VERSION <= 5: writer.SetInput(grid)
+    else:                          writer.SetInputData(grid)
     writer.Write()
-    file['output'].write(writer.GetOutputString()[0:writer.GetOutputStringLength()])
+    sys.stdout.write(writer.GetOutputString()[0:writer.GetOutputStringLength()])
   else:
-    (dir,file) = os.path.split(file['name'])
+    (dir,filename) = os.path.split(name)
     writer = vtk.vtkXMLRectilinearGridWriter()
     writer.SetDataModeToBinary()
     writer.SetCompressorTypeToZLib()
-    writer.SetFileName(os.path.join(dir,'mesh_'+os.path.splitext(file)[0]
-                                        +'.'+writer.GetDefaultFileExtension()))
+    writer.SetFileName(os.path.join(dir,'mesh_'+os.path.splitext(filename)[0]
+                                        +'.'+writer.GetDefaultFileExtension()))
-    if vtk.VTK_MAJOR_VERSION <= 5:
-      writer.SetInput(grid)
-    else:
-      writer.SetInputData(grid)
+    if vtk.VTK_MAJOR_VERSION <= 5: writer.SetInput(grid)
+    else:                          writer.SetInputData(grid)
     writer.Write()
+
+  table.close()
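Aside: the geom data section allows run-length shorthands ('count of value', 'start to end') that `microstructure_read` now decodes centrally instead of each script repeating the loop. A minimal sketch of the convention (the function name is made up for illustration):

  def decode(tokens):                                    # tokens: one whitespace-split data line
    if len(tokens) > 2 and tokens[1].lower() == 'of':
      return [int(tokens[2])]*int(tokens[0])             # '4 of 2' -> [2, 2, 2, 2]
    if len(tokens) > 2 and tokens[1].lower() == 'to':
      return range(int(tokens[0]),1+int(tokens[2]))      # '1 to 3' -> [1, 2, 3]
    return map(int,tokens)                               # plain list of indices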

View File

@@ -34,23 +34,11 @@ def periodic_3Dpad(array, rimdim=(1,1,1)):

 #--------------------------------------------------------------------------------------------------
 # MAIN
 #--------------------------------------------------------------------------------------------------

-identifiers = {
-        'grid':   ['a','b','c'],
-        'size':   ['x','y','z'],
-        'origin': ['x','y','z'],
-          }
-mappings = {
-        'grid':            lambda x: int(x),
-        'size':            lambda x: float(x),
-        'origin':          lambda x: float(x),
-        'homogenization':  lambda x: int(x),
-        'microstructures': lambda x: int(x),
-          }
-
 features = [
-            {'aliens': 1, 'names': ['boundary','biplane'],},
-            {'aliens': 2, 'names': ['tripleline',],},
-            {'aliens': 3, 'names': ['quadruplepoint',],}
+            {'aliens': 1, 'alias': ['boundary','biplane'],},
+            {'aliens': 2, 'alias': ['tripleline',],},
+            {'aliens': 3, 'alias': ['quadruplepoint',],}
           ]

 neighborhoods = {
@@ -101,118 +89,79 @@ boundaries, triple lines, and quadruple points.
 """, version = scriptID)

-parser.add_option('-t','--type', dest = 'type', action = 'extend', type = 'string', metavar = '<string LIST>',
-                  help = 'feature type (%s) '%(', '.join(map(lambda x:'|'.join(x['names']),features))) )
-parser.add_option('-n','--neighborhood', dest='neighborhood', choices = neighborhoods.keys(), metavar = 'string',
+parser.add_option('-t','--type',
+                  dest = 'type',
+                  action = 'extend', metavar = '<string LIST>',
+                  help = 'feature type (%s) '%(', '.join(map(lambda x:'|'.join(x['alias']),features))) )
+parser.add_option('-n','--neighborhood',
+                  dest = 'neighborhood',
+                  choices = neighborhoods.keys(), metavar = 'string',
                   help = 'type of neighborhood (%s) [neumann]'%(', '.join(neighborhoods.keys())))
-parser.add_option('-s', '--scale', dest = 'scale', type = 'float', metavar='float',
+parser.add_option('-s', '--scale',
+                  dest = 'scale',
+                  type = 'float', metavar = 'float',
                   help = 'voxel size [%default]')

-parser.set_defaults(type = [])
-parser.set_defaults(neighborhood = 'neumann')
-parser.set_defaults(scale = 1.0)
+parser.set_defaults(type = [],
+                    neighborhood = 'neumann',
+                    scale = 1.0,
+                   )

 (options,filenames) = parser.parse_args()

-if len(options.type) == 0: parser.error('please select a feature type')
-if not set(options.type).issubset(set(list(itertools.chain(*map(lambda x: x['names'],features))))):
-  parser.error('type must be chosen from (%s)...'%(', '.join(map(lambda x:'|'.join(x['names']),features))) )
+if len(options.type) == 0 or \
+   not set(options.type).issubset(set(list(itertools.chain(*map(lambda x: x['alias'],features))))):
+  parser.error('select feature type from (%s).'%(', '.join(map(lambda x:'|'.join(x['alias']),features))) )
 if 'biplane' in options.type and 'boundary' in options.type:
-  parser.error("please select only one alias for 'biplane' and 'boundary'")
+  parser.error("select only one of 'biplane' and 'boundary'.")

 feature_list = []
 for i,feature in enumerate(features):
-  for name in feature['names']:
+  for name in feature['alias']:
     for myType in options.type:
       if name.startswith(myType):
-        feature_list.append(i)                                                      # remember valid features
+        feature_list.append(i)                                                      # remember selected features
        break
#--- setup file handles --------------------------------------------------------------------------- # --- loop over input files -------------------------------------------------------------------------
files = []
if filenames == []: if filenames == []: filenames = ['STDIN']
files.append({'name':'STDIN',
'input':sys.stdin,
'output':sys.stdout,
'croak':sys.stderr,
})
else:
for name in filenames: for name in filenames:
if os.path.exists(name): if not (name == 'STDIN' or os.path.exists(name)): continue
files.append({'name':name, table = damask.ASCIItable(name = name, outname = None,
'input':open(name), buffered = False, labeled = False, readonly = True)
'output':[open(features[feature]['names'][0]+'_'+name,'w') for feature in feature_list], table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
'croak':sys.stdout,
})
#--- loop over input files ------------------------------------------------------------------------
for file in files:
file['croak'].write('\033[1m' + scriptName + '\033[0m: ' + (file['name'] if file['name'] != 'STDIN' else '') + '\n')
table = damask.ASCIItable(file['input'],file['output'][0],labels = False)
table.head_read()
# --- interpret header ---------------------------------------------------------------------------- # --- interpret header ----------------------------------------------------------------------------
info = {
'grid': np.zeros(3,'i'),
'size': np.zeros(3,'d'),
'origin': np.zeros(3,'d'),
'homogenization': 0,
'microstructures': 0,
}
newInfo = {
'grid': np.zeros(3,'i'),
'origin': np.zeros(3,'d'),
'microstructures': 0,
}
extra_header = []
for header in table.info: table.head_read()
headitems = map(str.lower,header.split()) info,extra_header = table.head_getGeom()
if len(headitems) == 0: continue # skip blank lines
if headitems[0] in mappings.keys():
if headitems[0] in identifiers.keys():
for i in xrange(len(identifiers[headitems[0]])):
info[headitems[0]][i] = \
mappings[headitems[0]](headitems[headitems.index(identifiers[headitems[0]][i])+1])
else:
info[headitems[0]] = mappings[headitems[0]](headitems[1])
else:
extra_header.append(header)
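The keyword parsing removed on the left is what head_getGeom now wraps: it scans the header for the vector keywords grid/size/origin (tagged per component) and the scalar keywords homogenization/microstructures, and returns the info dict together with all unrecognized lines. A sketch of such a parser (hypothetical helper, mirroring the removed loop):

import numpy as np

def parse_geom_header(info_lines):
    identifiers = {'grid': ['a','b','c'], 'size': ['x','y','z'], 'origin': ['x','y','z']}
    mappings    = {'grid': int, 'size': float, 'origin': float,
                   'homogenization': int, 'microstructures': int}
    info  = {'grid': np.zeros(3,'i'), 'size': np.zeros(3,'d'), 'origin': np.zeros(3,'d'),
             'homogenization': 0, 'microstructures': 0}
    extra = []
    for line in info_lines:
        items = [s.lower() for s in line.split()]
        if items and items[0] in identifiers:                      # vector keyword: tagged components
            for i,tag in enumerate(identifiers[items[0]]):
                info[items[0]][i] = mappings[items[0]](items[items.index(tag)+1])
        elif items and items[0] in mappings:                       # scalar keyword
            info[items[0]] = mappings[items[0]](items[1])
        else:
            extra.append(line)                                     # keep unrecognized header lines
    return info, extra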
file['croak'].write('grid a b c: %s\n'%(' x '.join(map(str,info['grid']))) + \ table.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))),
'size x y z: %s\n'%(' x '.join(map(str,info['size']))) + \ 'size x y z: %s'%(' x '.join(map(str,info['size']))),
'origin x y z: %s\n'%(' : '.join(map(str,info['origin']))) + \ 'origin x y z: %s'%(' : '.join(map(str,info['origin']))),
'homogenization: %i\n'%info['homogenization'] + \ 'homogenization: %i'%info['homogenization'],
'microstructures: %i\n'%info['microstructures']) 'microstructures: %i'%info['microstructures'],
])
if np.any(info['grid'] < 1): errors = []
file['croak'].write('invalid grid a b c.\n') if np.any(info['grid'] < 1): errors.append('invalid grid a b c.')
continue if np.any(info['size'] <= 0.0): errors.append('invalid size x y z.')
if np.any(info['size'] <= 0.0): if errors != []:
file['croak'].write('invalid size x y z.\n') table.croak(errors)
table.close(dismiss = True)
continue continue
# --- read data ------------------------------------------------------------------------------------ # --- read data ------------------------------------------------------------------------------------
microstructure = np.zeros(info['grid'].prod(),'i') # initialize as flat array
i = 0
while table.data_read(): microstructure = table.microstructure_read(info['grid']).reshape(info['grid'],order='F') # read microstructure
items = table.data
if len(items) > 2:
if items[1].lower() == 'of': items = [int(items[2])]*int(items[0])
elif items[1].lower() == 'to': items = xrange(int(items[0]),1+int(items[2]))
else: items = map(int,items)
else: items = map(int,items)
s = len(items)
microstructure[i:i+s] = items
i += s
table.close()
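The removed loop on the left documents the compression grammar that the new table.microstructure_read call handles internally: 'n of x' repeats index x n times, 'a to b' expands to the inclusive range, and anything else is a plain whitespace-separated list of indices. A minimal sketch of that decoder (hypothetical helper, not the ASCIItable implementation):

import numpy as np

def decode_geom_line(items):                            # items: one data line, already split
    if len(items) > 2 and items[1].lower() == 'of':     # "n of x" -> x repeated n times
        return [int(items[2])]*int(items[0])
    if len(items) > 2 and items[1].lower() == 'to':     # "a to b" -> inclusive range
        return list(range(int(items[0]), int(items[2])+1))
    return list(map(int, items))                        # plain list of indices

lines = ['4 of 2', '1 to 3', '7 7 8']                   # example body
flat  = np.array([i for line in lines for i in decode_geom_line(line.split())])
# flat -> [2 2 2 2 1 2 3 7 7 8]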
neighborhood = neighborhoods[options.neighborhood] neighborhood = neighborhoods[options.neighborhood]
convoluted = np.empty([len(neighborhood)]+list(info['grid']+2),'i') convoluted = np.empty([len(neighborhood)]+list(info['grid']+2),'i')
structure = periodic_3Dpad(microstructure.reshape(info['grid'],order='F')) structure = periodic_3Dpad(microstructure)
for i,p in enumerate(neighborhood): for i,p in enumerate(neighborhood):
stencil = np.zeros((3,3,3),'i') stencil = np.zeros((3,3,3),'i')
@@ -222,7 +171,7 @@ for file in files:
p[2]+1] = 1 p[2]+1] = 1
convoluted[i,:,:,:] = ndimage.convolve(structure,stencil) convoluted[i,:,:,:] = ndimage.convolve(structure,stencil)
distance = np.ones((len(feature_list),info['grid'][0],info['grid'][1],info['grid'][2]),'d') # distance = np.ones(info['grid'],'d')
convoluted = np.sort(convoluted,axis = 0) convoluted = np.sort(convoluted,axis = 0)
uniques = np.where(convoluted[0,1:-1,1:-1,1:-1] != 0, 1,0) # initialize unique value counter (exclude myself [= 0]) uniques = np.where(convoluted[0,1:-1,1:-1,1:-1] != 0, 1,0) # initialize unique value counter (exclude myself [= 0])
@@ -233,36 +182,45 @@ for file in files:
convoluted[i,1:-1,1:-1,1:-1] != 0), # not myself? convoluted[i,1:-1,1:-1,1:-1] != 0), # not myself?
1,0) # count flip 1,0) # count flip
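Each stencil shifts the padded structure by one neighborhood offset; presumably after subtracting the voxel's own ID in the elided lines, sorting along the offset axis and counting value flips yields the number of distinct alien neighbor IDs per voxel. The same counting with periodic shifts via np.roll instead of padding plus convolution (a minimal sketch):

import numpy as np

def alien_neighbor_count(micro, offsets):
    diffs = []
    for p in offsets:                                   # shift by each neighbor offset
        shifted = micro
        for ax,s in enumerate(p):
            shifted = np.roll(shifted, -s, axis=ax)     # periodic boundary for free
        diffs.append(shifted - micro)                   # 0 where neighbor == myself
    diffs = np.sort(np.array(diffs), axis=0)            # group equal differences together
    uniques = np.where(diffs[0] != 0, 1, 0)             # first non-self value
    for i in range(1, len(offsets)):
        uniques += np.where((diffs[i] != diffs[i-1]) & (diffs[i] != 0), 1, 0)  # count flips
    return uniques

neumann = [(-1,0,0),(1,0,0),(0,-1,0),(0,1,0),(0,0,-1),(0,0,1)]  # von Neumann neighborhood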
for i,feature_id in enumerate(feature_list): for feature in feature_list:
distance[i,:,:,:] = np.where(uniques >= features[feature_id]['aliens'],0.0,1.0) # seed with 0.0 when enough unique neighbor IDs are present
for i in xrange(len(feature_list)): table = damask.ASCIItable(name = name, outname = features[feature]['alias'][0]+'_'+name,
distance[i,:,:,:] = ndimage.morphology.distance_transform_edt(distance[i,:,:,:])*[options.scale]*3 buffered = False, labeled = False, writeonly = True)
for i,feature in enumerate(feature_list): distance = np.where(uniques >= features[feature]['aliens'],0.0,1.0) # seed with 0.0 when enough unique neighbor IDs are present
newInfo['microstructures'] = int(math.ceil(distance[i,:,:,:].max())) distance = ndimage.morphology.distance_transform_edt(distance, sampling = [options.scale]*3)
# for i in xrange(len(feature_list)):
# distance[i,:,:,:] = ndimage.morphology.distance_transform_edt(distance[i,:,:,:])*[options.scale]*3
# for i,feature in enumerate(feature_list):
info['microstructures'] = int(math.ceil(distance.max()))
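Feature voxels, i.e. those with at least 'aliens' distinct foreign neighbor IDs, seed the Euclidean distance transform with 0.0; every other voxel then receives its distance to the nearest seed, scaled to physical units via the sampling argument. A minimal sketch, assuming the flip counter from above:

import numpy as np
from scipy import ndimage

uniques = np.random.randint(0, 4, (8,8,8))              # stand-in for the neighbor-flip counter
aliens, scale = 1, 0.5                                  # e.g. 'boundary': >= 1 alien neighbor
seeds = np.where(uniques >= aliens, 0.0, 1.0)           # 0.0 marks feature voxels
dist  = ndimage.morphology.distance_transform_edt(seeds, sampling=[scale]*3)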
#--- write header --------------------------------------------------------------------------------- #--- write header ---------------------------------------------------------------------------------
table = damask.ASCIItable(file['input'],file['output'][i],labels = False)
table.labels_clear()
table.info_clear() table.info_clear()
table.info_append(extra_header+[ table.info_append(extra_header+[
scriptID + ' ' + ' '.join(sys.argv[1:]), scriptID + ' ' + ' '.join(sys.argv[1:]),
"grid\ta %i\tb %i\tc %i"%(info['grid'][0],info['grid'][1],info['grid'][2],), "grid\ta {grid[0]}\tb {grid[1]}\tc {grid[2]}".format(grid=info['grid']),
"size\tx %f\ty %f\tz %f"%(info['size'][0],info['size'][1],info['size'][2],), "size\tx {size[0]}\ty {size[1]}\tz {size[2]}".format(size=info['size']),
"origin\tx %f\ty %f\tz %f"%(info['origin'][0],info['origin'][1],info['origin'][2],), "origin\tx {origin[0]}\ty {origin[1]}\tz {origin[2]}".format(origin=info['origin']),
"homogenization\t%i"%info['homogenization'], "homogenization\t{homog}".format(homog=info['homogenization']),
"microstructures\t%i"%(newInfo['microstructures']), "microstructures\t{microstructures}".format(microstructures=info['microstructures']),
]) ])
table.labels_clear()
table.head_write() table.head_write()
table.output_flush() table.output_flush()
# --- write microstructure information ------------------------------------------------------------ # --- write microstructure information ------------------------------------------------------------
formatwidth = int(math.floor(math.log10(distance[i,:,:,:].max())+1))
table.data = distance[i,:,:,:].reshape((info['grid'][0],info['grid'][1]*info['grid'][2]),order='F').transpose() formatwidth = int(math.floor(math.log10(distance.max())+1))
table.data = distance.reshape((info['grid'][0],info['grid'][1]*info['grid'][2]),order='F').transpose()
table.data_writeArray('%%%ii'%(formatwidth),delimiter=' ') table.data_writeArray('%%%ii'%(formatwidth),delimiter=' ')
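The '%%%ii' % formatwidth idiom builds a fixed-width integer format at runtime, sized to the largest index so the output columns stay aligned:

import math
w   = int(math.floor(math.log10(42)) + 1)               # widest value has 2 digits
fmt = '%%%ii' % w                                       # '%%%ii' % 2 -> '%2i'
print(fmt % 7)                                          # -> ' 7'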
file['output'][i].close()
#--- output finalization -------------------------------------------------------------------------- #--- output finalization --------------------------------------------------------------------------
if file['name'] != 'STDIN':
table.input_close() table.close()
processing/pre/geom_fromImage.py Executable file
@@ -0,0 +1,108 @@
#!/usr/bin/env python
# -*- coding: UTF-8 no BOM -*-
import os,sys,math,string
import numpy as np
from optparse import OptionParser
from PIL import Image,ImageOps
import damask
scriptID = string.replace('$Id$','\n','\\n')
scriptName = os.path.splitext(scriptID.split()[1])[0]
#--------------------------------------------------------------------------------------------------
# MAIN
#--------------------------------------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
Generate geometry description from (multilayer) images.
Microstructure index is based on gray scale value (1..256).
""", version = scriptID)
parser.add_option('--homogenization',
dest = 'homogenization',
type = 'int', metavar = 'int',
help = 'homogenization index [%default]')
parser.set_defaults(homogenization = 1,
)
(options,filenames) = parser.parse_args()
# --- loop over input files -------------------------------------------------------------------------
if filenames == []: filenames = ['STDIN']
for name in filenames:
if not (name == 'STDIN' or os.path.exists(name)): continue
table = damask.ASCIItable(name = name, outname = os.path.splitext(name)[0] +'.geom',
buffered = False, labeled = False)
table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
# --- read image ------------------------------------------------------------------------------------
img = Image.open(name).convert(mode = 'L') # open and convert to grayscale 8bit
slice = 0
while True:
try:
img.seek(slice) # advance to slice
layer = np.expand_dims(1+np.array(img,dtype='uint16'),axis = 0) # read image layer
microstructure = layer if slice == 0 else np.vstack((microstructure,layer)) # add to microstructure data
slice += 1 # advance to next slice
except EOFError:
break
# http://docs.scipy.org/doc/scipy/reference/ndimage.html
# http://scipy-lectures.github.io/advanced/image_processing/
info = {
'grid': np.array(microstructure.shape,'i')[::-1],
'size': np.array(microstructure.shape,'d')[::-1],
'origin': np.zeros(3,'d'),
'microstructures': len(np.unique(microstructure)),
'homogenization': options.homogenization,
}
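Note the [::-1]: a stacked grayscale image has numpy shape (slices, rows, columns), i.e. (c, b, a), so reversing the shape yields the geom convention (a, b, c):

import numpy as np
micro = np.zeros((5, 20, 30), 'uint16')                 # 5 slices of 20 x 30 pixels
grid  = np.array(micro.shape, 'i')[::-1]                # -> [30 20 5] = (a, b, c)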
# --- report ---------------------------------------------------------------------------------------
table.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))),
'size x y z: %s'%(' x '.join(map(str,info['size']))),
'origin x y z: %s'%(' : '.join(map(str,info['origin']))),
'homogenization: %i'%info['homogenization'],
'microstructures: %i'%info['microstructures'],
])
errors = []
if np.any(info['grid'] < 1): errors.append('invalid grid a b c.')
if np.any(info['size'] <= 0.0): errors.append('invalid size x y z.')
if errors != []:
table.croak(errors)
table.close(dismiss = True)
continue
# --- write header ---------------------------------------------------------------------------------
table.info_clear()
table.info_append([
scriptID + ' ' + ' '.join(sys.argv[1:]),
"grid\ta {grid[0]}\tb {grid[1]}\tc {grid[2]}".format(grid=info['grid']),
"size\tx {size[0]}\ty {size[1]}\tz {size[2]}".format(size=info['size']),
"origin\tx {origin[0]}\ty {origin[1]}\tz {origin[2]}".format(origin=info['origin']),
"homogenization\t{homog}".format(homog=info['homogenization']),
"microstructures\t{microstructures}".format(microstructures=info['microstructures']),
])
table.labels_clear()
table.head_write()
table.output_flush()
# --- write microstructure information ------------------------------------------------------------
formatwidth = int(math.floor(math.log10(microstructure.max())+1))
table.data = microstructure.reshape((info['grid'][1]*info['grid'][2],info['grid'][0]),order='C')
table.data_writeArray('%%%ii'%(formatwidth),delimiter = ' ')
# --- output finalization --------------------------------------------------------------------------
table.close() # close ASCII table
@@ -27,38 +27,62 @@ Generate a geometry file of a bicontinuous structure of given type. Generate a geometry file of a bicontinuous structure of given type.
""", version = scriptID) """, version = scriptID)
parser.add_option('-t','--type', dest='type', choices=minimal_surfaces, metavar='string', \ parser.add_option('-t','--type',
dest = 'type',
choices = minimal_surfaces, metavar = 'string',
help = 'type of minimal surface [primitive] {%s}' %(','.join(minimal_surfaces))) help = 'type of minimal surface [primitive] {%s}' %(','.join(minimal_surfaces)))
parser.add_option('-f','--threshold', dest='threshold', type='float', metavar='float', \ parser.add_option('-f','--threshold',
dest = 'threshold',
type = 'float', metavar = 'float',
help = 'threshold value defining minimal surface [%default]') help = 'threshold value defining minimal surface [%default]')
parser.add_option('-g', '--grid', dest='grid', type='int', nargs=3, metavar='int int int', \ parser.add_option('-g', '--grid',
dest = 'grid',
type = 'int', nargs = 3, metavar = 'int int int',
help = 'a,b,c grid of hexahedral box [%default]') help = 'a,b,c grid of hexahedral box [%default]')
parser.add_option('-s', '--size', dest='size', type='float', nargs=3, metavar='float float float', \ parser.add_option('-s', '--size',
dest = 'size',
type = 'float', nargs = 3, metavar = 'float float float',
help = 'x,y,z size of hexahedral box [%default]') help = 'x,y,z size of hexahedral box [%default]')
parser.add_option('-p', '--periods', dest='periods', type='int', metavar= 'int', \ parser.add_option('-p', '--periods',
dest = 'periods',
type = 'int', metavar = 'int',
help = 'number of repetitions of unit cell [%default]') help = 'number of repetitions of unit cell [%default]')
parser.add_option('--homogenization', dest='homogenization', type='int', metavar= 'int', \ parser.add_option('--homogenization',
dest = 'homogenization',
type = 'int', metavar = 'int',
help = 'homogenization index to be used [%default]') help = 'homogenization index to be used [%default]')
parser.add_option('--m', dest='microstructure', type='int', nargs = 2, metavar= 'int int', \ parser.add_option('--m',
dest = 'microstructure',
type = 'int', nargs = 2, metavar = 'int int',
help = 'two microstructure indices to be used [%default]') help = 'two microstructure indices to be used [%default]')
parser.add_option('-2', '--twodimensional', dest='twoD', action='store_true', \ parser.add_option('-1', '--onedimensional',
dest = 'oneD',
action = 'store_true',
help = 'output geom file with two-dimensional data arrangement [%default]') help = 'output geom file with two-dimensional data arrangement [%default]')
parser.set_defaults(type = minimal_surfaces[0]) parser.set_defaults(type = minimal_surfaces[0],
parser.set_defaults(threshold = 0.0) threshold = 0.0,
parser.set_defaults(periods = 1) periods = 1,
parser.set_defaults(grid = (16,16,16)) grid = (16,16,16),
parser.set_defaults(size = (1.0,1.0,1.0)) size = (1.0,1.0,1.0),
parser.set_defaults(homogenization = 1) homogenization = 1,
parser.set_defaults(microstructure = (1,2)) microstructure = (1,2),
parser.set_defaults(twoD = False) oneD = False,
)
(options,filename) = parser.parse_args() (options,filenames) = parser.parse_args()
# ------------------------------------------ setup file handle ------------------------------------- # --- loop over input files -------------------------------------------------------------------------
if filename == []:
file = {'output':sys.stdout, 'croak':sys.stderr} if filenames == []: filenames = ['STDIN']
else:
file = {'output':open(filename[0],'w'), 'croak':sys.stderr} for name in filenames:
if not (name == 'STDIN' or os.path.exists(name)): continue
table = damask.ASCIItable(name = name, outname = name,
buffered = False, labeled = False, writeonly = True)
table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
# ------------------------------------------ make grid -------------------------------------
info = { info = {
'grid': np.array(options.grid), 'grid': np.array(options.grid),
@@ -69,37 +93,49 @@ info = { }
} }
#--- report --------------------------------------------------------------------------------------- #--- report ---------------------------------------------------------------------------------------
file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
file['croak'].write('grid a b c: %s\n'%(' x '.join(map(str,info['grid']))) + \
'size x y z: %s\n'%(' x '.join(map(str,info['size']))) + \
'origin x y z: %s\n'%(' : '.join(map(str,info['origin']))) + \
'homogenization: %i\n'%info['homogenization'] + \
'microstructures: %i\n\n'%info['microstructures'])
if np.any(info['grid'] < 1): table.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))),
file['croak'].write('invalid grid a b c.\n') 'size x y z: %s'%(' x '.join(map(str,info['size']))),
sys.exit() 'origin x y z: %s'%(' : '.join(map(str,info['origin']))),
if np.any(info['size'] <= 0.0): 'homogenization: %i'%info['homogenization'],
file['croak'].write('invalid size x y z.\n') 'microstructures: %i'%info['microstructures'],
sys.exit() ])
errors = []
if np.any(info['grid'] < 1): errors.append('invalid grid a b c.')
if np.any(info['size'] <= 0.0): errors.append('invalid size x y z.')
if errors != []:
table.croak(errors)
table.close(dismiss = True)
continue
#--- write header --------------------------------------------------------------------------------- #--- write header ---------------------------------------------------------------------------------
header = [scriptID + ' ' + ' '.join(sys.argv[1:])+'\n']
header.append("grid\ta %i\tb %i\tc %i\n"%(info['grid'][0],info['grid'][1],info['grid'][2],)) table.labels_clear()
header.append("size\tx %f\ty %f\tz %f\n"%(info['size'][0],info['size'][1],info['size'][2],)) table.info_clear()
header.append("origin\tx %f\ty %f\tz %f\n"%(info['origin'][0],info['origin'][1],info['origin'][2],)) table.info_append([
header.append("microstructures\t%i\n"%info['microstructures']) scriptID + ' ' + ' '.join(sys.argv[1:]),
header.append("homogenization\t%i\n"%info['homogenization']) "grid\ta {grid[0]}\tb {grid[1]}\tc {grid[2]}".format(grid=info['grid']),
file['output'].write('%i\theader\n'%(len(header))+''.join(header)) "size\tx {size[0]}\ty {size[1]}\tz {size[2]}".format(size=info['size']),
"origin\tx {origin[0]}\ty {origin[1]}\tz {origin[2]}".format(origin=info['origin']),
"homogenization\t{homog}".format(homog=info['homogenization']),
"microstructures\t{microstructures}".format(microstructures=info['microstructures']),
])
table.head_write()
#--- write data ----------------------------------------------------------------------------------- #--- write data -----------------------------------------------------------------------------------
X = options.periods*2.0*math.pi*(np.arange(options.grid[0])+0.5)/options.grid[0]
Y = options.periods*2.0*math.pi*(np.arange(options.grid[1])+0.5)/options.grid[1]
Z = options.periods*2.0*math.pi*(np.arange(options.grid[2])+0.5)/options.grid[2]
for z in xrange(options.grid[2]): for z in xrange(options.grid[2]):
Z = options.periods*2.0*math.pi*(z+0.5)/options.grid[2]
for y in xrange(options.grid[1]): for y in xrange(options.grid[1]):
Y = options.periods*2.0*math.pi*(y+0.5)/options.grid[1] table.data_clear()
for x in xrange(options.grid[0]): for x in xrange(options.grid[0]):
X = options.periods*2.0*math.pi*(x+0.5)/options.grid[0] table.data_append(options.microstructure[options.threshold < surface[options.type](X[x],Y[y],Z[z])])
file['output'].write(str(options.microstructure[0]) if options.threshold > surface[options.type](X,Y,Z) if options.oneD:
else str(options.microstructure[1])) table.data_write()
file['output'].write(' ' if options.twoD else '\n') table.data_clear()
file['output'].write('\n' if options.twoD else '') table.data_write()
table.close()
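Each voxel is classified by a trigonometric level-set approximation of the selected minimal surface, evaluated at cell centers over 'periods' unit cells; the threshold test picks one of the two microstructure indices. A minimal sketch for the primitive (Schwarz P) type, whose common approximation is cos x + cos y + cos z:

import numpy as np

grid, periods, threshold = (16, 16, 16), 1, 0.0
ms = (1, 2)                                             # the two microstructure indices
x, y, z = [periods*2.0*np.pi*(np.arange(n)+0.5)/n for n in grid]
X, Y, Z = np.meshgrid(x, y, z, indexing='ij')
field = np.cos(X) + np.cos(Y) + np.cos(Z)               # Schwarz P approximation
micro = np.where(threshold < field, ms[1], ms[0])       # pick side of the level set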
@@ -32,35 +32,61 @@ Generate geometry description and material configuration from position, phase, a
""", version = scriptID) """, version = scriptID)
parser.add_option('--coordinates', dest='coordinates', type='string', metavar='string', parser.add_option('--coordinates',
dest = 'coordinates',
type = 'string', metavar = 'string',
help = 'coordinates label') help = 'coordinates label')
parser.add_option('--phase', dest='phase', type='string', metavar='string', parser.add_option('--phase',
dest = 'phase',
type = 'string', metavar = 'string',
help = 'phase label') help = 'phase label')
parser.add_option('-t', '--tolerance', dest='tolerance', type='float', metavar='float', parser.add_option('-t', '--tolerance',
dest = 'tolerance',
type = 'float', metavar = 'float',
help = 'angular tolerance for orientation squashing [%default]') help = 'angular tolerance for orientation squashing [%default]')
parser.add_option('-e', '--eulers', dest='eulers', metavar='string', parser.add_option('-e', '--eulers',
dest = 'eulers',
type = 'string', metavar = 'string',
help = 'Euler angles label') help = 'Euler angles label')
parser.add_option('-d', '--degrees', dest='degrees', action='store_true', parser.add_option('-d', '--degrees',
dest = 'degrees',
action = 'store_true',
help = 'angles are given in degrees [%default]') help = 'angles are given in degrees [%default]')
parser.add_option('-m', '--matrix', dest='matrix', metavar='string', parser.add_option('-m', '--matrix',
dest = 'matrix',
type = 'string', metavar = 'string',
help = 'orientation matrix label') help = 'orientation matrix label')
parser.add_option('-a', dest='a', metavar='string', parser.add_option('-a',
dest='a',
type = 'string', metavar = 'string',
help = 'crystal frame a vector label') help = 'crystal frame a vector label')
parser.add_option('-b', dest='b', metavar='string', parser.add_option('-b',
dest='b',
type = 'string', metavar = 'string',
help = 'crystal frame b vector label') help = 'crystal frame b vector label')
parser.add_option('-c', dest='c', metavar='string', parser.add_option('-c',
dest = 'c',
type = 'string', metavar='string',
help = 'crystal frame c vector label') help = 'crystal frame c vector label')
parser.add_option('-q', '--quaternion', dest='quaternion', metavar='string', parser.add_option('-q', '--quaternion',
dest = 'quaternion',
type = 'string', metavar='string',
help = 'quaternion label') help = 'quaternion label')
parser.add_option( '--axes', dest='axes', nargs=3, metavar=' '.join(['string']*3), parser.add_option('--axes',
dest = 'axes',
type = 'string', nargs = 3, metavar = ' '.join(['string']*3),
help = 'orientation coordinate frame in terms of position coordinate frame [same]') help = 'orientation coordinate frame in terms of position coordinate frame [same]')
parser.add_option('-s', '--symmetry', dest='symmetry', action='extend', parser.add_option('-s', '--symmetry',
metavar='<string LIST>', dest = 'symmetry',
help = 'crystal symmetry [%s] {%s} '%(damask.Symmetry.lattices[-1], action = 'extend', metavar = '<string LIST>',
', '.join(damask.Symmetry.lattices[1:]))) help = 'crystal symmetry %default {{{}}} '.format(', '.join(damask.Symmetry.lattices[1:])))
parser.add_option('--homogenization', dest='homogenization', type='int', metavar='int', parser.add_option('--homogenization',
dest = 'homogenization',
type = 'int', metavar = 'int',
help = 'homogenization index to be used [%default]') help = 'homogenization index to be used [%default]')
parser.add_option('--crystallite', dest='crystallite', type='int', metavar='int', parser.add_option('--crystallite',
dest = 'crystallite',
type = 'int', metavar = 'int',
help = 'crystallite index to be used [%default]') help = 'crystallite index to be used [%default]')
parser.set_defaults(symmetry = [damask.Symmetry.lattices[-1]], parser.set_defaults(symmetry = [damask.Symmetry.lattices[-1]],
@@ -82,7 +108,7 @@ input = [options.eulers != None, if np.sum(input) != 1: parser.error('needs exactly one orientation input format...') if np.sum(input) != 1: parser.error('needs exactly one orientation input format...')
if np.sum(input) != 1: parser.error('needs exactly one orientation input format...') if np.sum(input) != 1: parser.error('needs exactly one orientation input format...')
if options.axes != None and not set(options.axes).issubset(set(['x','+x','-x','y','+y','-y','z','+z','-z'])): if options.axes != None and not set(options.axes).issubset(set(['x','+x','-x','y','+y','-y','z','+z','-z'])):
parser.error('invalid axes %s %s %s'%tuple(options.axes)) parser.error('invalid axes {axes[0]} {axes[1]} {axes[2]}'.format(axes=options.axes))
(label,dim,inputtype) = [(options.eulers,3,'eulers'), (label,dim,inputtype) = [(options.eulers,3,'eulers'),
([options.a,options.b,options.c],[3,3,3],'frame'), ([options.a,options.b,options.c],[3,3,3],'frame'),
@@ -90,36 +116,33 @@ if options.axes != None and not set(options.axes).issubset(set(['x','+x','-x','y
(options.quaternion,4,'quaternion'), (options.quaternion,4,'quaternion'),
][np.where(input)[0][0]] # select input label that was requested ][np.where(input)[0][0]] # select input label that was requested
toRadians = math.pi/180.0 if options.degrees else 1.0 # rescale degrees to radians toRadians = math.pi/180.0 if options.degrees else 1.0 # rescale degrees to radians
options.tolerance *= toRadians # angular tolerance in radians options.tolerance *= toRadians # ensure angular tolerance in radians
# --- loop over input files ------------------------------------------------------------------------- # --- loop over input files -------------------------------------------------------------------------
if filenames == []:
filenames = ['STDIN'] if filenames == []: filenames = ['STDIN']
for name in filenames: for name in filenames:
if name == 'STDIN': if not (name == 'STDIN' or os.path.exists(name)): continue
file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr} table = damask.ASCIItable(name = name, outname = os.path.splitext(name)[0] + '.geom',
file['croak'].write('\033[1m'+scriptName+'\033[0m\n') buffered = False)
else: table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
if not os.path.exists(name): continue
file = {'name':name, # ------------------------------------------ read head ---------------------------------------
'input':open(name),
'output':open(name + '_tmp','w'),
'croak':sys.stderr}
file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
table = damask.ASCIItable(file['input'],file['output'],buffered=False) # make unbuffered ASCII_table
table.head_read() # read ASCII header info table.head_read() # read ASCII header info
# ------------------------------------------ sanity checks ---------------------------------------
errors = [] errors = []
if table.label_dimension(options.coordinates) != 2: if table.label_dimension(options.coordinates) != 2:
errors.append('coordinates %s need to have two dimensions...'%options.coordinates) errors.append('coordinates {} need to have two dimensions.'.format(options.coordinates))
if not np.all(table.label_dimension(label) == dim): if not np.all(table.label_dimension(label) == dim):
errors.append('orientation %s needs to have dimension %i...\n'%(label,dim)) errors.append('orientation {} needs to have dimension {}.'.format(label,dim))
if options.phase != None and table.label_dimension(options.phase) != 1: if options.phase != None and table.label_dimension(options.phase) != 1:
errors.append('phase column %s is not scalar...'%options.phase) errors.append('phase column {} is not scalar.'.format(options.phase))
if errors == []: if errors == []: # so far no errors?
table.data_readArray([options.coordinates,label]+([] if options.phase == None else [options.phase])) table.data_readArray([options.coordinates,label]+([] if options.phase == None else [options.phase]))
if options.phase == None: if options.phase == None:
@@ -135,10 +158,10 @@ for name in filenames:
if nX*nY != len(table.data) \ if nX*nY != len(table.data) \
or np.any(np.abs(np.log10((coordsX[1:]-coordsX[:-1])/dX)) > 0.01) \ or np.any(np.abs(np.log10((coordsX[1:]-coordsX[:-1])/dX)) > 0.01) \
or np.any(np.abs(np.log10((coordsY[1:]-coordsY[:-1])/dY)) > 0.01): or np.any(np.abs(np.log10((coordsY[1:]-coordsY[:-1])/dY)) > 0.01):
errors.append('data is not on square grid...') errors.append('data is not on square grid.')
if errors != []: if errors != []:
file['croak'].write('\n'.join(errors)+'\n') table.croak(errors)
table.close(dismiss = True) table.close(dismiss = True)
continue continue
@@ -158,8 +181,7 @@ for name in filenames:
myRank = 0 # rank of current grid point myRank = 0 # rank of current grid point
for y in xrange(nY): for y in xrange(nY):
for x in xrange(nX): for x in xrange(nX):
if (myRank+1)%max(1,nX*nY/100) == 0: if (myRank+1)%(nX*nY/100.) < 1: table.croak('.',False)
file['croak'].write('.')
myData = table.data[index[myRank]] myData = table.data[index[myRank]]
mySym = options.symmetry[min(int(myData[colPhase]),len(options.symmetry))-1] # select symmetry from option (take last specified option for all with higher index) mySym = options.symmetry[min(int(myData[colPhase]),len(options.symmetry))-1] # select symmetry from option (take last specified option for all with higher index)
if inputtype == 'eulers': if inputtype == 'eulers':
@@ -195,14 +217,15 @@ for name in filenames:
if breaker: break if breaker: break
if microstructure[myRank] == 0: # no other orientation resembled me if microstructure[myRank] == 0: # no other orientation resembled me
nGrains += 1 nGrains += 1 # make new grain ...
microstructure[myRank] = nGrains microstructure[myRank] = nGrains # ... and assign to me
symQuats.append(o.equivalentQuaternions()) # store all symmetrically equivalent orientations for future comparison symQuats.append(o.equivalentQuaternions()) # store all symmetrically equivalent orientations for future comparison
phases.append(myData[colPhase]) # store phase info for future reporting phases.append(myData[colPhase]) # store phase info for future reporting
myRank += 1 myRank += 1
file['croak'].write('\n') table.croak('')
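Grain squashing compares each point's orientation against one representative per grain found so far: for unit quaternions q1, q2 the misorientation angle is 2 arccos(|q1 . q2|), and the point joins the first grain within tolerance (the script additionally checks all symmetrically equivalent quaternions). A minimal sketch without crystal symmetry:

import numpy as np

def misorientation(q1, q2):                             # angle (rad) between unit quaternions
    return 2.0*np.arccos(min(1.0, abs(np.dot(q1, q2))))

orientations  = np.random.randn(10, 4)
orientations /= np.linalg.norm(orientations, axis=1, keepdims=True)

grains, tol = [], np.radians(5.0)
microstructure = []
for q in orientations:
    for gid,rep in enumerate(grains):
        if misorientation(q, rep) <= tol:               # resembles existing grain
            microstructure.append(gid+1)
            break
    else:                                               # no other orientation resembled me:
        grains.append(q)                                # make new grain ...
        microstructure.append(len(grains))              # ... and assign to me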
# --- generate header ---------------------------------------------------------------------------- # --- generate header ----------------------------------------------------------------------------
info = { info = {
@@ -218,11 +241,12 @@ for name in filenames:
'homogenization': options.homogenization, 'homogenization': options.homogenization,
} }
file['croak'].write('grid a b c: %s\n'%(' x '.join(map(str,info['grid']))) + \ table.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))),
'size x y z: %s\n'%(' x '.join(map(str,info['size']))) + \ 'size x y z: %s'%(' x '.join(map(str,info['size']))),
'origin x y z: %s\n'%(' : '.join(map(str,info['origin']))) + \ 'origin x y z: %s'%(' : '.join(map(str,info['origin']))),
'homogenization: %i\n'%info['homogenization'] + \ 'homogenization: %i'%info['homogenization'],
'microstructures: %i\n'%info['microstructures']) 'microstructures: %i'%info['microstructures'],
])
# --- write header --------------------------------------------------------------------------------- # --- write header ---------------------------------------------------------------------------------
@@ -246,11 +270,11 @@ for name in filenames:
table.info_clear() table.info_clear()
table.info_append([ table.info_append([
scriptID + ' ' + ' '.join(sys.argv[1:]), scriptID + ' ' + ' '.join(sys.argv[1:]),
"grid\ta %i\tb %i\tc %i"%(info['grid'][0],info['grid'][1],info['grid'][2],), "grid\ta {grid[0]}\tb {grid[1]}\tc {grid[2]}".format(grid=info['grid']),
"size\tx %f\ty %f\tz %f"%(info['size'][0],info['size'][1],info['size'][2],), "size\tx {size[0]}\ty {size[1]}\tz {size[2]}".format(size=info['size']),
"origin\tx %f\ty %f\tz %f"%(info['origin'][0],info['origin'][1],info['origin'][2],), "origin\tx {origin[0]}\ty {origin[1]}\tz {origin[2]}".format(origin=info['origin']),
"homogenization\t%i"%info['homogenization'], "homogenization\t{homog}".format(homog=info['homogenization']),
"microstructures\t%i"%(info['microstructures']), "microstructures\t{microstructures}".format(microstructures=info['microstructures']),
config_header, config_header,
]) ])
table.head_write() table.head_write()
@@ -263,6 +287,3 @@ for name in filenames:
#--- output finalization -------------------------------------------------------------------------- #--- output finalization --------------------------------------------------------------------------
table.close() table.close()
if file['name'] != 'STDIN':
os.rename(file['name']+'_tmp',
os.path.splitext(file['name'])[0] + '.geom')
@@ -3,6 +3,7 @@
import os,re,sys,math,string import os,re,sys,math,string
import numpy as np import numpy as np
import multiprocessing
from optparse import OptionParser from optparse import OptionParser
import damask import damask
@@ -30,12 +31,20 @@ def meshgrid2(*arrs): ans.insert(0,arr2) ans.insert(0,arr2) return tuple(ans) return tuple(ans)
ans.insert(0,arr2) ans.insert(0,arr2)
return tuple(ans) return tuple(ans)
def laguerreTessellation(undeformed, coords, weights, grain): def findClosestSeed(fargs):
point, seeds, weightssquared = fargs
tmp = np.repeat(point.reshape(3,1), len(seeds), axis=1).T
dist = np.sum((tmp - seeds)*(tmp - seeds),axis=1) - weightssquared
return np.argmin(dist) # seed point closest to point
weight = np.power(np.tile(weights, 27),2) # Laguerre weights (squared)
micro = np.zeros(undeformed.shape[0]) def laguerreTessellation(undeformed, coords, weights, grains, nonperiodic = False, cpus = 2):
N = coords.shape[0] # Number of seeds points
periodic = np.array([ copies = \
np.array([
[ 0, 0, 0 ],
]).astype(float) if nonperiodic else \
np.array([
[ -1,-1,-1 ], [ -1,-1,-1 ],
[ 0,-1,-1 ], [ 0,-1,-1 ],
[ 1,-1,-1 ], [ 1,-1,-1 ],
@@ -65,251 +74,280 @@ def laguerreTessellation(undeformed, coords, weights, grain):
[ 1, 1, 1 ], [ 1, 1, 1 ],
]).astype(float) ]).astype(float)
for i,vec in enumerate(periodic): squaredweights = np.power(np.tile(weights,len(copies)),2) # Laguerre weights (squared, size N*n)
# micro = np.zeros(undeformed.shape[0],'i')
N = coords.shape[0] # Number of seeds points
for i,vec in enumerate(copies): # periodic copies of seed points (size N*n)
seeds = np.append(seeds, coords+vec, axis=0) if i > 0 else coords+vec seeds = np.append(seeds, coords+vec, axis=0) if i > 0 else coords+vec
for i,point in enumerate(undeformed): arguments = [[arg] + [seeds,squaredweights] for arg in list(undeformed)]
tmp = np.repeat(point.reshape(3,1), N*27, axis=1).T # Initialize workers
dist = np.sum((tmp - seeds)*(tmp - seeds),axis=1) - weight pool = multiprocessing.Pool(processes = cpus)
micro[i] = grain[np.argmin(dist)%N]
return micro # Evaluate function
result = pool.map_async(findClosestSeed, arguments)
# closestSeeds = np.array(pool.map_async(findClosestSeed, arguments),'i')
pool.close()
pool.join()
closestSeeds = np.array(result.get()).flatten()
return grains[closestSeeds%N]
# for i,point in enumerate(undeformed):
# tmp = np.repeat(point.reshape(3,1), N*len(copies), axis=1).T
# dist = np.sum((tmp - seeds)*(tmp - seeds),axis=1) - squaredweights
# micro[i] = grains[np.argmin(dist)%N]
#
# return micro
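findClosestSeed implements the Laguerre (power) distance |p - s|^2 - w^2: each voxel joins the seed minimizing it, with periodicity handled by the 27 tiled seed copies and the final modulo N. The same assignment, serial and vectorized (a sketch; the script parallelizes it with multiprocessing.Pool):

import numpy as np

def laguerre_assign(points, seeds, weights):            # power distance |p-s|^2 - w^2
    d2 = ((points[:,None,:] - seeds[None,:,:])**2).sum(axis=2)
    return np.argmin(d2 - weights[None,:]**2, axis=1)   # index of closest seed per point

points  = np.random.rand(5, 3)
seeds   = np.random.rand(4, 3)
weights = np.zeros(4)                                   # zero weights -> plain Voronoi
print(laguerre_assign(points, seeds, weights))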
# -------------------------------------------------------------------- # --------------------------------------------------------------------
# MAIN # MAIN
# -------------------------------------------------------------------- # --------------------------------------------------------------------
identifiers = {
'grid': ['a','b','c'],
'size': ['x','y','z'],
'origin': ['x','y','z'],
}
mappings = {
'grid': lambda x: int(x),
'size': lambda x: float(x),
'origin': lambda x: float(x),
'homogenization': lambda x: int(x),
'microstructures': lambda x: int(x),
}
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """ parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
Generate geometry description and material configuration by standard Voronoi tessellation of given seeds file. Generate geometry description and material configuration by standard Voronoi tessellation of given seeds file.
""", version = scriptID) """, version = scriptID)
parser.add_option('-g', '--grid', dest='grid', type='int', nargs = 3, metavar=' '.join(['int']*3), parser.add_option('-g', '--grid',
dest = 'grid',
type = 'int', nargs = 3, metavar = ' '.join(['int']*3),
help = 'a,b,c grid of hexahedral box [from seeds file]') help = 'a,b,c grid of hexahedral box [from seeds file]')
parser.add_option('-s', '--size', dest='size', type='float', nargs = 3, metavar=' '.join(['float']*3), parser.add_option('-s', '--size',
help='x,y,z size of hexahedral box [1.0 along largest grid point number]') dest = 'size',
parser.add_option('-o', '--origin', dest='origin', type='float', nargs = 3, metavar=' '.join(['float']*3), type = 'float', nargs = 3, metavar=' '.join(['float']*3),
help = 'x,y,z size of hexahedral box [from seeds file or 1.0 along largest grid point number]')
parser.add_option('-o', '--origin',
dest = 'origin',
type = 'float', nargs = 3, metavar=' '.join(['float']*3),
help = 'offset from old to new origin of grid') help = 'offset from old to new origin of grid')
parser.add_option('--homogenization', dest='homogenization', type='int', metavar = 'int', parser.add_option('-p', '--position',
dest = 'position',
type = 'string', metavar = 'string',
help = 'column label for seed positions [%default]')
parser.add_option('-w', '--weight',
dest = 'weight',
type = 'string', metavar = 'string',
help = 'column label for seed weights [%default]')
parser.add_option('-m', '--microstructure',
dest = 'microstructure',
type = 'string', metavar = 'string',
help = 'column label for seed microstructures [%default]')
parser.add_option('-e', '--eulers',
dest = 'eulers',
type = 'string', metavar = 'string',
help = 'column label for seed Euler angles [%default]')
parser.add_option('--axes',
dest = 'axes',
type = 'string', nargs = 3, metavar = ' '.join(['string']*3),
help = 'orientation coordinate frame in terms of position coordinate frame [same]')
parser.add_option('--homogenization',
dest = 'homogenization',
type = 'int', metavar = 'int',
help = 'homogenization index to be used [%default]') help = 'homogenization index to be used [%default]')
parser.add_option('--phase', dest='phase', type='int', metavar = 'int', parser.add_option('--crystallite',
help='phase index to be used [%default]') dest = 'crystallite',
parser.add_option('--crystallite', dest='crystallite', type='int', metavar = 'int', type = 'int', metavar = 'int',
help = 'crystallite index to be used [%default]') help = 'crystallite index to be used [%default]')
parser.add_option('-c', '--configuration', dest='config', action='store_true', parser.add_option('--phase',
help='output material configuration [%default]') dest = 'phase',
parser.add_option('-r', '--rnd', dest='randomSeed', type='int', metavar='int', type = 'int', metavar = 'int',
help = 'phase index to be used [%default]')
parser.add_option('-r', '--rnd',
dest = 'randomSeed',
type = 'int', metavar='int',
help = 'seed of random number generator for second phase distribution [%default]') help = 'seed of random number generator for second phase distribution [%default]')
parser.add_option('--secondphase', type='float', dest='secondphase', metavar= 'float', parser.add_option('--secondphase',
dest = 'secondphase',
type = 'float', metavar= 'float',
help = 'volume fraction of randomly distribute second phase [%default]') help = 'volume fraction of randomly distribute second phase [%default]')
parser.add_option('-l', '--laguerre', dest='laguerre', action='store_true', parser.add_option('-l', '--laguerre',
dest = 'laguerre',
action = 'store_true',
help = 'use Laguerre (weighted Voronoi) tessellation [%default]') help = 'use Laguerre (weighted Voronoi) tessellation [%default]')
parser.set_defaults(grid = (0,0,0), parser.add_option('--cpus',
size = (0.0,0.0,0.0), dest = 'cpus',
origin = (0.0,0.0,0.0), type = 'int', metavar = 'int',
help = 'number of parallel processes to use for Laguerre tessellation [%default]')
parser.add_option('--nonperiodic',
dest = 'nonperiodic',
action = 'store_true',
help = 'use nonperiodic tessellation [%default]')
parser.set_defaults(grid = None,
size = None,
origin = None,
position = 'pos',
weight = 'weight',
microstructure = 'microstructure',
eulers = 'Euler',
homogenization = 1, homogenization = 1,
phase = 1,
crystallite = 1, crystallite = 1,
phase = 1,
secondphase = 0.0, secondphase = 0.0,
config = False, cpus = 2,
laguerre = False, laguerre = False,
nonperiodic = False,
randomSeed = None, randomSeed = None,
) )
(options,filenames) = parser.parse_args() (options,filenames) = parser.parse_args()
if options.secondphase > 1.0 or options.secondphase < 0.0: if options.secondphase > 1.0 or options.secondphase < 0.0:
parser.error('volume fraction of second phase (%f) out of bounds...'%options.secondphase) parser.error('volume fraction of second phase ({}) out of bounds.'.format(options.secondphase))
# --- loop over input files ------------------------------------------------------------------------- # --- loop over input files -------------------------------------------------------------------------
if filenames == []:
filenames = ['STDIN'] if filenames == []: filenames = ['STDIN']
for name in filenames: for name in filenames:
if name == 'STDIN': if not (name == 'STDIN' or os.path.exists(name)): continue
file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr} table = damask.ASCIItable(name = name,
file['croak'].write('\033[1m'+scriptName+'\033[0m\n') outname = os.path.splitext(name)[0]+'.geom',
else: buffered = False)
if not os.path.exists(name): continue table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
file = {'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr}
file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
table = damask.ASCIItable(file['input'],file['output'],buffered=False) # make unbuffered ASCII_table # --- read header ----------------------------------------------------------------------------
table.head_read() # read ASCII header info
table.head_read()
info,extra_header = table.head_getGeom()
if options.grid != None: info['grid'] = options.grid
if options.size != None: info['size'] = options.size
if options.origin != None: info['origin'] = options.origin
# ------------------------------------------ sanity checks ---------------------------------------
remarks = []
errors = []
labels = [] labels = []
if np.all(table.label_index(['1_coords','2_coords','3_coords']) != -1):
coords = ['1_coords','2_coords','3_coords'] hasGrains = table.label_dimension(options.microstructure) == 1
elif np.all(table.label_index(['x','y','z']) != -1): hasEulers = table.label_dimension(options.eulers) == 3
coords = ['x','y','z'] hasWeights = table.label_dimension(options.weight) == 1
if np.any(info['grid'] < 1): errors.append('invalid grid a b c.')
if np.any(info['size'] <= 0.0) \
and np.all(info['grid'] < 1): errors.append('invalid size x y z.')
else: else:
file['croak'].write('no coordinate data (1/2/3_coords | x/y/z) found ...')
continue
labels += coords
hasEulers = np.all(table.label_index(['phi1','Phi','phi2']) != -1)
if hasEulers:
labels += ['phi1','Phi','phi2']
hasGrains = table.label_index('microstructure') != -1
if hasGrains:
labels += ['microstructure']
hasWeight = table.label_index('weight') != -1
if hasWeight:
labels += ['weight']
table.data_readArray(labels)
coords = table.data[:,table.label_index(coords)]
eulers = table.data[:,table.label_index(['phi1','Phi','phi2'])] if hasEulers else np.zeros(3*len(coords))
grain = table.data[:,table.label_index('microstructure')] if hasGrains else 1+np.arange(len(coords))
weights = table.data[:,table.label_index('weight')] if hasWeight else np.zeros(len(coords))
grainIDs = np.unique(grain).astype('i')
#--- interpret header ----------------------------------------------------------------------------
info = {
'grid': np.zeros(3,'i'),
'size': np.array(options.size),
'origin': np.zeros(3,'d'),
'microstructures': 0,
'homogenization': options.homogenization,
}
newInfo = {
'microstructures': 0,
}
extra_header = []
for header in table.info:
headitems = map(str.lower,header.split())
if len(headitems) == 0: continue
if headitems[0] in mappings.keys():
if headitems[0] in identifiers.keys():
for i in xrange(len(identifiers[headitems[0]])):
info[headitems[0]][i] = \
mappings[headitems[0]](headitems[headitems.index(identifiers[headitems[0]][i])+1])
else:
info[headitems[0]] = mappings[headitems[0]](headitems[1])
else:
extra_header.append(header)
if info['microstructures'] != len(grainIDs):
file['croak'].write('grain data not matching grain count (%i)...\n'%(len(grainIDs)))
info['microstructures'] = len(grainIDs)
if 0 not in options.grid: # user-specified grid
info['grid'] = np.array(options.grid)
for i in xrange(3): for i in xrange(3):
if info['size'][i] <= 0.0: # any invalid size? if info['size'][i] <= 0.0: # any invalid size?
info['size'][i] = float(info['grid'][i])/max(info['grid']) info['size'][i] = float(info['grid'][i])/max(info['grid']) # normalize to grid
file['croak'].write('rescaling size %s...\n'%{0:'x',1:'y',2:'z'}[i]) remarks.append('rescaling size {} to {}...'.format({0:'x',1:'y',2:'z'}[i],info['size'][i]))
file['croak'].write('grains to map: %i\n'%info['microstructures'] + \ if table.label_dimension(options.position) != 3:
'grid a b c: %s\n'%(' x '.join(map(str,info['grid']))) + \ errors.append('position columns "{}" have dimension {}.'.format(options.position,
'size x y z: %s\n'%(' x '.join(map(str,info['size']))) + \ table.label_dimension(options.position)))
'origin x y z: %s\n'%(' : '.join(map(str,info['origin']))) + \ else:
'homogenization: %i\n'%info['homogenization']) labels += [options.position]
if np.any(info['grid'] < 1): if not hasEulers: remarks.append('missing seed orientations...')
file['croak'].write('invalid grid a b c.\n') else: labels += [options.eulers]
continue if not hasGrains: remarks.append('missing seed microstructure indices...')
if np.any(info['size'] <= 0.0): else: labels += [options.microstructure]
file['croak'].write('invalid size x y z.\n') if options.laguerre and not hasWeights: remarks.append('missing seed weights...')
continue else: labels += [options.weight]
if info['microstructures'] == 0:
file['croak'].write('no grain info found.\n') if remarks != []: table.croak(remarks)
if errors != []:
table.croak(errors)
table.close(dismiss=True)
continue continue
#--- prepare data --------------------------------------------------------------------------------- # ------------------------------------------ read seeds ---------------------------------------
eulers = eulers.T
#--- switch according to task --------------------------------------------------------------------- table.data_readArray(labels)
if options.config: # write config file coords = table.data[:,table.label_index(options.position):table.label_index(options.position)+3]
phase = np.empty(info['microstructures'],'i') eulers = table.data[:,table.label_index(options.eulers ):table.label_index(options.eulers )+3] if hasEulers else np.zeros(3*len(coords))
phase.fill(options.phase) grains = table.data[:,table.label_index(options.microstructure)].astype('i') if hasGrains else 1+np.arange(len(coords))
phase[0:int(float(info['microstructures'])*options.secondphase)] = options.phase+1 weights = table.data[:,table.label_index(options.weight)] if hasWeights else np.zeros(len(coords))
randomSeed = int(os.urandom(4).encode('hex'), 16) if options.randomSeed == None else options.randomSeed # radom seed per file for second phase grainIDs = np.unique(grains).astype('i')
np.random.seed(randomSeed) NgrainIDs = len(grainIDs)
np.random.shuffle(phase)
formatwidth = 1+int(math.log10(info['microstructures']))
file['output'].write('#' + scriptID + ' ' + ' '.join(sys.argv[1:])+'\n')
if options.secondphase > 0.0: file['output'].write('# random seed for second phase %i\n'%randomSeed)
file['output'].write('\n<microstructure>\n')
for i,ID in enumerate(grainIDs):
file['output'].write('\n[Grain%s]\n'%(str(ID).zfill(formatwidth)) + \
'crystallite %i\n'%options.crystallite + \
'(constituent)\tphase %i\ttexture %s\tfraction 1.0\n'%(phase[i],str(ID).rjust(formatwidth)))
file['output'].write('\n<texture>\n') # --- tessellate microstructure ------------------------------------------------------------
for ID in grainIDs:
eulerID = np.nonzero(grain == ID)[0][0] # find first occurrence of this grain id
file['output'].write('\n[Grain%s]\n'%(str(ID).zfill(formatwidth)) + \
'(gauss)\tphi1 %g\tPhi %g\tphi2 %g\tscatter 0.0\tfraction 1.0\n'%(eulers[0,eulerID],
eulers[1,eulerID],
eulers[2,eulerID]))
else: # write geometry file
x = (np.arange(info['grid'][0])+0.5)*info['size'][0]/info['grid'][0] x = (np.arange(info['grid'][0])+0.5)*info['size'][0]/info['grid'][0]
y = (np.arange(info['grid'][1])+0.5)*info['size'][1]/info['grid'][1] y = (np.arange(info['grid'][1])+0.5)*info['size'][1]/info['grid'][1]
z = (np.arange(info['grid'][2])+0.5)*info['size'][2]/info['grid'][2] z = (np.arange(info['grid'][2])+0.5)*info['size'][2]/info['grid'][2]
if not options.laguerre: table.croak('tessellating...')
if options.laguerre:
undeformed = np.vstack(np.meshgrid(x, y, z)).reshape(3,-1).T
indices = laguerreTessellation(undeformed, coords, weights, grains, options.nonperiodic, options.cpus)
else:
coords = (coords*info['size']).T coords = (coords*info['size']).T
undeformed = np.vstack(map(np.ravel, meshgrid2(x, y, z))) undeformed = np.vstack(map(np.ravel, meshgrid2(x, y, z)))
file['croak'].write('tessellating...\n')
indices = damask.core.math.periodicNearestNeighbor(\ indices = damask.core.math.periodicNearestNeighbor(\
info['size'],\ info['size'],\
np.eye(3),\ np.eye(3),\
undeformed,coords)//3**3 + 1 # floor division to kill periodic images undeformed,coords)//3**3 + 1 # floor division to kill periodic images
indices = grain[indices-1] indices = grains[indices-1]
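The standard branch delegates periodic nearest-neighbor search to damask.core.math.periodicNearestNeighbor; the equivalent in pure Python tiles every seed into its 27 periodic images, queries a KD-tree, and folds the hit index back with modulo N (a sketch, assuming scipy.spatial is acceptable):

import numpy as np
from scipy.spatial import cKDTree

def periodic_nearest(points, seeds, size):
    shifts = np.array([(i, j, k) for i in (-1, 0, 1)
                                 for j in (-1, 0, 1)
                                 for k in (-1, 0, 1)], dtype=float)
    tiled = np.vstack([seeds + s*size for s in shifts]) # 27 periodic copies of each seed
    _, idx = cKDTree(tiled).query(points)               # nearest tiled seed per point
    return idx % len(seeds)                             # fold periodic images back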
else :
undeformed = np.vstack(np.meshgrid(x, y, z)).reshape(3,-1).T
indices = laguerreTessellation(undeformed, coords, weights, grain)
newInfo['microstructures'] = info['microstructures']
for i in grainIDs:
if i not in indices: newInfo['microstructures'] -= 1
file['croak'].write(('all' if newInfo['microstructures'] == info['microstructures'] else 'only') +
' %i'%newInfo['microstructures'] +
('' if newInfo['microstructures'] == info['microstructures'] else \
' out of %i'%info['microstructures']) +
' grains mapped.\n')
# --- write header --------------------------------------------------------------------------------- # --- write header ---------------------------------------------------------------------------------
grainIDs = np.intersect1d(grainIDs,indices)
info['microstructures'] = len(grainIDs)
if info['homogenization'] == 0: info['homogenization'] = options.homogenization
table.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))),
'size x y z: %s'%(' x '.join(map(str,info['size']))),
'origin x y z: %s'%(' : '.join(map(str,info['origin']))),
'homogenization: %i'%info['homogenization'],
'microstructures: %i%s'%(info['microstructures'],
(' out of %i'%NgrainIDs if NgrainIDs != info['microstructures'] else '')),
])
config_header = []
formatwidth = 1+int(math.log10(info['microstructures']))
phase = options.phase * np.ones(info['microstructures'],'i')
if int(options.secondphase*info['microstructures']) > 0:
phase[0:int(options.secondphase*info['microstructures'])] += 1
randomSeed = int(os.urandom(4).encode('hex'), 16) if options.randomSeed == None \
else options.randomSeed # random seed for second phase
np.random.seed(randomSeed)
np.random.shuffle(phase)
config_header += ['# random seed (phase shuffling): {}'.format(randomSeed)]
config_header += ['<microstructure>']
for i,ID in enumerate(grainIDs):
config_header += ['[Grain%s]'%(str(ID).zfill(formatwidth)),
'crystallite %i'%options.crystallite,
'(constituent)\tphase %i\ttexture %s\tfraction 1.0'%(phase[i],str(ID).rjust(formatwidth)),
]
if hasEulers:
config_header += ['<texture>']
for ID in grainIDs:
eulerID = np.nonzero(grains == ID)[0][0] # find first occurrence of this grain id
config_header += ['[Grain%s]'%(str(ID).zfill(formatwidth)),
'axes\t%s %s %s'%tuple(options.axes) if options.axes != None else '',
'(gauss)\tphi1 %g\tPhi %g\tphi2 %g\tscatter 0.0\tfraction 1.0'%tuple(eulers[eulerID]),
]
table.labels_clear() table.labels_clear()
table.info_clear() table.info_clear()
table.info_append(extra_header+[ table.info_append([
scriptID + ' ' + ' '.join(sys.argv[1:]), scriptID + ' ' + ' '.join(sys.argv[1:]),
"grid\ta %i\tb %i\tc %i"%(info['grid'][0],info['grid'][1],info['grid'][2],), "grid\ta {grid[0]}\tb {grid[1]}\tc {grid[2]}".format(grid=info['grid']),
"size\tx %f\ty %f\tz %f"%(info['size'][0],info['size'][1],info['size'][2],), "size\tx {size[0]}\ty {size[1]}\tz {size[2]}".format(size=info['size']),
"origin\tx %f\ty %f\tz %f"%(info['origin'][0],info['origin'][1],info['origin'][2],), "origin\tx {origin[0]}\ty {origin[1]}\tz {origin[2]}".format(origin=info['origin']),
"homogenization\t%i"%info['homogenization'], "homogenization\t{homog}".format(homog=info['homogenization']),
"microstructures\t%i"%(newInfo['microstructures']), "microstructures\t{microstructures}".format(microstructures=info['microstructures']),
config_header,
]) ])
table.head_write() table.head_write()
# --- write microstructure information ------------------------------------------------------------ # --- write microstructure information ------------------------------------------------------------
formatwidth = 1+int(math.log10(newInfo['microstructures']))
table.data = indices.reshape(info['grid'][1]*info['grid'][2],info['grid'][0]) table.data = indices.reshape(info['grid'][1]*info['grid'][2],info['grid'][0])
table.data_writeArray('%%%ii'%(formatwidth),delimiter=' ') table.data_writeArray('%%%ii'%(formatwidth),delimiter=' ')
#--- output finalization -------------------------------------------------------------------------- #--- output finalization --------------------------------------------------------------------------
table.close() table.close()
if file['name'] != 'STDIN':
os.rename(file['name']+'_tmp',
os.path.splitext(file['name'])[0] +'%s'%('_material.config' if options.config else '.geom'))
@@ -12,18 +12,6 @@ scriptName = os.path.splitext(scriptID.split()[1])[0]
#-------------------------------------------------------------------------------------------------- #--------------------------------------------------------------------------------------------------
# MAIN # MAIN
#-------------------------------------------------------------------------------------------------- #--------------------------------------------------------------------------------------------------
identifiers = {
'grid': ['a','b','c'],
'size': ['x','y','z'],
'origin': ['x','y','z'],
}
mappings = {
'grid': lambda x: int(x),
'size': lambda x: float(x),
'origin': lambda x: float(x),
'homogenization': lambda x: int(x),
'microstructures': lambda x: int(x),
}
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """ parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
compress geometry files with ranges "a to b" and/or multiples "n of x". compress geometry files with ranges "a to b" and/or multiples "n of x".
@@ -32,84 +20,55 @@ compress geometry files with ranges "a to b" and/or multiples "n of x".
(options, filenames) = parser.parse_args() (options, filenames) = parser.parse_args()
# ------------------------------------------ setup file handles ----------------------------------- # --- loop over input files -------------------------------------------------------------------------
if filenames == []: filenames = ['STDIN']
files = []
if filenames == []:
files.append({'name':'STDIN',
'input':sys.stdin,
'output':sys.stdout,
'croak':sys.stderr})
else:
for name in filenames: for name in filenames:
if os.path.exists(name): if not (name == 'STDIN' or os.path.exists(name)): continue
files.append({'name':name, table = damask.ASCIItable(name = name, outname = name+'_tmp',
'croak':sys.stdout}) buffered = False, labeled = False)
table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
# ------------------------------------------ loop over input files -------------------------------- # --- interpret header ----------------------------------------------------------------------------
for file in files:
file['croak'].write('\033[1m' + scriptName + '\033[0m: ' + (file['name'] if file['name'] != 'STDIN' else '') + '\n')
if file['name'] != 'STDIN': table.head_read()
file['input'] = open(file['name']) info,extra_header = table.head_getGeom()
file['output'] = open(file['name']+'_tmp','w')
table = damask.ASCIItable(file['input'],file['output'],labels = False,buffered = False) # make unbuffered ASCII_table table.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))),
table.head_read() # read ASCII header info 'size x y z: %s'%(' x '.join(map(str,info['size']))),
'origin x y z: %s'%(' : '.join(map(str,info['origin']))),
'homogenization: %i'%info['homogenization'],
'microstructures: %i'%info['microstructures'],
])
#--- interpret header ----------------------------------------------------------------------------- errors = []
info = { if np.any(info['grid'] < 1): errors.append('invalid grid a b c.')
'grid': np.zeros(3,'i'), if np.any(info['size'] <= 0.0): errors.append('invalid size x y z.')
'size': np.zeros(3,'d'), if errors != []:
'origin': np.zeros(3,'d'), table.croak(errors)
'homogenization': 0, table.close(dismiss = True)
'microstructures': 0,
}
extra_header = []
for header in table.info:
headitems = map(str.lower,header.split())
if len(headitems) == 0: continue
if headitems[0] in mappings.keys():
if headitems[0] in identifiers.keys():
for i in xrange(len(identifiers[headitems[0]])):
info[headitems[0]][i] = \
mappings[headitems[0]](headitems[headitems.index(identifiers[headitems[0]][i])+1])
else:
info[headitems[0]] = mappings[headitems[0]](headitems[1])
else:
extra_header.append(header)
file['croak'].write('grid a b c: %s\n'%(' x '.join(map(str,info['grid']))) + \
'size x y z: %s\n'%(' x '.join(map(str,info['size']))) + \
'origin x y z: %s\n'%(' : '.join(map(str,info['origin']))) + \
'homogenization: %i\n'%info['homogenization'] + \
'microstructures: %i\n'%info['microstructures'])
if np.any(info['grid'] < 1):
file['croak'].write('invalid grid a b c.\n')
continue
if np.any(info['size'] <= 0.0):
file['croak'].write('invalid size x y z.\n')
continue continue
# --- write header --------------------------------------------------------------------------------- # --- write header ---------------------------------------------------------------------------------
table.labels_clear() table.labels_clear()
table.info_clear() table.info_clear()
table.info_append(extra_header+[ table.info_append(extra_header+[
scriptID + ' ' + ' '.join(sys.argv[1:]), scriptID + ' ' + ' '.join(sys.argv[1:]),
"grid\ta %i\tb %i\tc %i"%(info['grid'][0],info['grid'][1],info['grid'][2],), "grid\ta {grid[0]}\tb {grid[1]}\tc {grid[2]}".format(grid=info['grid']),
"size\tx %e\ty %e\tz %e"%(info['size'][0],info['size'][1],info['size'][2],), "size\tx {size[0]}\ty {size[1]}\tz {size[2]}".format(size=info['size']),
"origin\tx %e\ty %e\tz %e"%(info['origin'][0],info['origin'][1],info['origin'][2],), "origin\tx {origin[0]}\ty {origin[1]}\tz {origin[2]}".format(origin=info['origin']),
"homogenization\t%i"%info['homogenization'], "homogenization\t{homog}".format(homog=info['homogenization']),
"microstructures\t%i"%(info['microstructures']), "microstructures\t{microstructures}".format(microstructures=info['microstructures']),
]) ])
table.head_write() table.head_write()
table.output_flush()
# --- write packed microstructure information ----------------------------------------------------- # --- write packed microstructure information -----------------------------------------------------
type = '' type = ''
former = -1 former = start = -1
start = -1
reps = 0 reps = 0
outputAlive = True outputAlive = True
@ -134,11 +93,12 @@ for file in files:
elif type == '.': elif type == '.':
table.data = [str(former)] table.data = [str(former)]
elif type == 'to': elif type == 'to':
table.data = ['%i to %i'%(former-reps+1,former)] table.data = ['{0} to {1}'.format(former-reps+1,former)]
elif type == 'of': elif type == 'of':
table.data = ['%i of %i'%(reps,former)] table.data = ['{0} of {1}'.format(reps,former)]
outputAlive = table.data_write(delimiter = ' ') # output processed line outputAlive = table.data_write(delimiter = ' ') # output processed line
type = '.' type = '.'
start = current start = current
reps = 1 reps = 1
@ -150,14 +110,10 @@ for file in files:
'to': ['%i to %i'%(former-reps+1,former)], 'to': ['%i to %i'%(former-reps+1,former)],
'of': ['%i of %i'%(reps,former)], 'of': ['%i of %i'%(reps,former)],
}[type] }[type]
outputAlive = table.data_write(delimiter = ' ') # output processed line outputAlive = table.data_write(delimiter = ' ') # output processed line
# ------------------------------------------ output result ---------------------------------------
outputAlive and table.output_flush() # just in case of buffered ASCII table
# --- output finalization -------------------------------------------------------------------------- # --- output finalization --------------------------------------------------------------------------
if file['name'] != 'STDIN':
table.input_close() # close input ASCII table table.close() # close ASCII table
table.output_close() # close input ASCII table if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new
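
The packing loop above emits three token types: a bare index, "a to b" for a unit-stride run, and "n of x" for n repetitions of x. A minimal sketch of the same scheme in isolation (pack() is a hypothetical helper, not part of the ASCIItable API, and the run-length threshold of three is an assumption):

def pack(indices):
    out = []
    i = 0
    while i < len(indices):
        j = i
        while j+1 < len(indices) and indices[j+1] == indices[i]: j += 1   # identical run
        if j-i+1 > 2:
            out.append('{0} of {1}'.format(j-i+1,indices[i])); i = j+1; continue
        j = i
        while j+1 < len(indices) and indices[j+1] == indices[j]+1: j += 1 # unit-stride run
        if j-i+1 > 2:
            out.append('{0} to {1}'.format(indices[i],indices[j])); i = j+1; continue
        out.append(str(indices[i])); i += 1
    return out

print(pack([1,1,1,2,3,4,5,9]))   # ['3 of 1', '2 to 5', '9']
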


@ -12,18 +12,6 @@ scriptName = os.path.splitext(scriptID.split()[1])[0]
#-------------------------------------------------------------------------------------------------- #--------------------------------------------------------------------------------------------------
# MAIN # MAIN
#-------------------------------------------------------------------------------------------------- #--------------------------------------------------------------------------------------------------
identifiers = {
'grid': ['a','b','c'],
'size': ['x','y','z'],
'origin': ['x','y','z'],
}
mappings = {
'grid': lambda x: int(x),
'size': lambda x: float(x),
'origin': lambda x: float(x),
'homogenization': lambda x: int(x),
'microstructures': lambda x: int(x),
}
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """ parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
Scales a geometry description independently in x, y, and z direction in terms of grid and/or size. Scales a geometry description independently in x, y, and z direction in terms of grid and/or size.
@ -31,103 +19,68 @@ Either absolute values or relative factors (like "0.25x") can be used.
""", version = scriptID) """, version = scriptID)
parser.add_option('-g', '--grid', dest='grid', nargs = 3, metavar = 'string string string', \ parser.add_option('-g', '--grid',
dest = 'grid',
type = 'string', nargs = 3, metavar = 'string string string',
help = 'a,b,c grid of hexahedral box [unchanged]') help = 'a,b,c grid of hexahedral box [unchanged]')
parser.add_option('-s', '--size', dest='size', nargs = 3, metavar = 'string string string', \ parser.add_option('-s', '--size',
dest = 'size',
type = 'string', nargs = 3, metavar = 'string string string',
help = 'x,y,z size of hexahedral box [unchanged]') help = 'x,y,z size of hexahedral box [unchanged]')
parser.add_option('-r', '--renumber', dest='renumber', action='store_true', \ parser.add_option('-r', '--renumber',
help='renumber microstructure indices from 1...N [%default]') dest = 'renumber',
action = 'store_true',
help = 'renumber microstructure indices from 1..N [%default]')
parser.set_defaults(renumber = False) parser.set_defaults(renumber = False,
parser.set_defaults(grid = ['0','0','0']) grid = ['0','0','0'],
parser.set_defaults(size = ['0.0','0.0','0.0']) size = ['0.0','0.0','0.0'],
)
(options, filenames) = parser.parse_args() (options, filenames) = parser.parse_args()
#--- setup file handles --------------------------------------------------------------------------- # --- loop over input files -------------------------------------------------------------------------
files = []
if filenames == []: if filenames == []: filenames = ['STDIN']
files.append({'name':'STDIN',
'input':sys.stdin,
'output':sys.stdout,
'croak':sys.stderr,
})
else:
for name in filenames: for name in filenames:
if os.path.exists(name): if not (name == 'STDIN' or os.path.exists(name)): continue
files.append({'name':name, table = damask.ASCIItable(name = name, outname = name+'_tmp',
'input':open(name), buffered = False, labeled = False)
'output':open(name+'_tmp','w'), table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
'croak':sys.stdout,
})
#--- loop over input files ------------------------------------------------------------------------
for file in files:
if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
table = damask.ASCIItable(file['input'],file['output'],labels=False)
table.head_read()
# --- interpret header ---------------------------------------------------------------------------- # --- interpret header ----------------------------------------------------------------------------
info = {
'grid': np.zeros(3,'i'),
'size': np.zeros(3,'d'),
'origin': np.zeros(3,'d'),
'homogenization': 0,
'microstructures': 0,
}
newInfo = {
'grid': np.zeros(3,'i'),
'size': np.zeros(3,'d'),
'microstructures': 0,
}
extra_header = []
for header in table.info: table.head_read()
headitems = map(str.lower,header.split()) info,extra_header = table.head_getGeom()
if len(headitems) == 0: continue
if headitems[0] in mappings.keys():
if headitems[0] in identifiers.keys():
for i in xrange(len(identifiers[headitems[0]])):
info[headitems[0]][i] = \
mappings[headitems[0]](headitems[headitems.index(identifiers[headitems[0]][i])+1])
else:
info[headitems[0]] = mappings[headitems[0]](headitems[1])
else:
extra_header.append(header)
file['croak'].write('grid a b c: %s\n'%(' x '.join(map(str,info['grid']))) + \ table.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))),
'size x y z: %s\n'%(' x '.join(map(str,info['size']))) + \ 'size x y z: %s'%(' x '.join(map(str,info['size']))),
'origin x y z: %s\n'%(' : '.join(map(str,info['origin']))) + \ 'origin x y z: %s'%(' : '.join(map(str,info['origin']))),
'homogenization: %i\n'%info['homogenization'] + \ 'homogenization: %i'%info['homogenization'],
'microstructures: %i\n'%info['microstructures']) 'microstructures: %i'%info['microstructures'],
])
if np.any(info['grid'] < 1): errors = []
file['croak'].write('invalid grid a b c.\n') if np.any(info['grid'] < 1): errors.append('invalid grid a b c.')
continue if np.any(info['size'] <= 0.0): errors.append('invalid size x y z.')
if np.any(info['size'] <= 0.0): if errors != []:
file['croak'].write('invalid size x y z.\n') table.croak(errors)
table.close(dismiss = True)
continue continue
# --- read data ------------------------------------------------------------------------------------ # --- read data ------------------------------------------------------------------------------------
microstructure = np.zeros(info['grid'].prod(),'i')
i = 0
while table.data_read(): microstructure = table.microstructure_read(info['grid']) # read microstructure
items = table.data
if len(items) > 2:
if items[1].lower() == 'of': items = [int(items[2])]*int(items[0])
elif items[1].lower() == 'to': items = xrange(int(items[0]),1+int(items[2]))
else: items = map(int,items)
else: items = map(int,items)
s = len(items)
microstructure[i:i+s] = items
i += s
# --- do work ------------------------------------------------------------------------------------ # --- do work ------------------------------------------------------------------------------------
newInfo = {
'grid': np.zeros(3,'i'),
'origin': np.zeros(3,'d'),
'microstructures': 0,
}
newInfo['grid'] = np.array([{True:round(o*float(n.translate(None,'xX'))), False: round(float(n.translate(None,'xX')))}[n[-1].lower() == 'x'] for o,n in zip(info['grid'],options.grid)],'i') newInfo['grid'] = np.array([{True:round(o*float(n.translate(None,'xX'))), False: round(float(n.translate(None,'xX')))}[n[-1].lower() == 'x'] for o,n in zip(info['grid'],options.grid)],'i')
newInfo['size'] = np.array([{True: o*float(n.translate(None,'xX')) , False: float(n.translate(None,'xX')) }[n[-1].lower() == 'x'] for o,n in zip(info['size'],options.size)],'d') newInfo['size'] = np.array([{True: o*float(n.translate(None,'xX')) , False: float(n.translate(None,'xX')) }[n[-1].lower() == 'x'] for o,n in zip(info['size'],options.size)],'d')
newInfo['grid'] = np.where(newInfo['grid'] <= 0 , info['grid'],newInfo['grid']) newInfo['grid'] = np.where(newInfo['grid'] <= 0 , info['grid'],newInfo['grid'])
@ -150,6 +103,7 @@ for file in files:
multiplicity[2], axis=2) multiplicity[2], axis=2)
# --- renumber to sequence 1...Ngrains if requested ------------------------------------------------ # --- renumber to sequence 1...Ngrains if requested ------------------------------------------------
# http://stackoverflow.com/questions/10741346/np-frequency-counts-for-unique-values-in-an-array # http://stackoverflow.com/questions/10741346/np-frequency-counts-for-unique-values-in-an-array
if options.renumber: if options.renumber:
newID = 0 newID = 0
for microstructureID,count in enumerate(np.bincount(microstructure.reshape(newInfo['grid'].prod()))): for microstructureID,count in enumerate(np.bincount(microstructure.reshape(newInfo['grid'].prod()))):
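
The loop body is cut off by the hunk; the bincount pattern it references (a sketch of the standard idiom, not necessarily the script's exact code) advances a counter for every index that actually occurs and maps the occurring indices onto the contiguous range 1..N:

import numpy as np

microstructure = np.array([4,4,7,2,7,7])
renumbered = np.empty_like(microstructure)
newID = 0
for oldID,count in enumerate(np.bincount(microstructure)):
    if count != 0:
        newID += 1
        renumbered[microstructure == oldID] = newID
print(renumbered)   # [2 2 3 1 3 3]
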
@ -160,42 +114,45 @@ for file in files:
newInfo['microstructures'] = microstructure.max() newInfo['microstructures'] = microstructure.max()
# --- report --------------------------------------------------------------------------------------- # --- report ---------------------------------------------------------------------------------------
if (any(newInfo['grid'] != info['grid'])):
file['croak'].write('--> grid a b c: %s\n'%(' x '.join(map(str,newInfo['grid']))))
if (any(newInfo['size'] != info['size'])):
file['croak'].write('--> size x y z: %s\n'%(' x '.join(map(str,newInfo['size']))))
if (newInfo['microstructures'] != info['microstructures']):
file['croak'].write('--> microstructures: %i\n'%newInfo['microstructures'])
if np.any(newInfo['grid'] < 1): remarks = []
file['croak'].write('invalid new grid a b c.\n') errors = []
continue
if np.any(newInfo['size'] <= 0.0): if (any(newInfo['grid'] != info['grid'])): remarks.append('--> grid a b c: %s'%(' x '.join(map(str,newInfo['grid']))))
file['croak'].write('invalid new size x y z.\n') if (any(newInfo['size'] != info['size'])): remarks.append('--> size x y z: %s'%(' x '.join(map(str,newInfo['size']))))
if ( newInfo['microstructures'] != info['microstructures']): remarks.append('--> microstructures: %i'%newInfo['microstructures'])
if np.any(newInfo['grid'] < 1): errors.append('invalid new grid a b c.')
if np.any(newInfo['size'] <= 0.0): errors.append('invalid new size x y z.')
if remarks != []: table.croak(remarks)
if errors != []:
table.croak(errors)
table.close(dismiss = True)
continue continue
# --- write header --------------------------------------------------------------------------------- # --- write header ---------------------------------------------------------------------------------
table.labels_clear()
table.info_clear() table.info_clear()
table.info_append(extra_header+[ table.info_append(extra_header+[
scriptID + ' ' + ' '.join(sys.argv[1:]), scriptID + ' ' + ' '.join(sys.argv[1:]),
"grid\ta %i\tb %i\tc %i"%(newInfo['grid'][0],newInfo['grid'][1],newInfo['grid'][2],), "grid\ta {grid[0]}\tb {grid[1]}\tc {grid[2]}".format(grid=newInfo['grid']),
"size\tx %f\ty %f\tz %f"%(newInfo['size'][0],newInfo['size'][1],newInfo['size'][2],), "size\tx {size[0]}\ty {size[1]}\tz {size[2]}".format(size=newInfo['size']),
"origin\tx %f\ty %f\tz %f"%(info['origin'][0],info['origin'][1],info['origin'][2],), "origin\tx {origin[0]}\ty {origin[1]}\tz {origin[2]}".format(origin=info['origin']),
"homogenization\t%i"%info['homogenization'], "homogenization\t{homog}".format(homog=info['homogenization']),
"microstructures\t%i"%(newInfo['microstructures']), "microstructures\t{microstructures}".format(microstructures=newInfo['microstructures']),
]) ])
table.labels_clear()
table.head_write() table.head_write()
table.output_flush() table.output_flush()
# --- write microstructure information ------------------------------------------------------------ # --- write microstructure information ------------------------------------------------------------
formatwidth = int(math.floor(math.log10(microstructure.max())+1)) formatwidth = int(math.floor(math.log10(microstructure.max())+1))
table.data = microstructure.reshape((newInfo['grid'][0],newInfo['grid'][1]*newInfo['grid'][2]),order='F').transpose() table.data = microstructure.reshape((newInfo['grid'][0],newInfo['grid'][1]*newInfo['grid'][2]),order='F').transpose()
table.data_writeArray('%%%ii'%(formatwidth),delimiter = ' ') table.data_writeArray('%%%ii'%(formatwidth),delimiter = ' ')
# --- output finalization -------------------------------------------------------------------------- # --- output finalization --------------------------------------------------------------------------
if file['name'] != 'STDIN':
table.input_close()
table.output_close()
os.rename(file['name']+'_tmp',file['name'])
table.close() # close ASCII table
if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new
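
The dict-indexed one-liners computing newInfo['grid'] and newInfo['size'] above are dense; the rule they implement is simply that a trailing 'x' marks a factor relative to the current value, anything else is an absolute target (zero entries are afterwards replaced by the old value via np.where). A sketch of the same rule, where interpret() is a hypothetical helper and rstrip stands in for the script's str.translate:

def interpret(old,new):
    return old*float(new.rstrip('xX')) if new[-1].lower() == 'x' else float(new.rstrip('xX'))

print(interpret(64,'0.25x'))   # 16.0  relative factor
print(interpret(64,'128'))     # 128.0 absolute value
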


@ -13,125 +13,78 @@ scriptName = os.path.splitext(scriptID.split()[1])[0]
#-------------------------------------------------------------------------------------------------- #--------------------------------------------------------------------------------------------------
# MAIN # MAIN
#-------------------------------------------------------------------------------------------------- #--------------------------------------------------------------------------------------------------
identifiers = {
'grid': ['a','b','c'],
'size': ['x','y','z'],
'origin': ['x','y','z'],
}
mappings = {
'grid': lambda x: int(x),
'size': lambda x: float(x),
'origin': lambda x: float(x),
'homogenization': lambda x: int(x),
'microstructures': lambda x: int(x),
}
parser = OptionParser(option_class=damask.extendableOption, usage='%prog [geomfile[s]]', description = """ parser = OptionParser(option_class=damask.extendableOption, usage='%prog [geomfile[s]]', description = """
Produce ASCIItable of structure data from geom description Produce ASCIItable of structure data from geom description
""", version = scriptID) """, version = scriptID)
parser.add_option('-p','--position',
dest = 'position',
type = 'string', metavar = 'string',
help = 'column label for position [%default]')
parser.set_defaults(position = 'pos',
)
(options, filenames) = parser.parse_args() (options, filenames) = parser.parse_args()
#--- setup file handles -------------------------------------------------------------------------- # --- loop over input files -------------------------------------------------------------------------
files = []
if filenames == []: if filenames == []: filenames = ['STDIN']
files.append({'name':'STDIN',
'input':sys.stdin,
'output':sys.stdout,
'croak':sys.stderr,
})
else:
for name in filenames: for name in filenames:
if os.path.exists(name): if not (name == 'STDIN' or os.path.exists(name)): continue
files.append({'name':name, table = damask.ASCIItable(name = name, outname = os.path.splitext(name)[0]+'.txt',
'croak':sys.stdout, buffered = False, labeled = False)
}) table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
#--- loop over input files ------------------------------------------------------------------------
for file in files:
if file['name'] != 'STDIN':
file['input'] = open(file['name'])
file['output'] = open(os.path.splitext(file['name'])[0]+'.txt','w')
file['croak'].write('\033[1m' + scriptName + '\033[0m' + (': '+file['name'] if file['name'] != 'STDIN' else '') + '\n')
theTable = damask.ASCIItable(file['input'],file['output'],labels = False)
theTable.head_read()
# --- interpret header ---------------------------------------------------------------------------- # --- interpret header ----------------------------------------------------------------------------
info = { table.head_read()
'grid': np.zeros(3,'i'), info,extra_header = table.head_getGeom()
'size': np.zeros(3,'d'),
'origin': np.zeros(3,'d'),
'homogenization': 0,
'microstructures': 0,
}
for header in theTable.info: table.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))),
headitems = map(str.lower,header.split()) 'size x y z: %s'%(' x '.join(map(str,info['size']))),
if len(headitems) == 0: continue 'origin x y z: %s'%(' : '.join(map(str,info['origin']))),
if headitems[0] in mappings.keys(): 'homogenization: %i'%info['homogenization'],
if headitems[0] in identifiers.keys(): 'microstructures: %i'%info['microstructures'],
for i in xrange(len(identifiers[headitems[0]])): ])
info[headitems[0]][i] = \
mappings[headitems[0]](headitems[headitems.index(identifiers[headitems[0]][i])+1])
else:
info[headitems[0]] = mappings[headitems[0]](headitems[1])
file['croak'].write('grid a b c: %s\n'%(' x '.join(map(str,info['grid']))) + \ errors = []
'size x y z: %s\n'%(' x '.join(map(str,info['size']))) + \ if np.any(info['grid'] < 1): errors.append('invalid grid a b c.')
'origin x y z: %s\n'%(' : '.join(map(str,info['origin']))) + \ if np.any(info['size'] <= 0.0): errors.append('invalid size x y z.')
'homogenization: %i\n'%info['homogenization'] + \ if errors != []:
'microstructures: %i\n'%info['microstructures']) table.croak(errors)
table.close(dismiss = True)
if np.any(info['grid'] < 1):
file['croak'].write('invalid grid a b c.\n')
continue
if np.any(info['size'] <= 0.0):
file['croak'].write('invalid size x y z.\n')
continue continue
# --- read data ------------------------------------------------------------------------------------
microstructure = table.microstructure_read(info['grid']) # read microstructure
# ------------------------------------------ assemble header --------------------------------------- # ------------------------------------------ assemble header ---------------------------------------
theTable.labels_clear() table.info_clear()
theTable.labels_append(['%i_pos'%(i+1) for i in range(3)]+['microstructure']) table.info_append(extra_header + [scriptID + '\t' + ' '.join(sys.argv[1:])])
table.labels_clear()
theTable.head_write() table.labels_append(['{dim}_{label}'.format(dim = 1+i,label = options.position) for i in range(3)]+['microstructure'])
table.head_write()
table.output_flush()
#--- generate grid -------------------------------------------------------------------------------- #--- generate grid --------------------------------------------------------------------------------
xx = np.arange(float(info['grid'][0]))/info['grid'][0]*info['size'][0]+info['origin'][0] x = (0.5 + np.arange(info['grid'][0],dtype=float))/info['grid'][0]*info['size'][0]+info['origin'][0]
yy = np.arange(float(info['grid'][1]))/info['grid'][1]*info['size'][1]+info['origin'][1] y = (0.5 + np.arange(info['grid'][1],dtype=float))/info['grid'][1]*info['size'][1]+info['origin'][1]
zz = np.arange(float(info['grid'][2]))/info['grid'][2]*info['size'][2]+info['origin'][2] z = (0.5 + np.arange(info['grid'][2],dtype=float))/info['grid'][2]*info['size'][2]+info['origin'][2]
#--- read microstructure information -------------------------------------------------------------- xx = np.tile( x, info['grid'][1]* info['grid'][2])
yy = np.tile(np.repeat(y,info['grid'][0] ),info['grid'][2])
zz = np.repeat(z,info['grid'][0]*info['grid'][1])
i = 0 table.data = np.squeeze(np.dstack((xx,yy,zz,microstructure)))
outputAlive = True table.data_writeArray()
while outputAlive and theTable.data_read():
items = theTable.data
if len(items) > 2:
if items[1].lower() == 'of': items = [int(items[2])]*int(items[0])
elif items[1].lower() == 'to': items = xrange(int(items[0]),1+int(items[2]))
else: items = map(int,items)
else: items = map(int,items)
for item in items:
theTable.data = [xx[ i%info['grid'][0]],
yy[(i/info['grid'][0])%info['grid'][1]],
zz[ i/info['grid'][0]/info['grid'][1]],
item]
i += 1
outputAlive = theTable.data_write() # output processed line
if not outputAlive: break
# ------------------------------------------ finalize output --------------------------------------- # ------------------------------------------ finalize output ---------------------------------------
theTable.output_flush() # just in case of buffered ASCII table table.close()
if file['name'] != 'STDIN':
file['input'].close() # close input ASCII table
file['output'].close() # close output ASCII table
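
The rewritten grid generation places coordinates at cell centers (the 0.5 offset) and combines them with tile/repeat so that x varies fastest and z slowest, matching the Fortran-order layout of the geometry data. A toy check for a 2 x 2 x 1 grid, assuming the same layout as above:

import numpy as np

grid = np.array([2,2,1])
size = np.array([2.0,2.0,1.0])
x = (0.5 + np.arange(grid[0]))/grid[0]*size[0]   # cell centers [0.5 1.5]
y = (0.5 + np.arange(grid[1]))/grid[1]*size[1]
z = (0.5 + np.arange(grid[2]))/grid[2]*size[2]
xx = np.tile(x,grid[1]*grid[2])                  # [0.5 1.5 0.5 1.5]  x fastest
yy = np.tile(np.repeat(y,grid[0]),grid[2])       # [0.5 0.5 1.5 1.5]
zz = np.repeat(z,grid[0]*grid[1])                # [0.5 0.5 0.5 0.5]  z slowest
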


@ -12,35 +12,29 @@ scriptName = os.path.splitext(scriptID.split()[1])[0]
#-------------------------------------------------------------------------------------------------- #--------------------------------------------------------------------------------------------------
# MAIN # MAIN
#-------------------------------------------------------------------------------------------------- #--------------------------------------------------------------------------------------------------
identifiers = {
'grid': ['a','b','c'],
'size': ['x','y','z'],
'origin': ['x','y','z'],
}
mappings = {
'grid': lambda x: int(x),
'size': lambda x: float(x),
'origin': lambda x: float(x),
'homogenization': lambda x: int(x),
'microstructures': lambda x: int(x),
}
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """ parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
translate microstructure indices (shift or substitute) and/or geometry origin. translate microstructure indices (shift or substitute) and/or geometry origin.
""", version=scriptID) """, version=scriptID)
parser.add_option('-o', '--origin', dest='origin', type='float', nargs = 3, parser.add_option('-o', '--origin',
help='offset from old to new origin of grid', metavar=' '.join(['float']*3)) dest = 'origin',
parser.add_option('-m', '--microstructure', dest='microstructure', type='int', type = 'float', nargs = 3, metavar = ' '.join(['float']*3),
help='offset from old to new microstructure indices', metavar='int') help = 'offset from old to new origin of grid')
parser.add_option('-s', '--substitute', action='extend', dest='substitute', parser.add_option('-m', '--microstructure',
help='substitutions of microstructure indices from,to,from,to,...', metavar='<string LIST>') dest = 'microstructure',
type = 'int', metavar = 'int',
help = 'offset from old to new microstructure indices')
parser.add_option('-s', '--substitute',
dest = 'substitute',
action = 'extend', metavar = '<string LIST>',
help = 'substitutions of microstructure indices from,to,from,to,...')
parser.set_defaults(origin = (0.0,0.0,0.0)) parser.set_defaults(origin = (0.0,0.0,0.0),
parser.set_defaults(microstructure = 0) microstructure = 0,
parser.set_defaults(substitute = []) substitute = [],
parser.set_defaults(twoD = False) )
(options, filenames) = parser.parse_args() (options, filenames) = parser.parse_args()
@ -48,86 +42,47 @@ sub = {}
for i in xrange(len(options.substitute)/2): # split substitution list into "from" -> "to" for i in xrange(len(options.substitute)/2): # split substitution list into "from" -> "to"
sub[int(options.substitute[i*2])] = int(options.substitute[i*2+1]) sub[int(options.substitute[i*2])] = int(options.substitute[i*2+1])
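
For example (a sketch with a hypothetical option value): --substitute 3,1,7,2 parses to the list ['3','1','7','2'] and yields sub = {3: 1, 7: 2}, so index 3 becomes 1 and index 7 becomes 2:

substitute = ['3','1','7','2']
sub = {}
for i in range(len(substitute)//2):               # split list into "from" -> "to" pairs
    sub[int(substitute[i*2])] = int(substitute[i*2+1])
print(sub)                                        # {3: 1, 7: 2}
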
#--- setup file handles --------------------------------------------------------------------------- # --- loop over input files -------------------------------------------------------------------------
files = []
if filenames == []: if filenames == []: filenames = ['STDIN']
files.append({'name':'STDIN',
'input':sys.stdin,
'output':sys.stdout,
'croak':sys.stderr,
})
else:
for name in filenames: for name in filenames:
if os.path.exists(name): if not (name == 'STDIN' or os.path.exists(name)): continue
files.append({'name':name, table = damask.ASCIItable(name = name, outname = name+'_tmp',
'input':open(name), buffered = False, labeled = False)
'output':open(name+'_tmp','w'), table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
'croak':sys.stdout,
})
#--- loop over input files ------------------------------------------------------------------------
for file in files:
file['croak'].write('\033[1m' + scriptName + '\033[0m: ' + (file['name'] if file['name'] != 'STDIN' else '') + '\n')
theTable = damask.ASCIItable(file['input'],file['output'],labels=False)
theTable.head_read()
# --- interpret header ---------------------------------------------------------------------------- # --- interpret header ----------------------------------------------------------------------------
info = {
'grid': np.zeros(3,'i'), table.head_read()
'size': np.zeros(3,'d'), info,extra_header = table.head_getGeom()
'origin': np.zeros(3,'d'),
'homogenization': 0, table.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))),
'microstructures': 0, 'size x y z: %s'%(' x '.join(map(str,info['size']))),
} 'origin x y z: %s'%(' : '.join(map(str,info['origin']))),
'homogenization: %i'%info['homogenization'],
'microstructures: %i'%info['microstructures'],
])
errors = []
if np.any(info['grid'] < 1): errors.append('invalid grid a b c.')
if np.any(info['size'] <= 0.0): errors.append('invalid size x y z.')
if errors != []:
table.croak(errors)
table.close(dismiss = True)
continue
# --- read data ------------------------------------------------------------------------------------
microstructure = table.microstructure_read(info['grid']) # read microstructure
# --- do work ------------------------------------------------------------------------------------
newInfo = { newInfo = {
'origin': np.zeros(3,'d'), 'origin': np.zeros(3,'d'),
'microstructures': 0, 'microstructures': 0,
} }
extra_header = []
for header in theTable.info:
headitems = map(str.lower,header.split())
if len(headitems) == 0: continue
if headitems[0] in mappings.keys():
if headitems[0] in identifiers.keys():
for i in xrange(len(identifiers[headitems[0]])):
info[headitems[0]][i] = \
mappings[headitems[0]](headitems[headitems.index(identifiers[headitems[0]][i])+1])
else:
info[headitems[0]] = mappings[headitems[0]](headitems[1])
else:
extra_header.append(header)
file['croak'].write('grid a b c: %s\n'%(' x '.join(map(str,info['grid']))) + \
'size x y z: %s\n'%(' x '.join(map(str,info['size']))) + \
'origin x y z: %s\n'%(' : '.join(map(str,info['origin']))) + \
'homogenization: %i\n'%info['homogenization'] + \
'microstructures: %i\n'%info['microstructures'])
if np.any(info['grid'] < 1):
file['croak'].write('invalid grid a b c.\n')
continue
if np.any(info['size'] <= 0.0):
file['croak'].write('invalid size x y z.\n')
continue
#--- read data ------------------------------------------------------------------------------------
microstructure = np.zeros(info['grid'].prod(),'i')
i = 0
while theTable.data_read():
items = theTable.data
if len(items) > 2:
if items[1].lower() == 'of': items = [int(items[2])]*int(items[0])
elif items[1].lower() == 'to': items = xrange(int(items[0]),1+int(items[2]))
else: items = map(int,items)
else: items = map(int,items)
s = len(items)
microstructure[i:i+s] = items
i += s
#--- do work ------------------------------------------------------------------------------------
substituted = np.copy(microstructure) substituted = np.copy(microstructure)
for k, v in sub.iteritems(): substituted[microstructure==k] = v # substitute microstructure indices for k, v in sub.iteritems(): substituted[microstructure==k] = v # substitute microstructure indices
@ -137,32 +92,34 @@ for file in files:
newInfo['microstructures'] = substituted.max() newInfo['microstructures'] = substituted.max()
# --- report --------------------------------------------------------------------------------------- # --- report ---------------------------------------------------------------------------------------
if (any(newInfo['origin'] != info['origin'])):
file['croak'].write('--> origin x y z: %s\n'%(' : '.join(map(str,newInfo['origin'])))) remarks = []
if (newInfo['microstructures'] != info['microstructures']): if (any(newInfo['origin'] != info['origin'])): remarks.append('--> origin x y z: %s'%(' : '.join(map(str,newInfo['origin']))))
file['croak'].write('--> microstructures: %i\n'%newInfo['microstructures']) if ( newInfo['microstructures'] != info['microstructures']): remarks.append('--> microstructures: %i'%newInfo['microstructures'])
                                                                                                                               if remarks != []: table.croak(remarks)
# --- write header --------------------------------------------------------------------------------- # --- write header ---------------------------------------------------------------------------------
theTable.labels_clear()
theTable.info_clear() table.labels_clear()
theTable.info_append(extra_header+[ table.info_clear()
table.info_append(extra_header+[
scriptID + ' ' + ' '.join(sys.argv[1:]), scriptID + ' ' + ' '.join(sys.argv[1:]),
"grid\ta %i\tb %i\tc %i"%(info['grid'][0],info['grid'][1],info['grid'][2],), "grid\ta {grid[0]}\tb {grid[1]}\tc {grid[2]}".format(grid=info['grid']),
"size\tx %f\ty %f\tz %f"%(info['size'][0],info['size'][1],info['size'][2],), "size\tx {size[0]}\ty {size[1]}\tz {size[2]}".format(size=info['size']),
"origin\tx %f\ty %f\tz %f"%(newInfo['origin'][0],newInfo['origin'][1],newInfo['origin'][2],), "origin\tx {origin[0]}\ty {origin[1]}\tz {origin[2]}".format(origin=newInfo['origin']),
"homogenization\t%i"%info['homogenization'], "homogenization\t{homog}".format(homog=info['homogenization']),
"microstructures\t%i"%(newInfo['microstructures']), "microstructures\t{microstructures}".format(microstructures=newInfo['microstructures']),
]) ])
theTable.head_write() table.head_write()
theTable.output_flush() table.output_flush()
# --- write microstructure information ------------------------------------------------------------ # --- write microstructure information ------------------------------------------------------------
formatwidth = int(math.floor(math.log10(substituted.max())+1))
  theTable.data = substituted.reshape((info['grid'][0],info['grid'][1]*info['grid'][2]),order='F').transpose() formatwidth = int(math.floor(math.log10(substituted.max())+1))
  theTable.data_writeArray('%%%ii'%(formatwidth),delimiter=' ') table.data = substituted.reshape((info['grid'][0],info['grid'][1]*info['grid'][2]),order='F').transpose()
table.data_writeArray('%%%ii'%(formatwidth),delimiter = ' ')
# --- output finalization -------------------------------------------------------------------------- # --- output finalization --------------------------------------------------------------------------
if file['name'] != 'STDIN':
theTable.input_close() table.close() # close ASCII table
theTable.output_close() if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new
os.rename(file['name']+'_tmp',file['name'])


@ -12,122 +12,75 @@ scriptName = os.path.splitext(scriptID.split()[1])[0]
#-------------------------------------------------------------------------------------------------- #--------------------------------------------------------------------------------------------------
# MAIN # MAIN
#-------------------------------------------------------------------------------------------------- #--------------------------------------------------------------------------------------------------
identifiers = {
'grid': ['a','b','c'],
'size': ['x','y','z'],
'origin': ['x','y','z'],
}
mappings = {
'grid': lambda x: int(x),
'size': lambda x: float(x),
'origin': lambda x: float(x),
'homogenization': lambda x: int(x),
'microstructures': lambda x: int(x),
}
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """ parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
Unpack geometry files containing ranges "a to b" and/or "n of x" multiples (exclusively in one line). Unpack geometry files containing ranges "a to b" and/or "n of x" multiples (exclusively in one line).
""", version = scriptID) """, version = scriptID)
parser.add_option('-1', '--onedimensional', dest='oneD', action='store_true', \ parser.add_option('-1', '--onedimensional',
dest = 'oneD',
action = 'store_true',
help = 'output geom file with one-dimensional data arrangement [%default]') help = 'output geom file with one-dimensional data arrangement [%default]')
parser.set_defaults(oneD = False) parser.set_defaults(oneD = False,
)
(options, filenames) = parser.parse_args() (options, filenames) = parser.parse_args()
# ------------------------------------------ setup file handles ------------------------------------ # --- loop over input files -------------------------------------------------------------------------
files = []
if filenames == []: if filenames == []: filenames = ['STDIN']
files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
else:
for name in filenames: for name in filenames:
if os.path.exists(name): if not (name == 'STDIN' or os.path.exists(name)): continue
files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr}) table = damask.ASCIItable(name = name, outname = name+'_tmp',
buffered = False, labeled = False)
# ------------------------------------------ loop over input files --------------------------------- table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
for file in files:
file['croak'].write('\033[1m' + scriptName + '\033[0m: ' + (file['name'] if file['name'] != 'STDIN' else '') + '\n')
table = damask.ASCIItable(file['input'],file['output'],labels = False,buffered = False) # make unbuffered ASCII_table
table.head_read() # read ASCII header info
# --- interpret header ---------------------------------------------------------------------------- # --- interpret header ----------------------------------------------------------------------------
info = {
'grid': np.zeros(3,'i'),
'size': np.zeros(3,'d'),
'origin': np.zeros(3,'d'),
'homogenization': 0,
'microstructures': 0,
}
extra_header = []
for header in table.info: table.head_read()
headitems = map(str.lower,header.split()) info,extra_header = table.head_getGeom()
if len(headitems) == 0: continue
if headitems[0] in mappings.keys():
if headitems[0] in identifiers.keys():
for i in xrange(len(identifiers[headitems[0]])):
info[headitems[0]][i] = \
mappings[headitems[0]](headitems[headitems.index(identifiers[headitems[0]][i])+1])
else:
info[headitems[0]] = mappings[headitems[0]](headitems[1])
else:
extra_header.append(header)
file['croak'].write('grid a b c: %s\n'%(' x '.join(map(str,info['grid']))) + \ table.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))),
'size x y z: %s\n'%(' x '.join(map(str,info['size']))) + \ 'size x y z: %s'%(' x '.join(map(str,info['size']))),
'origin x y z: %s\n'%(' : '.join(map(str,info['origin']))) + \ 'origin x y z: %s'%(' : '.join(map(str,info['origin']))),
'homogenization: %i\n'%info['homogenization'] + \ 'homogenization: %i'%info['homogenization'],
'microstructures: %i\n'%info['microstructures']) 'microstructures: %i'%info['microstructures'],
])
if np.any(info['grid'] < 1): errors = []
file['croak'].write('invalid grid a b c.\n') if np.any(info['grid'] < 1): errors.append('invalid grid a b c.')
if np.any(info['size'] <= 0.0): errors.append('invalid size x y z.')
if errors != []:
table.croak(errors)
table.close(dismiss = True)
continue continue
if np.any(info['size'] <= 0.0):
file['croak'].write('invalid size x y z.\n')
continue
#--- read data ------------------------------------------------------------------------------------
microstructure = np.zeros(info['grid'].prod(),'i')
i = 0
while table.data_read(): # read next data line of ASCII table
items = table.data
if len(items) > 2:
if items[1].lower() == 'of': items = [int(items[2])]*int(items[0])
elif items[1].lower() == 'to': items = xrange(int(items[0]),1+int(items[2]))
else: items = map(int,items)
else: items = map(int,items)
s = len(items)
microstructure[i:i+s] = items
i += s
# --- write header --------------------------------------------------------------------------------- # --- write header ---------------------------------------------------------------------------------
table.labels_clear() table.labels_clear()
table.info_clear() table.info_clear()
table.info_append(extra_header+[ table.info_append(extra_header+[
scriptID + ' ' + ' '.join(sys.argv[1:]), scriptID + ' ' + ' '.join(sys.argv[1:]),
"grid\ta %i\tb %i\tc %i"%(info['grid'][0],info['grid'][1],info['grid'][2],), "grid\ta {grid[0]}\tb {grid[1]}\tc {grid[2]}".format(grid=info['grid']),
"size\tx %e\ty %e\tz %e"%(info['size'][0],info['size'][1],info['size'][2],), "size\tx {size[0]}\ty {size[1]}\tz {size[2]}".format(size=info['size']),
"origin\tx %e\ty %e\tz %e"%(info['origin'][0],info['origin'][1],info['origin'][2],), "origin\tx {origin[0]}\ty {origin[1]}\tz {origin[2]}".format(origin=info['origin']),
"homogenization\t%i"%info['homogenization'], "homogenization\t{homog}".format(homog=info['homogenization']),
"microstructures\t%i"%(info['microstructures']), "microstructures\t{microstructures}".format(microstructures=info['microstructures']),
]) ])
table.head_write() table.head_write()
table.output_flush()
# --- write microstructure information ------------------------------------------------------------ # --- write microstructure information ------------------------------------------------------------
formatwidth = int(math.floor(math.log10(microstructure.max())+1))
if options.oneD: microstructure = table.microstructure_read(info['grid']) # read microstructure
table.data = microstructure formatwidth = int(math.floor(math.log10(microstructure.max())+1)) # efficient number printing format
else: if options.oneD: table.data = microstructure
table.data = microstructure.reshape((info['grid'][0],info['grid'][1]*info['grid'][2]),order='F').transpose() else: table.data = microstructure.reshape((info['grid'][0],info['grid'][1]*info['grid'][2]),order='F').transpose()
table.data_writeArray('%%%ii'%(formatwidth),delimiter = ' ') table.data_writeArray('%%%ii'%(formatwidth),delimiter = ' ')
#--- output finalization -------------------------------------------------------------------------- #--- output finalization --------------------------------------------------------------------------
if file['name'] != 'STDIN':
table.input_close() # close input ASCII table table.close() # close ASCII table
table.output_close() # close input ASCII table if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new
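
The per-line expansion that the removed loop performed (and that table.microstructure_read now encapsulates) mirrors the pack example earlier; a sketch with a hypothetical expand() helper:

def expand(words):
    if len(words) > 2 and words[1].lower() == 'of': return [int(words[2])]*int(words[0])
    if len(words) > 2 and words[1].lower() == 'to': return list(range(int(words[0]),int(words[2])+1))
    return [int(w) for w in words]

print(expand('3 of 1'.split()))   # [1, 1, 1]
print(expand('2 to 5'.split()))   # [2, 3, 4, 5]
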


@ -13,18 +13,6 @@ scriptName = os.path.splitext(scriptID.split()[1])[0]
#-------------------------------------------------------------------------------------------------- #--------------------------------------------------------------------------------------------------
# MAIN # MAIN
#-------------------------------------------------------------------------------------------------- #--------------------------------------------------------------------------------------------------
identifiers = {
'grid': ['a','b','c'],
'size': ['x','y','z'],
'origin': ['x','y','z'],
}
mappings = {
'grid': lambda x: int(x),
'size': lambda x: float(x),
'origin': lambda x: float(x),
'homogenization': lambda x: int(x),
'microstructures': lambda x: int(x),
}
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """ parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
Offset microstructure index for points which see a microstructure different from themselves within a given (cubic) vicinity, Offset microstructure index for points which see a microstructure different from themselves within a given (cubic) vicinity,
@ -32,131 +20,98 @@ i.e. within the region close to a grain/phase boundary.
""", version = scriptID) """, version = scriptID)
parser.add_option('-v', '--vicinity', dest='vicinity', type='int', metavar='int', \ parser.add_option('-v', '--vicinity',
dest = 'vicinity',
type = 'int', metavar = 'int',
help = 'voxel distance checked for presence of other microstructure [%default]') help = 'voxel distance checked for presence of other microstructure [%default]')
parser.add_option('-m', '--microstructureoffset', dest='offset', type='int', metavar='int', \ parser.add_option('-m', '--microstructureoffset',
help='offset (positive or negative) for tagged microstructure. '+ dest='offset',
type = 'int', metavar = 'int',
help = 'offset (positive or negative) for tagged microstructure indices. '+
'"0" selects maximum microstructure index [%default]') '"0" selects maximum microstructure index [%default]')
parser.set_defaults(vicinity = 1) parser.set_defaults(vicinity = 1,
parser.set_defaults(offset = 0) offset = 0,
)
(options, filenames) = parser.parse_args() (options, filenames) = parser.parse_args()
#--- setup file handles -------------------------------------------------------------------------- # --- loop over input files -------------------------------------------------------------------------
files = []
if filenames == []: if filenames == []: filenames = ['STDIN']
files.append({'name':'STDIN',
'input':sys.stdin,
'output':sys.stdout,
'croak':sys.stderr,
})
else:
for name in filenames: for name in filenames:
if os.path.exists(name): if not (name == 'STDIN' or os.path.exists(name)): continue
files.append({'name':name, table = damask.ASCIItable(name = name, outname = name+'_tmp',
'input':open(name), buffered = False, labeled = False)
'output':open(name+'_tmp','w'), table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
'croak':sys.stdout,
})
#--- loop over input files ------------------------------------------------------------------------
for file in files:
file['croak'].write('\033[1m' + scriptName + '\033[0m: ' + (file['name'] if file['name'] != 'STDIN' else '') + '\n')
table = damask.ASCIItable(file['input'],file['output'],labels=False)
table.head_read()
# --- interpret header ---------------------------------------------------------------------------- # --- interpret header ----------------------------------------------------------------------------
info = {
'grid': np.zeros(3,'i'),
'size': np.zeros(3,'d'),
'origin': np.zeros(3,'d'),
'homogenization': 0,
'microstructures': 0,
}
newInfo = {
'microstructures': 0,
}
extra_header = []
for header in table.info: table.head_read()
headitems = map(str.lower,header.split()) info,extra_header = table.head_getGeom()
if len(headitems) == 0: continue
if headitems[0] in mappings.keys():
if headitems[0] in identifiers.keys():
for i in xrange(len(identifiers[headitems[0]])):
info[headitems[0]][i] = \
mappings[headitems[0]](headitems[headitems.index(identifiers[headitems[0]][i])+1])
else:
info[headitems[0]] = mappings[headitems[0]](headitems[1])
else:
extra_header.append(header)
file['croak'].write('grid a b c: %s\n'%(' x '.join(map(str,info['grid']))) + \ table.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))),
'size x y z: %s\n'%(' x '.join(map(str,info['size']))) + \ 'size x y z: %s'%(' x '.join(map(str,info['size']))),
'origin x y z: %s\n'%(' : '.join(map(str,info['origin']))) + \ 'origin x y z: %s'%(' : '.join(map(str,info['origin']))),
'homogenization: %i\n'%info['homogenization'] + \ 'homogenization: %i'%info['homogenization'],
'microstructures: %i\n'%info['microstructures']) 'microstructures: %i'%info['microstructures'],
])
if np.any(info['grid'] < 1): errors = []
file['croak'].write('invalid grid a b c.\n') if np.any(info['grid'] < 1): errors.append('invalid grid a b c.')
continue if np.any(info['size'] <= 0.0): errors.append('invalid size x y z.')
if np.any(info['size'] <= 0.0): if errors != []:
file['croak'].write('invalid size x y z.\n') table.croak(errors)
table.close(dismiss = True)
continue continue
# --- read data ------------------------------------------------------------------------------------ # --- read data ------------------------------------------------------------------------------------
microstructure = np.zeros(info['grid'].prod(),'i')
i = 0
table.data_rewind()
while table.data_read():
items = table.data
if len(items) > 2:
if items[1].lower() == 'of': items = [int(items[2])]*int(items[0])
elif items[1].lower() == 'to': items = xrange(int(items[0]),1+int(items[2]))
else: items = map(int,items)
else: items = map(int,items)
s = len(items) microstructure = table.microstructure_read(info['grid']).reshape(info['grid'],order='F') # read microstructure
microstructure[i:i+s] = items
i += s
# --- do work ------------------------------------------------------------------------------------ # --- do work ------------------------------------------------------------------------------------
microstructure = microstructure.reshape(info['grid'],order='F')
if options.offset == 0: newInfo = {
options.offset = microstructure.max() 'microstructures': 0,
}
if options.offset == 0: options.offset = microstructure.max()
microstructure = np.where(ndimage.filters.maximum_filter(microstructure,size=1+2*options.vicinity,mode='wrap') == microstructure = np.where(ndimage.filters.maximum_filter(microstructure,size=1+2*options.vicinity,mode='wrap') ==
ndimage.filters.minimum_filter(microstructure,size=1+2*options.vicinity,mode='wrap'), ndimage.filters.minimum_filter(microstructure,size=1+2*options.vicinity,mode='wrap'),
microstructure, microstructure + options.offset) microstructure, microstructure + options.offset)
newInfo['microstructures'] = microstructure.max() newInfo['microstructures'] = microstructure.max()
if (newInfo['microstructures'] != info['microstructures']):
file['croak'].write('--> microstructures: %i\n'%newInfo['microstructures']) # --- report ---------------------------------------------------------------------------------------
remarks = []
if ( newInfo['microstructures'] != info['microstructures']): remarks.append('--> microstructures: %i'%newInfo['microstructures'])
  if remarks != []: table.croak(remarks)
# --- write header --------------------------------------------------------------------------------- # --- write header ---------------------------------------------------------------------------------
table.labels_clear() table.labels_clear()
table.info_clear() table.info_clear()
table.info_append(extra_header+[ table.info_append(extra_header+[
scriptID + ' ' + ' '.join(sys.argv[1:]), scriptID + ' ' + ' '.join(sys.argv[1:]),
"grid\ta %i\tb %i\tc %i"%(info['grid'][0],info['grid'][1],info['grid'][2],), "grid\ta {grid[0]}\tb {grid[1]}\tc {grid[2]}".format(grid=info['grid']),
"size\tx %f\ty %f\tz %f"%(info['size'][0],info['size'][1],info['size'][2],), "size\tx {size[0]}\ty {size[1]}\tz {size[2]}".format(size=info['size']),
"origin\tx %f\ty %f\tz %f"%(info['origin'][0],info['origin'][1],info['origin'][2],), "origin\tx {origin[0]}\ty {origin[1]}\tz {origin[2]}".format(origin=info['origin']),
"homogenization\t%i"%info['homogenization'], "homogenization\t{homog}".format(homog=info['homogenization']),
"microstructures\t%i"%(newInfo['microstructures']), "microstructures\t{microstructures}".format(microstructures=newInfo['microstructures']),
]) ])
table.head_write() table.head_write()
table.output_flush() table.output_flush()
# --- write microstructure information ------------------------------------------------------------ # --- write microstructure information ------------------------------------------------------------
formatwidth = int(math.floor(math.log10(microstructure.max())+1)) formatwidth = int(math.floor(math.log10(microstructure.max())+1))
table.data = microstructure.reshape((info['grid'][0],info['grid'][1]*info['grid'][2]),order='F').transpose() table.data = microstructure.reshape((info['grid'][0],info['grid'][1]*info['grid'][2]),order='F').transpose()
table.data_writeArray('%%%ii'%(formatwidth),delimiter = ' ') table.data_writeArray('%%%ii'%(formatwidth),delimiter = ' ')
# --- output finalization -------------------------------------------------------------------------- # --- output finalization --------------------------------------------------------------------------
if file['name'] != 'STDIN':
table.input_close()
table.output_close()
os.rename(file['name']+'_tmp',file['name'])
table.close() # close ASCII table
if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new
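
The np.where above tags a voxel when the cubic neighborhood of edge length 1+2*vicinity (periodic, mode='wrap') contains more than one microstructure index, i.e. when the maximum and minimum filters disagree. A one-dimensional illustration of the same test (the reduction to 1-D is for readability only):

import numpy as np
from scipy import ndimage

m = np.array([1,1,1,2,2,2])
nearBoundary = (ndimage.maximum_filter(m,size=3,mode='wrap') !=
                ndimage.minimum_filter(m,size=3,mode='wrap'))
print(nearBoundary)   # [ True False  True  True False  True]
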


@ -12,54 +12,46 @@ scriptName = scriptID.split()[1]
def integerFactorization(i): def integerFactorization(i):
j = int(math.floor(math.sqrt(float(i)))) j = int(math.floor(math.sqrt(float(i))))
while (j>1 and int(i)%j != 0): while j>1 and int(i)%j != 0:
j -= 1 j -= 1
return j return j
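
integerFactorization returns the largest factor of i not exceeding sqrt(i) (1 for primes), presumably to arrange the sampled orientations on a near-square NCOLS x NROWS grid for the .ang output:

print(integerFactorization(12))   # 3, since 12 = 3 x 4
print(integerFactorization(13))   # 1, since 13 is prime
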
def positiveRadians(angle):
angle = math.radians(float(angle)) def TSLheader(sizeX,sizeY,step):
while angle < 0.0:
angle += 2.0*math.pi
return angle return [
'# TEM_PIXperUM 1.000000',
'# x-star 0.509548',
def getHeader(sizeX,sizeY,step): '# y-star 0.795272',
'# z-star 0.611799',
return [ \ '# WorkingDistance 18.000000',
'# TEM_PIXperUM 1.000000', \ '#',
'# x-star 0.509548', \ '# Phase 1',
'# y-star 0.795272', \ '# MaterialName Al',
'# z-star 0.611799', \ '# Formula Fe',
'# WorkingDistance 18.000000', \ '# Info',
'#', \ '# Symmetry 43',
'# Phase 1', \ '# LatticeConstants 2.870 2.870 2.870 90.000 90.000 90.000',
'# MaterialName Al', \ '# NumberFamilies 4',
'# Formula Fe', \ '# hklFamilies 1 1 0 1 0.000000 1',
'# Info', \ '# hklFamilies 2 0 0 1 0.000000 1',
'# Symmetry 43', \ '# hklFamilies 2 1 1 1 0.000000 1',
'# LatticeConstants 2.870 2.870 2.870 90.000 90.000 90.000', \ '# hklFamilies 3 1 0 1 0.000000 1',
'# NumberFamilies 4', \ '# Categories 0 0 0 0 0 ',
'# hklFamilies 1 1 0 1 0.000000 1', \ '#',
'# hklFamilies 2 0 0 1 0.000000 1', \ '# GRID: SquareGrid',
'# hklFamilies 2 1 1 1 0.000000 1', \ '# XSTEP: ' + str(step),
'# hklFamilies 3 1 0 1 0.000000 1', \ '# YSTEP: ' + str(step),
'# Categories 0 0 0 0 0 ', \ '# NCOLS_ODD: ' + str(sizeX),
'#', \ '# NCOLS_EVEN: ' + str(sizeX),
'# GRID: SquareGrid', \ '# NROWS: ' + str(sizeY),
'# XSTEP: ' + str(step), \ '#',
'# YSTEP: ' + str(step), \ '# OPERATOR: ODFsammpling',
'# NCOLS_ODD: ' + str(sizeX), \ '#',
'# NCOLS_EVEN: ' + str(sizeX), \ '# SAMPLEID: ',
'# NROWS: ' + str(sizeY), \ '#',
'#', \ '# SCANID: ',
'# OPERATOR: ODFsammpling', \ '#',
'#', \
'# SAMPLEID: ', \
'#', \
'# SCANID: ', \
'#', \
] ]
def binAsBins(bin,intervals): def binAsBins(bin,intervals):
@ -128,12 +120,13 @@ def directInversion (ODF,nSamples):
incFactor = 1.0 incFactor = 1.0
nInvSamplesUpper = directInvRepetitions(ODF['dV_V'],scaleUpper) nInvSamplesUpper = directInvRepetitions(ODF['dV_V'],scaleUpper)
nIter += 1 nIter += 1
file['croak'].write('%i:(%12.11f,%12.11f) %i <= %i <= %i\n'\ table.croak('%i:(%12.11f,%12.11f) %i <= %i <= %i'%(nIter,scaleLower,scaleUpper,
%(nIter,scaleLower,scaleUpper,nInvSamplesLower,nOptSamples,nInvSamplesUpper)) nInvSamplesLower,nOptSamples,nInvSamplesUpper))
nInvSamples = nInvSamplesUpper nInvSamples = nInvSamplesUpper
scale = scaleUpper scale = scaleUpper
file['croak'].write('created set of %i samples (%12.11f) with scaling %12.11f delivering %i\n'\ table.croak('created set of %i samples (%12.11f) with scaling %12.11f delivering %i'%(nInvSamples,
%(nInvSamples,float(nInvSamples)/nOptSamples-1.0,scale,nSamples)) float(nInvSamples)/nOptSamples-1.0,
scale,nSamples))
repetition = [None]*ODF['nBins'] # preallocate and clear repetition = [None]*ODF['nBins'] # preallocate and clear
for bin in range(ODF['nBins']): # loop over bins for bin in range(ODF['nBins']): # loop over bins
@ -146,16 +139,16 @@ def directInversion (ODF,nSamples):
set[i:i+repetition[bin]] = [bin]*repetition[bin] # fill set with bin, i.e. orientation set[i:i+repetition[bin]] = [bin]*repetition[bin] # fill set with bin, i.e. orientation
i += repetition[bin] # advance set counter i += repetition[bin] # advance set counter
orientations = [None]*nSamples orientations = np.zeros((nSamples,3),'f')
reconstructedODF = [0.0]*ODF['nBins'] reconstructedODF = np.zeros(ODF['nBins'],'f')
unitInc = 1.0/nSamples unitInc = 1.0/nSamples
for j in range(nSamples): for j in range(nSamples):
if (j == nInvSamples-1): ex = j if (j == nInvSamples-1): ex = j
else: ex = int(round(random.uniform(j+0.5,nInvSamples-0.5))) else: ex = int(round(random.uniform(j+0.5,nInvSamples-0.5)))
bin = set[ex] bin = set[ex]
bins = binAsBins(bin,ODF['interval']) bins = binAsBins(bin,ODF['interval']) # PE: why are we doing this??
Eulers = binAsEulers(bin,ODF['interval'],ODF['delta'],ODF['center']) Eulers = binAsEulers(bin,ODF['interval'],ODF['delta'],ODF['center'])
orientations[j] = '%g\t%g\t%g' %( math.degrees(Eulers[0]),math.degrees(Eulers[1]),math.degrees(Eulers[2]) ) orientations[j] = np.degrees(Eulers)
reconstructedODF[bin] += unitInc reconstructedODF[bin] += unitInc
set[ex] = set[j] # exchange orientations set[ex] = set[j] # exchange orientations
@ -169,8 +162,8 @@ def MonteCarloEulers (ODF,nSamples):
countMC = 0 countMC = 0
maxdV_V = max(ODF['dV_V']) maxdV_V = max(ODF['dV_V'])
orientations = [None]*nSamples orientations = np.zeros((nSamples,3),'f')
reconstructedODF = [0.0]*ODF['nBins'] reconstructedODF = np.zeros(ODF['nBins'],'f')
unitInc = 1.0/nSamples unitInc = 1.0/nSamples
for j in range(nSamples): for j in range(nSamples):
@ -182,7 +175,7 @@ def MonteCarloEulers (ODF,nSamples):
Eulers = [limit*random.random() for limit in ODF['limit']] Eulers = [limit*random.random() for limit in ODF['limit']]
bins = EulersAsBins(Eulers,ODF['interval'],ODF['delta'],ODF['center']) bins = EulersAsBins(Eulers,ODF['interval'],ODF['delta'],ODF['center'])
bin = binsAsBin(bins,ODF['interval']) bin = binsAsBin(bins,ODF['interval'])
orientations[j] = '%g\t%g\t%g' %( math.degrees(Eulers[0]),math.degrees(Eulers[1]),math.degrees(Eulers[2]) ) orientations[j] = np.degrees(Eulers)
reconstructedODF[bin] += unitInc reconstructedODF[bin] += unitInc
return orientations, reconstructedODF, countMC return orientations, reconstructedODF, countMC
@ -193,8 +186,8 @@ def MonteCarloBins (ODF,nSamples):
countMC = 0 countMC = 0
maxdV_V = max(ODF['dV_V']) maxdV_V = max(ODF['dV_V'])
orientations = [None]*nSamples orientations = np.zeros((nSamples,3),'f')
reconstructedODF = [0.0]*ODF['nBins'] reconstructedODF = np.zeros(ODF['nBins'],'f')
unitInc = 1.0/nSamples unitInc = 1.0/nSamples
for j in range(nSamples): for j in range(nSamples):
@ -205,7 +198,7 @@ def MonteCarloBins (ODF,nSamples):
MC = maxdV_V*random.random() MC = maxdV_V*random.random()
bin = int(ODF['nBins'] * random.random()) bin = int(ODF['nBins'] * random.random())
Eulers = binAsEulers(bin,ODF['interval'],ODF['delta'],ODF['center']) Eulers = binAsEulers(bin,ODF['interval'],ODF['delta'],ODF['center'])
orientations[j] = '%g\t%g\t%g' %( math.degrees(Eulers[0]),math.degrees(Eulers[1]),math.degrees(Eulers[2]) ) orientations[j] = np.degrees(Eulers)
reconstructedODF[bin] += unitInc reconstructedODF[bin] += unitInc
return orientations, reconstructedODF return orientations, reconstructedODF
@ -214,8 +207,8 @@ def MonteCarloBins (ODF,nSamples):
def TothVanHoutteSTAT (ODF,nSamples): def TothVanHoutteSTAT (ODF,nSamples):
""" ODF contains 'dV_V' (normalized to 1), 'center', 'intervals', 'limits' (in radians) """ """ ODF contains 'dV_V' (normalized to 1), 'center', 'intervals', 'limits' (in radians) """
orientations = [None]*nSamples orientations = np.zeros((nSamples,3),'f')
reconstructedODF = [0.0]*ODF['nBins'] reconstructedODF = np.zeros(ODF['nBins'],'f')
unitInc = 1.0/nSamples unitInc = 1.0/nSamples
selectors = [random.random() for i in range(nSamples)] selectors = [random.random() for i in range(nSamples)]
@@ -229,11 +222,12 @@ def TothVanHoutteSTAT (ODF,nSamples):
cumdV_V += ODF['dV_V'][bin] cumdV_V += ODF['dV_V'][bin]
while indexSelector < nSamples and selectors[indexSelector] < cumdV_V: while indexSelector < nSamples and selectors[indexSelector] < cumdV_V:
Eulers = binAsEulers(bin,ODF['interval'],ODF['delta'],ODF['center']) Eulers = binAsEulers(bin,ODF['interval'],ODF['delta'],ODF['center'])
orientations[countSamples] = '%g\t%g\t%g' %( math.degrees(Eulers[0]),math.degrees(Eulers[1]),math.degrees(Eulers[2]) ) orientations[countSamples] = np.degrees(Eulers)
reconstructedODF[bin] += unitInc reconstructedODF[bin] += unitInc
countSamples += 1 countSamples += 1
indexSelector += 1 indexSelector += 1
file['croak'].write('created set of %i when asked to deliver %i\n'%(countSamples,nSamples))
table.croak('created set of %i when asked to deliver %i'%(countSamples,nSamples))
return orientations, reconstructedODF return orientations, reconstructedODF
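TothVanHoutteSTAT amounts to inverse-CDF sampling of a discrete distribution: random selectors are swept against the running sum of dV_V, and each bin emits one orientation per selector landing in its cumulative slice. A compact sketch, assuming the selectors are sorted first (the sweep requires it):

import random

def sampleSTAT(dV_V, nSamples):                                  # dV_V: per-bin volume fractions, summing to 1
  selectors = sorted(random.random() for _ in range(nSamples))
  bins, cumdV_V, indexSelector = [], 0.0, 0
  for bin,w in enumerate(dV_V):
    cumdV_V += w                                                 # running CDF
    while indexSelector < nSamples and selectors[indexSelector] < cumdV_V:
      bins.append(bin)                                           # one sample per selector inside this CDF slice
      indexSelector += 1
  return bins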
@@ -242,73 +236,83 @@ def TothVanHoutteSTAT (ODF,nSamples):
# MAIN # MAIN
# -------------------------------------------------------------------- # --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """ parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
Transform linear binned data into Euler angles. Transform linear binned ODF data into given number of orientations.
""", version = scriptID) """, version = scriptID)
parser.add_option('-n', '--nsamples', dest='number', type='int', metavar = 'int', parser.add_option('-n', '--nsamples',
dest = 'number',
type = 'int', metavar = 'int',
help = 'number of orientations to be generated [%default]') help = 'number of orientations to be generated [%default]')
parser.add_option('-a','--algorithm', dest='algorithm', type='string', metavar = 'string', parser.add_option('-a','--algorithm',
help='sampling algorithm. IA: direct inversion, STAT: Van Houtte, MC: Monte Carlo. [%default].') #make choice dest = 'algorithm',
parser.add_option('-p','--phase', dest='phase', type='int', metavar = 'int', type = 'string', metavar = 'string',
help = 'sampling algorithm. IA: integral approximation, STAT: Van Houtte, MC: Monte Carlo. [%default].') #make choice
parser.add_option('-p','--phase',
dest = 'phase',
type = 'int', metavar = 'int',
help = 'phase index to be used [%default]') help = 'phase index to be used [%default]')
parser.add_option('--crystallite', dest='crystallite', type='int', metavar = 'int', parser.add_option('--crystallite',
dest = 'crystallite',
type = 'int', metavar = 'int',
help = 'crystallite index to be used [%default]') help = 'crystallite index to be used [%default]')
parser.add_option('-r', '--rnd', dest='randomSeed', type='int', metavar='int', \ parser.add_option('-r', '--rnd',
dest = 'randomSeed',
type = 'int', metavar = 'int', \
help = 'seed of random number generator [%default]') help = 'seed of random number generator [%default]')
parser.add_option('--ang', dest='ang', action='store_true', parser.add_option('--ang',
help='write .ang file [%default]') dest = 'ang',
parser.set_defaults(randomSeed = None) action = 'store_true',
parser.set_defaults(number = 500) help = 'write TSL/EDAX .ang file [%default]')
parser.set_defaults(algorithm = 'IA') parser.set_defaults(randomSeed = None,
parser.set_defaults(phase = 1) number = 500,
parser.set_defaults(crystallite = 1) algorithm = 'IA',
parser.set_defaults(ang = True) phase = 1,
crystallite = 1,
ang = True,
)
(options,filenames) = parser.parse_args() (options,filenames) = parser.parse_args()
nSamples = options.number nSamples = options.number
methods = [options.algorithm] methods = [options.algorithm]
#--- setup file handles --------------------------------------------------------------------------- # --- loop over input files -------------------------------------------------------------------------
files = []
if filenames == []: if filenames == []: filenames = ['STDIN']
files.append({'name':'STDIN','input':sys.stdin,'output':sys.stdout,'outang':sys.stdout,'croak':sys.stderr})
else:
for name in filenames: for name in filenames:
if os.path.exists(name): if not (name == 'STDIN' or os.path.exists(name)): continue
files.append({'name':name,'input':open(name),'output':open(name+'_tmp','w'),'outang':open(name+'_ang_tmp','w'),'croak':sys.stdout}) table = damask.ASCIItable(name = name, outname = None,
buffered = False, readonly = True)
table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
#--- loop over input files ------------------------------------------------------------------------ randomSeed = int(os.urandom(4).encode('hex'), 16) if options.randomSeed == None else options.randomSeed # random seed per file for second phase
for file in files:
file['croak'].write('\033[1m' + scriptName + '\033[0m: ' + (file['name'] if file['name'] != 'STDIN' else '') + '\n')
table = damask.ASCIItable(file['input'],file['output'],buffered = False)
table.head_read()
randomSeed = int(os.urandom(4).encode('hex'), 16) if options.randomSeed == None else options.randomSeed # random seed per file for second phase
random.seed(randomSeed) random.seed(randomSeed)
# --------------- figure out columns in table ----------- ----------------------------------------- # ------------------------------------------ read header ---------------------------------------
column = {}
pos = 0 table.head_read()
keys = ['phi1','Phi','phi2','intensity']
for key in keys: errors = []
if key not in table.labels: labels = ['phi1','Phi','phi2','intensity']
file['croak'].write('column %s not found...\n'%key) for i,index in enumerate(table.label_index(labels)):
else: if index < 0: errors.append('label {} not present.'.format(labels[i]))
column[key] = pos
pos+=1 if errors != []:
if pos != 4: continue table.croak(errors)
table.close(dismiss = True)
continue
# ------------------------------------------ read data ---------------------------------------
binnedODF = table.data_readArray(labels)
binnedODF = table.data_readArray(keys)
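This header/data handshake is the reading idiom the commit adopts everywhere: probe label_index for every required column, croak and dismiss on failure, then fetch all columns in one data_readArray call. A condensed sketch of the pattern (filename and labels are examples only):

table = damask.ASCIItable(name = 'data.txt', buffered = False, readonly = True)
table.head_read()
labels = ['phi1','Phi','phi2','intensity']
missing = [l for i,l in zip(table.label_index(labels),labels) if i < 0]
if missing:
  table.croak(['label {} not present.'.format(l) for l in missing])
  table.close(dismiss = True)
else:
  table.data_readArray(labels)                 # requested columns land in table.data as a numpy array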
# --------------- figure out limits (left/right), delta, and interval ----------------------------- # --------------- figure out limits (left/right), delta, and interval -----------------------------
ODF = {} ODF = {}
limits = np.array([[np.min(table.data[:,column['phi1']]),\ limits = np.array([np.min(table.data,axis=0),
np.min(table.data[:,column['Phi']]),\ np.max(table.data,axis=0)])
np.min(table.data[:,column['phi2']])],\
[np.max(table.data[:,column['phi1']]),\
np.max(table.data[:,column['Phi']]),\
np.max(table.data[:,column['phi2']])]])
ODF['limit'] = np.radians(limits[1,:]) ODF['limit'] = np.radians(limits[1,:])
if all(limits[0,:]<1e-8): # vertex centered if all(limits[0,:]<1e-8): # vertex centered
@@ -319,13 +323,13 @@ for file in files:
eulers = [{},{},{}] eulers = [{},{},{}]
for i in xrange(table.data.shape[0]): for i in xrange(table.data.shape[0]):
for j in xrange(3): for j in xrange(3):
eulers[j][str(table.data[i,column[keys[j]]])] = True # remember eulers along phi1, Phi, and phi2 eulers[j][str(table.data[i,j])] = True # remember eulers along phi1, Phi, and phi2
ODF['interval'] = np.array([len(eulers[0]),len(eulers[1]),len(eulers[2]),],'i') # steps are number of distinct values ODF['interval'] = np.array([len(eulers[0]),len(eulers[1]),len(eulers[2]),],'i') # steps are number of distinct values
ODF['nBins'] = ODF['interval'].prod() ODF['nBins'] = ODF['interval'].prod()
ODF['delta'] = np.radians(np.array(limits[1,0:3]-limits[0,0:3])/(ODF['interval']-1)) ODF['delta'] = np.radians(np.array(limits[1,0:3]-limits[0,0:3])/(ODF['interval']-1))
if binnedODF[0] != ODF['nBins']: if binnedODF[0] != ODF['nBins']:
file['croak'].write('expecting %i values but got %i'%(ODF['nBins'],len(linesBinnedODF))) table.croak('expecting %i values but got %i'%(ODF['nBins'],binnedODF[0]))
continue continue
# build binnedODF array # build binnedODF array
@@ -343,11 +347,12 @@ for file in files:
for b in range(ODF['nBins']): ODF['dV_V'][b] /= sumdV_V # normalize dV/V for b in range(ODF['nBins']): ODF['dV_V'][b] /= sumdV_V # normalize dV/V
file['croak'].write('non-zero fraction: %12.11f (%i/%i)\n'\ table.croak(['non-zero fraction: %12.11f (%i/%i)'%(float(ODF['nNonZero'])/ODF['nBins'],
%(float(ODF['nNonZero'])/ODF['nBins'],ODF['nNonZero'],ODF['nBins'])) ODF['nNonZero'],
file['croak'].write('Volume integral of ODF: %12.11f\n'%sumdV_V) ODF['nBins']),
file['croak'].write('Reference Integral: %12.11f\n'\ 'Volume integral of ODF: %12.11f'%sumdV_V,
%(ODF['limit'][0]*ODF['limit'][2]*(1-math.cos(ODF['limit'][1])))) 'Reference Integral: %12.11f'%(ODF['limit'][0]*ODF['limit'][2]*(1-math.cos(ODF['limit'][1]))),
])
# call methods # call methods
Functions = {'IA': 'directInversion', 'STAT': 'TothVanHoutteSTAT', 'MC': 'MonteCarloBins'} Functions = {'IA': 'directInversion', 'STAT': 'TothVanHoutteSTAT', 'MC': 'MonteCarloBins'}
@@ -372,66 +377,72 @@ for file in files:
indivSum['orig'] += ODF['dV_V'][bin] indivSum['orig'] += ODF['dV_V'][bin]
indivSquaredSum['orig'] += ODF['dV_V'][bin]**2 indivSquaredSum['orig'] += ODF['dV_V'][bin]**2
file['croak'].write('sqrt(N*)RMSD of ODFs:\t %12.11f\n'% math.sqrt(nSamples*squaredDiff[method])) table.croak(['sqrt(N*)RMSD of ODFs:\t %12.11f'% math.sqrt(nSamples*squaredDiff[method]),
file['croak'].write('RMSrD of ODFs:\t %12.11f\n'%math.sqrt(squaredRelDiff[method])) 'RMSrD of ODFs:\t %12.11f'%math.sqrt(squaredRelDiff[method]),
file['croak'].write('rMSD of ODFs:\t %12.11f\n'%(squaredDiff[method]/indivSquaredSum['orig'])) 'rMSD of ODFs:\t %12.11f'%(squaredDiff[method]/indivSquaredSum['orig']),
file['croak'].write('nNonZero correlation slope:\t %12.11f\n'\ 'nNonZero correlation slope:\t %12.11f'\
%((ODF['nNonZero']*mutualProd[method]-indivSum['orig']*indivSum[method])/\ %((ODF['nNonZero']*mutualProd[method]-indivSum['orig']*indivSum[method])/\
(ODF['nNonZero']*indivSquaredSum['orig']-indivSum['orig']**2))) (ODF['nNonZero']*indivSquaredSum['orig']-indivSum['orig']**2)),
file['croak'].write( 'nNonZero correlation confidence:\t %12.11f\n'\ 'nNonZero correlation confidence:\t %12.11f'\
%((mutualProd[method]-indivSum['orig']*indivSum[method]/ODF['nNonZero'])/\ %((mutualProd[method]-indivSum['orig']*indivSum[method]/ODF['nNonZero'])/\
(ODF['nNonZero']*math.sqrt((indivSquaredSum['orig']/ODF['nNonZero']-(indivSum['orig']/ODF['nNonZero'])**2)*\ (ODF['nNonZero']*math.sqrt((indivSquaredSum['orig']/ODF['nNonZero']-(indivSum['orig']/ODF['nNonZero'])**2)*\
(indivSquaredSum[method]/ODF['nNonZero']-(indivSum[method]/ODF['nNonZero'])**2))))) (indivSquaredSum[method]/ODF['nNonZero']-(indivSum[method]/ODF['nNonZero'])**2)))),
])
if method == 'IA' and nSamples < ODF['nNonZero']: if method == 'IA' and nSamples < ODF['nNonZero']:
strOpt = '(%i)'%ODF['nNonZero'] strOpt = '(%i)'%ODF['nNonZero']
formatwidth = 1 formatwidth = 1+int(math.log10(nSamples))
file['output'].write('#' + scriptID + ' ' + ' '.join(sys.argv[1:])+'\n')
file['output'].write('# random seed %i\n'%randomSeed) materialConfig = [
file['output'].write('#-------------------#') '#' + scriptID + ' ' + ' '.join(sys.argv[1:]),
file['output'].write('\n<microstructure>\n') '# random seed %i'%randomSeed,
file['output'].write('#-------------------#\n') '#-------------------#',
'<microstructure>',
'#-------------------#',
]
for i,ID in enumerate(xrange(nSamples)): for i,ID in enumerate(xrange(nSamples)):
file['output'].write('[Grain%s]\n'%(str(ID+1).zfill(formatwidth)) + \ materialConfig += ['[Grain%s]'%(str(ID+1).zfill(formatwidth)),
'crystallite %i\n'%options.crystallite + \ 'crystallite %i'%options.crystallite,
'(constituent) phase %i texture %s fraction 1.0\n'%(options.phase,str(ID+1).rjust(formatwidth))) '(constituent) phase %i texture %s fraction 1.0'%(options.phase,str(ID+1).rjust(formatwidth)),
]
materialConfig += [
'#-------------------#',
'<texture>',
'#-------------------#',
]
file['output'].write('\n#-------------------#')
file['output'].write('\n<texture>\n')
file['output'].write('#-------------------#\n')
for ID in xrange(nSamples): for ID in xrange(nSamples):
eulers = re.split(r'[\t]', Orientations[ID].strip()) eulers = Orientations[ID]
materialConfig += ['[Grain%s]'%(str(ID+1).zfill(formatwidth)),
'(gauss) phi1 %10.5f Phi %10.5f phi2 %10.6f scatter 0.0 fraction 1.0'%tuple(eulers),
]
file['output'].write('[Grain%s]\n'%(str(ID+1).zfill(formatwidth)) + \
'(gauss) phi1 %10.5f Phi %10.5f phi2 %10.6f scatter 0.0 fraction 1.0\n'\
%(float(eulers[0]),float(eulers[1]),float(eulers[2])))
#--- output finalization -------------------------------------------------------------------------- #--- output finalization --------------------------------------------------------------------------
if file['name'] != 'STDIN':
file['output'].close() with open(os.path.splitext(name)[0]+'_'+method+'_'+str(nSamples)+'_material.config','w') as outfile:
os.rename(file['name']+'_tmp', outfile.write('\n'.join(materialConfig)+'\n')
os.path.splitext(file['name'])[0] +'_'+method+'_'+str(nSamples)+'%s'%('_material.config'))
# write ang file # write ang file
if options.ang: if options.ang:
with open(os.path.splitext(name)[0]+'_'+method+'_'+str(nSamples)+'.ang','w') as outfile:
sizeY = integerFactorization(nSamples) sizeY = integerFactorization(nSamples)
sizeX = nSamples / sizeY sizeX = nSamples / sizeY
print 'Writing .ang file: %i * %i = %i (== %i)'%(sizeX,sizeY,sizeX*sizeY,nSamples) table.croak('Writing .ang file: %i * %i = %i (== %i)'%(sizeX,sizeY,sizeX*sizeY,nSamples))
# write header # write header
for line in getHeader(sizeX,sizeY,1.0): outfile.write('\n'.join(TSLheader(sizeX,sizeY,1.0))+'\n')
file['outang'].write(line + '\n')
# write data # write data
counter = 0 counter = 0
for line in Orientations: for eulers in Orientations:
eulers = re.split(r'[\t]', line.strip()) outfile.write('%10.5f %10.5f %10.5f '%tuple(np.radians(eulers)) +
file['outang'].write(''.join(['%10.5f'%math.radians(float(angle)) for angle in eulers])+ '%10.5f %10.5f '%(counter%sizeX,counter//sizeX) +
''.join(['%10.5f'%coord for coord in [counter%sizeX,counter//sizeX]])+
'100.0 1.0 0 1 1.0\n') '100.0 1.0 0 1 1.0\n')
counter += 1 counter += 1
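The counter bookkeeping lays the nSamples orientations out on a sizeX × sizeY raster, which is why integerFactorization splits nSamples into two factors first. The x/y columns are just the divmod decomposition of the running index, e.g.:

sizeX, nSamples = 4, 12                  # e.g. nSamples = 12 split into a 4 x 3 raster
for counter in range(nSamples):
  y,x = divmod(counter, sizeX)           # same as counter//sizeX, counter%sizeX
  # (x,y) is the raster position written alongside each orientation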
#--- output finalization -------------------------------------------------------------------------- #--- output finalization --------------------------------------------------------------------------
if file['name'] != 'STDIN':
file['outang'].close() table.close()
os.rename(file['name']+'_ang_tmp',
os.path.splitext(file['name'])[0] +'_'+method+'_'+str(nSamples)+'%s'%('.ang'))

View File

@@ -12,136 +12,118 @@ scriptName = os.path.splitext(scriptID.split()[1])[0]
#-------------------------------------------------------------------------------------------------- #--------------------------------------------------------------------------------------------------
# MAIN # MAIN
#-------------------------------------------------------------------------------------------------- #--------------------------------------------------------------------------------------------------
identifiers = {
'grid': ['a','b','c'],
'size': ['x','y','z'],
'origin': ['x','y','z'],
}
mappings = {
'grid': lambda x: int(x),
'size': lambda x: float(x),
'origin': lambda x: float(x),
'microstructures': lambda x: int(x),
}
parser = OptionParser(option_class=damask.extendableOption, usage='%prog [seedsfile[s]]', description = """ parser = OptionParser(option_class=damask.extendableOption, usage='%prog [seedsfile[s]]', description = """
Produce VTK point mesh from seeds file Produce VTK point mesh from seeds file
""", version = scriptID) """, version = scriptID)
parser.add_option('-s', '--size', dest='size', type='float', nargs = 3, metavar='float float float',\ parser.add_option('-s', '--size',
dest = 'size',
type = 'float', nargs = 3, metavar = 'float float float',
help = 'x,y,z size of hexahedral box [1.0 along largest grid point number]') help = 'x,y,z size of hexahedral box [1.0 along largest grid point number]')
parser.add_option('-p','--position',
dest = 'position',
type = 'string', metavar = 'string',
help = 'column label for coordinates [%default]')
parser.set_defaults(size = [0.0,0.0,0.0]) parser.set_defaults(size = [0.0,0.0,0.0],
position = 'pos',
)
(options, filenames) = parser.parse_args() (options, filenames) = parser.parse_args()
# --- loop over input files ------------------------------------------------------------------------- # --- loop over input files -------------------------------------------------------------------------
if filenames == []:
filenames = ['STDIN'] if filenames == []: filenames = ['STDIN']
for name in filenames: for name in filenames:
if name == 'STDIN': if not (name == 'STDIN' or os.path.exists(name)): continue
file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr} table = damask.ASCIItable(name = name, outname = None,
file['croak'].write('\033[1m'+scriptName+'\033[0m\n') buffered = False, readonly = True)
else: table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
if not os.path.exists(name): continue
file = {'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr} # --- interpret header ----------------------------------------------------------------------------
file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
table = damask.ASCIItable(file['input'],file['output'],buffered = False)
table.head_read() table.head_read()
info,extra_header = table.head_getGeom()
table.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))),
'size x y z: %s'%(' x '.join(map(str,info['size']))),
'origin x y z: %s'%(' : '.join(map(str,info['origin']))),
'homogenization: %i'%info['homogenization'],
'microstructures: %i'%info['microstructures'],
])
if np.all(table.label_index(['1_coords','2_coords','3_coords']) != -1): remarks = []
labels = ['1_coords','2_coords','3_coords'] errors = []
elif np.all(table.label_index(['x','y','z']) != -1):
labels = ['x','y','z'] if np.any(info['grid'] < 1): remarks.append('invalid grid a b c.')
if np.any(info['size'] <= 0.0) \
and np.all(info['grid'] < 1): errors.append('invalid size x y z.')
else: else:
file['croak'].write('no coordinate data (1/2/3_coords | x/y/z) found ...') for i in xrange(3):
if info['size'][i] <= 0.0: # any invalid size?
info['size'][i] = float(info['grid'][i])/max(info['grid']) # normalize to grid
remarks.append('rescaling size {} to {}...'.format({0:'x',1:'y',2:'z'}[i],info['size'][i]))
if table.label_dimension(options.position) != 3: errors.append('columns "{}" have dimension {}'.format(options.position,
table.label_dimension(options.position)))
if remarks != []: table.croak(remarks)
if errors != []:
table.croak(errors)
table.close(dismiss=True)
continue continue
labels = ['{dim}_{label}'.format(dim = 1+i,label = options.position) for i in xrange(3)]
hasGrains = table.label_index('microstructure') != -1 hasGrains = table.label_index('microstructure') != -1
labels += ['microstructure'] if hasGrains else [] labels += ['microstructure'] if hasGrains else []
table.data_readArray(labels) # read ASCIItable columns table.data_readArray(labels) # read ASCIItable columns
coords = table.data[:,:3] # assign coordinates coords = table.data[:,:3] # assign coordinates
grain = table.data[:,3].astype('i') if hasGrains else 1+np.arange(len(coords),dtype='i') # assign grains grain = table.data[:,3].astype('i') if hasGrains else 1+np.arange(len(coords),dtype='i') # assign grains
grainIDs = np.unique(grain).astype('i') # find all grainIDs present # grainIDs = np.unique(grain).astype('i') # find all grainIDs present
#--- interpret header ----------------------------------------------------------------------------
info = {
'grid': np.zeros(3,'i'),
'size': np.array(options.size),
'origin': np.zeros(3,'d'),
'microstructures': 0,
}
for header in table.info:
headitems = map(str.lower,header.split())
if len(headitems) == 0: continue
if headitems[0] in mappings.keys():
if headitems[0] in identifiers.keys():
for i in xrange(len(identifiers[headitems[0]])):
info[headitems[0]][i] = \
mappings[headitems[0]](headitems[headitems.index(identifiers[headitems[0]][i])+1])
else:
info[headitems[0]] = mappings[headitems[0]](headitems[1])
if info['microstructures'] != len(grainIDs):
file['croak'].write('grain data not matching grain count (%i)...\n'%(len(grainIDs)))
info['microstructures'] = len(grainIDs)
if np.any(info['grid'] < 1):
file['croak'].write('invalid grid a b c.\n')
continue
for i in xrange(3):
if info['size'][i] <= 0.0: # any invalid size?
info['size'][i] = float(info['grid'][i])/max(info['grid'])
file['croak'].write('rescaling size %s...\n'%{0:'x',1:'y',2:'z'}[i])
# --- generate grid -------------------------------------------------------------------------------- # --- generate grid --------------------------------------------------------------------------------
grid = vtk.vtkUnstructuredGrid() grid = vtk.vtkUnstructuredGrid()
pts = vtk.vtkPoints() pts = vtk.vtkPoints()
# --- process microstructure information -------------------------------------------------------------- # --- process microstructure information --------------------------------------------------------------
IDs = vtk.vtkIntArray() IDs = vtk.vtkIntArray()
IDs.SetNumberOfComponents(1) IDs.SetNumberOfComponents(1)
IDs.SetName("GrainID") IDs.SetName("GrainID")
for i,item in enumerate(coords): for i,item in enumerate(coords):
IDs.InsertNextValue(grain[i])
pid = pts.InsertNextPoint(item[0:3]) pid = pts.InsertNextPoint(item[0:3])
pointIds = vtk.vtkIdList() pointIds = vtk.vtkIdList()
pointIds.InsertId(0, pid) pointIds.InsertId(0, pid)
grid.InsertNextCell(1, pointIds) grid.InsertNextCell(1, pointIds)
IDs.InsertNextValue(grain[i])
grid.SetPoints(pts) grid.SetPoints(pts)
grid.GetCellData().AddArray(IDs) grid.GetCellData().AddArray(IDs)
# --- write data ----------------------------------------------------------------------------------- # --- write data -----------------------------------------------------------------------------------
if file['name'] == 'STDIN':
if name == 'STDIN':
writer = vtk.vtkUnstructuredGridWriter() writer = vtk.vtkUnstructuredGridWriter()
writer.WriteToOutputStringOn() writer.WriteToOutputStringOn()
writer.SetFileTypeToASCII() writer.SetFileTypeToASCII()
writer.SetHeader('# powered by '+scriptID) writer.SetHeader('# powered by '+scriptID)
if vtk.VTK_MAJOR_VERSION <= 5: if vtk.VTK_MAJOR_VERSION <= 5: writer.SetInput(grid)
writer.SetInput(grid) else: writer.SetInputData(grid)
else:
writer.SetInputData(grid)
writer.Write() writer.Write()
sys.stdout.write(writer.GetOutputString()[0:writer.GetOutputStringLength()]) sys.stdout.write(writer.GetOutputString()[0:writer.GetOutputStringLength()])
else: else:
table.close(dismiss=True) (dir,filename) = os.path.split(name)
(head,tail) = os.path.split(file['name'])
writer = vtk.vtkXMLUnstructuredGridWriter() writer = vtk.vtkXMLUnstructuredGridWriter()
writer.SetDataModeToBinary() writer.SetDataModeToBinary()
writer.SetCompressorTypeToZLib() writer.SetCompressorTypeToZLib()
writer.SetFileName(os.path.join(head,'seeds_'+os.path.splitext(tail)[0] writer.SetFileName(os.path.join(dir,'seeds_'+os.path.splitext(filename)[0]
+'.'+writer.GetDefaultFileExtension())) +'.'+writer.GetDefaultFileExtension()))
if vtk.VTK_MAJOR_VERSION <= 5: if vtk.VTK_MAJOR_VERSION <= 5: writer.SetInput(grid)
writer.SetInput(grid) else: writer.SetInputData(grid)
else:
writer.SetInputData(grid)
writer.Write() writer.Write()
table.close()

View File

@@ -12,155 +12,103 @@ scriptName = os.path.splitext(scriptID.split()[1])[0]
#-------------------------------------------------------------------------------------------------- #--------------------------------------------------------------------------------------------------
# MAIN # MAIN
#-------------------------------------------------------------------------------------------------- #--------------------------------------------------------------------------------------------------
identifiers = {
'grid': ['a','b','c'],
'size': ['x','y','z'],
'origin': ['x','y','z'],
}
mappings = {
'grid': lambda x: int(x),
'size': lambda x: float(x),
'origin': lambda x: float(x),
'homogenization': lambda x: int(x),
'microstructures': lambda x: int(x),
}
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """ parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
Create seed file taking microstructure indices from given geom file but excluding black-listed grains. Create seed file taking microstructure indices from given geom file but excluding black-listed grains.
""", version = scriptID) """, version = scriptID)
parser.add_option('-w','--white', dest='whitelist', action='extend', \ parser.add_option('-w','--white',
help='white list of grain IDs', metavar='<LIST>') action = 'extend', metavar='<int LIST>',
parser.add_option('-b','--black', dest='blacklist', action='extend', \ dest = 'whitelist',
help='black list of grain IDs', metavar='<LIST>') help = 'whitelist of grain IDs')
parser.add_option('-b','--black',
action = 'extend', metavar='<int LIST>',
dest = 'blacklist',
help = 'blacklist of grain IDs')
parser.add_option('-p','--position',
dest = 'position',
type = 'string', metavar = 'string',
help = 'column label for coordinates [%default]')
parser.set_defaults(whitelist = []) parser.set_defaults(whitelist = [],
parser.set_defaults(blacklist = []) blacklist = [],
position = 'pos',
)
(options,filenames) = parser.parse_args() (options,filenames) = parser.parse_args()
options.whitelist = map(int,options.whitelist) options.whitelist = map(int,options.whitelist)
options.blacklist = map(int,options.blacklist) options.blacklist = map(int,options.blacklist)
#--- setup file handles -------------------------------------------------------------------------- # --- loop over output files -------------------------------------------------------------------------
files = []
if filenames == []: if filenames == []: filenames = ['STDIN']
files.append({'name':'STDIN',
'input':sys.stdin,
'output':sys.stdout,
'croak':sys.stderr,
})
else:
for name in filenames: for name in filenames:
if os.path.exists(name): if not (name == 'STDIN' or os.path.exists(name)): continue
files.append({'name':name, table = damask.ASCIItable(name = name, outname = os.path.splitext(name)[0]+'.seeds',
'input':open(name), buffered = False, labeled = False)
'output':open(os.path.splitext(name)[0]+'.seeds','w'), table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
'croak':sys.stdout,
})
#--- loop over input files ------------------------------------------------------------------------
for file in files:
file['croak'].write('\033[1m' + scriptName + '\033[0m: ' + (file['name'] if file['name'] != 'STDIN' else '') + '\n')
table = damask.ASCIItable(file['input'],file['output'],labels = False,buffered = False)
table.head_read()
# --- interpret header ---------------------------------------------------------------------------- # --- interpret header ----------------------------------------------------------------------------
info = {
'grid': np.zeros(3,'i'),
'size': np.zeros(3,'d'),
'origin': np.zeros(3,'d'),
'homogenization': 0,
'microstructures': 0,
}
newInfo = {
'grid': np.zeros(3,'i'),
'origin': np.zeros(3,'d'),
'microstructures': 0,
}
extra_header = []
for header in table.info: table.head_read()
headitems = map(str.lower,header.split()) info,extra_header = table.head_getGeom()
if len(headitems) == 0: continue # skip blank lines
if headitems[0] in mappings.keys():
if headitems[0] in identifiers.keys():
for i in xrange(len(identifiers[headitems[0]])):
info[headitems[0]][i] = \
mappings[headitems[0]](headitems[headitems.index(identifiers[headitems[0]][i])+1])
else:
info[headitems[0]] = mappings[headitems[0]](headitems[1])
else:
extra_header.append(header)
file['croak'].write('grid a b c: %s\n'%(' x '.join(map(str,info['grid']))) + \ table.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))),
'size x y z: %s\n'%(' x '.join(map(str,info['size']))) + \ 'size x y z: %s'%(' x '.join(map(str,info['size']))),
'origin x y z: %s\n'%(' : '.join(map(str,info['origin']))) + \ 'origin x y z: %s'%(' : '.join(map(str,info['origin']))),
'homogenization: %i\n'%info['homogenization'] + \ 'homogenization: %i'%info['homogenization'],
'microstructures: %i\n'%info['microstructures']) 'microstructures: %i'%info['microstructures'],
])
if np.any(info['grid'] < 1): errors = []
file['croak'].write('invalid grid a b c.\n') if np.any(info['grid'] < 1): errors.append('invalid grid a b c.')
if np.any(info['size'] <= 0.0): errors.append('invalid size x y z.')
if errors != []:
table.croak(errors)
table.close(dismiss = True)
continue continue
if np.any(info['size'] <= 0.0):
file['croak'].write('invalid size x y z.\n')
continue
if 'origin' not in info:
info['origin'] = np.zeros(3)
# --- read data ------------------------------------------------------------------------------------ # --- read data ------------------------------------------------------------------------------------
microstructure = np.zeros(info['grid'].prod(),'i') # initialize as flat array
i = 0
while table.data_read():
items = table.data
if len(items) > 2:
if items[1].lower() == 'of': items = [int(items[2])]*int(items[0])
elif items[1].lower() == 'to': items = xrange(int(items[0]),1+int(items[2]))
else: items = map(int,items)
else: items = map(int,items)
s = len(items) microstructure = table.microstructure_read(info['grid']) # read (linear) microstructure
microstructure[i:i+s] = items
i += s
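The deleted loop (now delegated to table.microstructure_read) expands DAMASK's compressed geom notation, where a data line may read 'n of m' (n copies of index m) or 'a to b' (an inclusive index range). A standalone sketch of that expansion, assuming those two keywords are the only compressed forms:

def expand(words):                                      # words: tokens of one geom data line
  if len(words) > 2 and words[1].lower() == 'of':
    return [int(words[2])]*int(words[0])                # "n of m" --> n copies of m
  if len(words) > 2 and words[1].lower() == 'to':
    return range(int(words[0]),1+int(words[2]))         # "a to b" --> a..b inclusive
  return map(int,words)                                 # plain whitespace-separated indices

expand('3 of 7'.split())     # [7, 7, 7]
expand('2 to 5'.split())     # [2, 3, 4, 5]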
# --- generate grid --------------------------------------------------------------------------------
x = (0.5 + np.arange(info['grid'][0],dtype=float))/info['grid'][0]*info['size'][0]+info['origin'][0]
y = (0.5 + np.arange(info['grid'][1],dtype=float))/info['grid'][1]*info['size'][1]+info['origin'][1]
z = (0.5 + np.arange(info['grid'][2],dtype=float))/info['grid'][2]*info['size'][2]+info['origin'][2]
xx = np.tile( x, info['grid'][1]* info['grid'][2])
yy = np.tile(np.repeat(y,info['grid'][0] ),info['grid'][2])
zz = np.repeat(z,info['grid'][0]*info['grid'][1])
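The tile/repeat stack generates cell-centered coordinates for the whole grid with x cycling fastest, consistent with the Fortran-order storage of geom data; np.meshgrid(...,indexing='ij') flattened in Fortran order yields the same points. A small equivalence check (values arbitrary):

import numpy as np

grid = np.array([2,3,4]); size = np.ones(3); origin = np.zeros(3)
x,y,z = [ (0.5+np.arange(grid[i],dtype=float))/grid[i]*size[i]+origin[i] for i in range(3) ]
xx = np.tile( x, grid[1]*grid[2])
yy = np.tile(np.repeat(y,grid[0]), grid[2])
zz = np.repeat(z,grid[0]*grid[1])
X,Y,Z = np.meshgrid(x,y,z,indexing='ij')                        # same points, built differently
assert np.allclose(np.column_stack((xx,yy,zz)),
                   np.column_stack((X.flatten(order='F'),Y.flatten(order='F'),Z.flatten(order='F'))))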
mask = np.logical_and(np.in1d(microstructure,options.whitelist,invert=False) if options.whitelist != [] else np.full_like(microstructure,True,dtype=bool),
np.in1d(microstructure,options.blacklist,invert=True ) if options.blacklist != [] else np.full_like(microstructure,True,dtype=bool))
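The two np.in1d tests replace the old per-voxel if-cascade: a voxel survives if it is on the whitelist (when one is given) and off the blacklist (when one is given). For example, with made-up values:

import numpy as np

microstructure = np.array([1,2,3,2,4,1])
white, black = [1,2], [2]
mask = np.logical_and(np.in1d(microstructure,white) if white != [] else np.ones(len(microstructure),dtype=bool),
                      np.in1d(microstructure,black,invert=True) if black != [] else np.ones(len(microstructure),dtype=bool))
# mask --> [ True False False False False  True]: only microstructure 1 survives both lists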
# ------------------------------------------ assemble header --------------------------------------- # ------------------------------------------ assemble header ---------------------------------------
table.info = [ table.info_clear()
scriptID, table.info_append(extra_header+[
"grid\ta %i\tb %i\tc %i"%(info['grid'][0],info['grid'][1],info['grid'][2],), scriptID + ' ' + ' '.join(sys.argv[1:]),
"size\tx %i\ty %i\tz %i"%(info['size'][0],info['size'][1],info['size'][2],), "grid\ta {grid[0]}\tb {grid[1]}\tc {grid[2]}".format(grid=newInfo['grid']),
"origin\tx %i\ty %i\tz %i"%(info['origin'][0],info['origin'][1],info['origin'][2],), "size\tx {size[0]}\ty {size[1]}\tz {size[2]}".format(size=newInfo['size']),
] "origin\tx {origin[0]}\ty {origin[1]}\tz {origin[2]}".format(origin=info['origin']),
"homogenization\t{homog}".format(homog=info['homogenization']),
"microstructures\t{microstructures}".format(microstructures=newInfo['microstructures']),
])
table.labels_clear() table.labels_clear()
table.labels_append(['1_coords','2_coords','3_coords','microstructure']) # implicitly switching label processing/writing on table.labels_append(['{dim}_{label}'.format(dim = 1+i,label = options.position) for i in range(3)]+['microstructure'])
table.head_write() table.head_write()
table.output_flush()
#--- filtering of grain voxels ------------------------------------------------------------------------------------ # --- write seeds information ------------------------------------------------------------
table.data_clear()
i = 0
outputDead = False
coord = np.zeros(3,'d')
for coord[2] in xrange(info['grid'][2]):
for coord[1] in xrange(info['grid'][1]):
for coord[0] in xrange(info['grid'][0]):
if (options.whitelist == [] and options.blacklist == []) or \
(options.whitelist != [] and microstructure[i] in options.whitelist) or \
(options.blacklist != [] and microstructure[i] not in options.blacklist):
table.data = list((coord+0.5)/info['grid'])+[microstructure[i]]
outputDead = not table.data_write()
i += 1
if outputDead: break
if outputDead: break
if outputDead: break
# ------------------------------------------ output result --------------------------------------- table.data = np.squeeze(np.dstack((xx,yy,zz,microstructure)))[mask]
table.data_writeArray()
outputDead or table.output_flush() # just in case of buffered ASCII table # ------------------------------------------ finalize output ---------------------------------------
table.input_close() # close input ASCII table table.close()
if file['name'] != 'STDIN':
table.output_close() # close output ASCII table

View File

@@ -11,18 +11,6 @@ scriptName = scriptID.split()[1]
# -------------------------------------------------------------------- # --------------------------------------------------------------------
# MAIN # MAIN
# -------------------------------------------------------------------- # --------------------------------------------------------------------
identifiers = {
'grid': ['a','b','c'],
'size': ['x','y','z'],
'origin': ['x','y','z'],
}
mappings = {
'grid': lambda x: int(x),
'size': lambda x: float(x),
'origin': lambda x: float(x),
'homogenization': lambda x: int(x),
'microstructures': lambda x: int(x),
}
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """ parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
Create seeds file by poking at 45 degrees through given geom file. Create seeds file by poking at 45 degrees through given geom file.
@@ -30,101 +18,81 @@ Mimics APS Beamline 34-ID-E DAXM poking.
""", version = scriptID) """, version = scriptID)
parser.add_option('-N', '--points', dest='N', type='int', metavar='int', \ parser.add_option('-N', '--points',
dest = 'N',
type = 'int', metavar = 'int',
help = 'number of poking locations [%default]') help = 'number of poking locations [%default]')
parser.add_option('-z', '--planes', dest='z', type='float', nargs = 2, metavar='float float', \ parser.add_option('-z', '--planes',
dest = 'z',
type = 'float', nargs = 2, metavar='float float',
help = 'top and bottom z plane') help = 'top and bottom z plane')
parser.add_option('-x', action='store_true', dest='x', \ parser.add_option('-x',
action = 'store_true',
dest = 'x',
help = 'poke 45 deg along x') help = 'poke 45 deg along x')
parser.add_option('-y', action='store_true', dest='y', \ parser.add_option('-y',
action = 'store_true',
dest = 'y',
help = 'poke 45 deg along y') help = 'poke 45 deg along y')
parser.add_option('-p','--position',
dest = 'position',
type = 'string', metavar = 'string',
help = 'column label for coordinates [%default]')
parser.set_defaults(x = False) parser.set_defaults(x = False,
parser.set_defaults(y = False) y = False,
parser.set_defaults(N = 16) N = 16,
position = 'pos',
)
(options,filenames) = parser.parse_args() (options,filenames) = parser.parse_args()
# --- loop over input files ------------------------------------------------------------------------- # --- loop over output files -------------------------------------------------------------------------
if filenames == []:
filenames = ['STDIN'] if filenames == []: filenames = ['STDIN']
for name in filenames: for name in filenames:
if name == 'STDIN': if not (name == 'STDIN' or os.path.exists(name)): continue
file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr} table = damask.ASCIItable(name = name, outname = name+'_tmp',
file['croak'].write('\033[1m'+scriptName+'\033[0m\n') buffered = False, labeled = False)
else: table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
if not os.path.exists(name): continue
file = {'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr}
file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
theTable = damask.ASCIItable(file['input'],file['output'],labels = False)
theTable.head_read()
# --- interpret header ---------------------------------------------------------------------------- # --- interpret header ----------------------------------------------------------------------------
info = {
'grid': np.zeros(3,'i'),
'size': np.zeros(3,'d'),
'origin': np.zeros(3,'d'),
'homogenization': 0,
'microstructures': 0,
}
newInfo = {
'microstructures': 0,
}
extra_header = []
for header in theTable.info: table.head_read()
headitems = map(str.lower,header.split()) info,extra_header = table.head_getGeom()
if len(headitems) == 0: continue
if headitems[0] in mappings.keys():
if headitems[0] in identifiers.keys():
for i in xrange(len(identifiers[headitems[0]])):
info[headitems[0]][i] = \
mappings[headitems[0]](headitems[headitems.index(identifiers[headitems[0]][i])+1])
else:
info[headitems[0]] = mappings[headitems[0]](headitems[1])
else:
extra_header.append(header)
file['croak'].write('grid a b c: %s\n'%(' x '.join(map(str,info['grid']))) + \ table.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))),
'size x y z: %s\n'%(' x '.join(map(str,info['size']))) + \ 'size x y z: %s'%(' x '.join(map(str,info['size']))),
'origin x y z: %s\n'%(' : '.join(map(str,info['origin']))) + \ 'origin x y z: %s'%(' : '.join(map(str,info['origin']))),
'homogenization: %i\n'%info['homogenization'] + \ 'homogenization: %i'%info['homogenization'],
'microstructures: %i\n'%info['microstructures']) 'microstructures: %i'%info['microstructures'],
])
if np.any(info['grid'] < 1): errors = []
file['croak'].write('invalid grid a b c.\n') if np.any(info['grid'] < 1): errors.append('invalid grid a b c.')
continue if np.any(info['size'] <= 0.0): errors.append('invalid size x y z.')
if np.any(info['size'] <= 0.0): if errors != []:
file['croak'].write('invalid size x y z.\n') table.croak(errors)
table.close(dismiss = True)
continue continue
# --- read data ------------------------------------------------------------------------------------ # --- read data ------------------------------------------------------------------------------------
microstructure = np.zeros(info['grid'].prod(),'i')
i = 0
while theTable.data_read(): microstructure = table.microstructure_read(info['grid']).reshape(info['grid'],order='F') # read microstructure
items = theTable.data
if len(items) > 2:
if items[1].lower() == 'of': items = [int(items[2])]*int(items[0])
elif items[1].lower() == 'to': items = xrange(int(items[0]),1+int(items[2]))
else: items = map(int,items)
else: items = map(int,items)
s = len(items)
microstructure[i:i+s] = items
i += s
# --- do work ------------------------------------------------------------------------------------ # --- do work ------------------------------------------------------------------------------------
newInfo = {
'microstructures': 0,
}
Nx = int(options.N/math.sqrt(options.N*info['size'][1]/info['size'][0])) Nx = int(options.N/math.sqrt(options.N*info['size'][1]/info['size'][0]))
Ny = int(options.N/math.sqrt(options.N*info['size'][0]/info['size'][1])) Ny = int(options.N/math.sqrt(options.N*info['size'][0]/info['size'][1]))
Nz = int((max(options.z)-min(options.z))/info['size'][2]*info['grid'][2]) Nz = int((max(options.z)-min(options.z))/info['size'][2]*info['grid'][2])
file['croak'].write('poking %i x %i x %i...\n'%(Nx,Ny,Nz)) table.croak('poking {0} x {1} x {2}...'.format(Nx,Ny,Nz))
microstructure = microstructure.reshape(info['grid'],order='F')
seeds = np.zeros((Nx*Ny*Nz,4),'d') seeds = np.zeros((Nx*Ny*Nz,4),'d')
grid = np.zeros(3,'i') grid = np.zeros(3,'i')
@@ -140,39 +108,41 @@ for name in filenames:
coordinates = (0.5+grid)*info['size']/info['grid'] coordinates = (0.5+grid)*info['size']/info['grid']
seeds[n,0:3] = coordinates/info['size'] # normalize coordinates to box seeds[n,0:3] = coordinates/info['size'] # normalize coordinates to box
seeds[n, 3] = microstructure[grid[0],grid[1],grid[2]] seeds[n, 3] = microstructure[grid[0],grid[1],grid[2]]
# file['croak'].write('%s\t%i\n'%(str(seeds[n,:3]),seeds[n,3]))
if options.x: grid[0] += 1 if options.x: grid[0] += 1
if options.y: grid[1] += 1 if options.y: grid[1] += 1
n += 1 n += 1
# file['croak'].write('\n')
newInfo['microstructures'] = len(np.unique(seeds[:,3])) newInfo['microstructures'] = len(np.unique(seeds[:,3]))
# --- report --------------------------------------------------------------------------------------- # --- report ---------------------------------------------------------------------------------------
if (newInfo['microstructures'] != info['microstructures']):
file['croak'].write('--> microstructures: %i\n'%newInfo['microstructures'])
#--- write header --------------------------------------------------------------------------------- remarks = []
theTable.labels_clear() if ( newInfo['microstructures'] != info['microstructures']): remarks.append('--> microstructures: %i'%newInfo['microstructures'])
theTable.labels_append(['x','y','z','microstructure']) if remarks != []: table.croak(remarks)
theTable.info_clear()
theTable.info_append(extra_header+[ # ------------------------------------------ assemble header ---------------------------------------
scriptID,
"grid\ta %i\tb %i\tc %i"%(info['grid'][0],info['grid'][1],info['grid'][2],), table.info_clear()
"size\tx %f\ty %f\tz %f"%(info['size'][0],info['size'][1],info['size'][2],), table.info_append(extra_header+[
"origin\tx %f\ty %f\tz %f"%(info['origin'][0],info['origin'][1],info['origin'][2],), scriptID + ' ' + ' '.join(sys.argv[1:]),
"homogenization\t%i"%info['homogenization'], "grid\ta {grid[0]}\tb {grid[1]}\tc {grid[2]}".format(grid=newInfo['grid']),
"microstructures\t%i"%(newInfo['microstructures']), "size\tx {size[0]}\ty {size[1]}\tz {size[2]}".format(size=newInfo['size']),
"origin\tx {origin[0]}\ty {origin[1]}\tz {origin[2]}".format(origin=info['origin']),
"homogenization\t{homog}".format(homog=info['homogenization']),
"microstructures\t{microstructures}".format(microstructures=newInfo['microstructures']),
]) ])
table.labels_clear()
table.labels_append(['{dim}_{label}'.format(dim = 1+i,label = options.position) for i in range(3)]+['microstructure'])
table.head_write()
table.output_flush()
# --- write seeds information ------------------------------------------------------------
theTable.head_write()
theTable.output_flush()
theTable.data = seeds table.data = seeds
theTable.data_writeArray('%g') table.data_writeArray()
theTable.output_flush()
# --- output finalization -------------------------------------------------------------------------- # --- output finalization --------------------------------------------------------------------------
if file['name'] != 'STDIN':
theTable.close() table.close() # close ASCII table
os.rename(file['name']+'_tmp',os.path.splitext(file['name'])[0] + '_poked_%ix%ix%i.seeds'%(Nx,Ny,Nz)) if name != 'STDIN':
os.rename(name+'_tmp',os.path.splitext(name)[0] + '_poked_%ix%ix%i.seeds'%(Nx,Ny,Nz))

View File

@@ -11,6 +11,21 @@ from scipy import spatial
scriptID = string.replace('$Id$','\n','\\n') scriptID = string.replace('$Id$','\n','\\n')
scriptName = os.path.splitext(scriptID.split()[1])[0] scriptName = os.path.splitext(scriptID.split()[1])[0]
# ------------------------------------------ aux functions ---------------------------------
def kdtree_search(cloud, queryPoints):
'''
find distances to nearest neighbor among cloud (N,d) for each of the queryPoints (n,d)
'''
n = queryPoints.shape[0]
distances = np.zeros(n,dtype=float)
tree = spatial.cKDTree(cloud)
for i in xrange(n):
distances[i], index = tree.query(queryPoints[i])
return distances
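The helper queries the tree once per point, which is fine at these sizes, but cKDTree.query also accepts the whole (n,d) array at once and skips the Python loop. Equivalent vectorized usage, under that assumption:

import numpy as np
from scipy import spatial

cloud       = np.random.random((100,3))
queryPoints = np.random.random(( 10,3))
distances,indices = spatial.cKDTree(cloud).query(queryPoints)    # nearest-neighbor distance per query point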
# -------------------------------------------------------------------- # --------------------------------------------------------------------
# MAIN # MAIN
# -------------------------------------------------------------------- # --------------------------------------------------------------------
@@ -21,144 +36,172 @@ Reports positions with random crystal orientations in seeds file format to STDOUT
""", version = scriptID) """, version = scriptID)
parser.add_option('-N', dest='N', type='int', metavar='int', \ parser.add_option('-N', dest='N',
type = 'int', metavar = 'int',
help = 'number of seed points to distribute [%default]') help = 'number of seed points to distribute [%default]')
parser.add_option('-g','--grid', dest='grid', type='int', nargs=3, metavar='int int int', \ parser.add_option('-g','--grid',
dest = 'grid',
type = 'int', nargs = 3, metavar = 'int int int',
help='min a,b,c grid of hexahedral box %default') help='min a,b,c grid of hexahedral box %default')
parser.add_option('-r', '--rnd', dest='randomSeed', type='int', metavar='int', \ parser.add_option('-m', '--microstructure',
dest = 'microstructure',
type = 'int', metavar='int',
help = 'first microstructure index [%default]')
parser.add_option('-r', '--rnd',
dest = 'randomSeed', type = 'int', metavar = 'int',
help = 'seed of random number generator [%default]') help = 'seed of random number generator [%default]')
parser.add_option('-w', '--weights', dest='weights', action='store_true',
help = 'assign random weights (Gaussian distribution) to seed points for Laguerre tessellation [%default]')
parser.add_option('-m', '--microstructure', dest='microstructure', type='int',
help='first microstructure index [%default]', metavar='int')
parser.add_option('-s','--selective', dest='selective', action='store_true',
help = 'selective picking of seed points from random seed points [%default]')
group = OptionGroup(parser, "Laguerre Tessellation Options", group = OptionGroup(parser, "Laguerre Tessellation Options",
"Parameters determining shape of weight distribution of seed points" "Parameters determining shape of weight distribution of seed points"
) )
group.add_option('--mean', dest='mean', type='float', metavar='float', \ group.add_option('-w', '--weights',
help='mean of Gaussian Distribution for weights [%default]') action = 'store_true',
group.add_option('--sigma', dest='sigma', type='float', metavar='float', \ dest = 'weights',
help='standard deviation of Gaussian Distribution for weights [%default]') help = 'assign random weigts (normal distribution) to seed points for Laguerre tessellation [%default]')
group.add_option('--mean',
dest = 'mean',
type = 'float', metavar = 'float',
help = 'mean of normal distribution for weights [%default]')
group.add_option('--sigma',
dest = 'sigma',
type = 'float', metavar = 'float',
help='standard deviation of normal distribution for weights [%default]')
parser.add_option_group(group) parser.add_option_group(group)
group = OptionGroup(parser, "Selective Seeding Options", group = OptionGroup(parser, "Selective Seeding Options",
"More uniform distribution of seed points using Mitchell\'s Best Candidate Algorithm" "More uniform distribution of seed points using Mitchell\'s Best Candidate Algorithm"
) )
group.add_option('--distance', dest='bestDistance', type='float', metavar='float', \ group.add_option('-s','--selective',
action = 'store_true',
dest = 'selective',
help = 'selective picking of seed points from random seed points [%default]')
group.add_option('--distance',
dest = 'distance',
type = 'float', metavar = 'float',
help = 'minimum distance to the next neighbor [%default]') help = 'minimum distance to the next neighbor [%default]')
group.add_option('--numCandidates', dest='numCandidates', type='int', metavar='int', \ group.add_option('--numCandidates',
help='maximum number of points to consider for initial random points generation [%default]') dest = 'numCandidates',
type = 'int', metavar = 'int',
help = 'size of point group to select best distance from [%default]')
parser.add_option_group(group) parser.add_option_group(group)
parser.set_defaults(randomSeed = None) parser.set_defaults(randomSeed = None,
parser.set_defaults(grid = (16,16,16)) grid = (16,16,16),
parser.set_defaults(N = 20) N = 20,
parser.set_defaults(weights=False) weights = False,
parser.set_defaults(mean = 0.0) mean = 0.0,
parser.set_defaults(sigma = 1.0) sigma = 1.0,
parser.set_defaults(microstructure = 1) microstructure = 1,
parser.set_defaults(selective = False) selective = False,
parser.set_defaults(bestDistance = 0.2) distance = 0.2,
parser.set_defaults(numCandidates = 10) numCandidates = 10,
)
(options,filenames) = parser.parse_args()
(options,filename) = parser.parse_args()
options.grid = np.array(options.grid) options.grid = np.array(options.grid)
labels = "1_coords\t2_coords\t3_coords\tphi1\tPhi\tphi2\tmicrostructure"
# ------------------------------------------ Functions Definitions ---------------------------------
def kdtree_search(xyz, point) :
dist, index = spatial.cKDTree(xyz).query(np.array(point))
return dist
def generatePoint() :
return np.array([random.uniform(0,float(options.grid[0])/float(max(options.grid))), \
random.uniform(0,float(options.grid[1])/float(max(options.grid))), \
random.uniform(0,float(options.grid[2])/float(max(options.grid)))])
# ------------------------------------------ setup file handle -------------------------------------
if filename == []:
file = {'output':sys.stdout, 'croak':sys.stderr}
else:
file = {'output':open(filename[0],'w'), 'croak':sys.stderr}
gridSize = options.grid.prod() gridSize = options.grid.prod()
if gridSize == 0:
file['croak'].write('zero grid dimension for %s.\n'%(', '.join([['a','b','c'][x] for x in np.where(options.grid == 0)[0]]))) if options.randomSeed == None: options.randomSeed = int(os.urandom(4).encode('hex'), 16)
np.random.seed(options.randomSeed) # init random generators
random.seed(options.randomSeed)
# --- loop over output files -------------------------------------------------------------------------
if filenames == []: filenames = ['STDIN']
for name in filenames:
table = damask.ASCIItable(name = name, outname = name,
buffered = False, writeonly = True)
table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
# --- sanity checks -------------------------------------------------------------------------
errors = []
if gridSize == 0: errors.append('zero grid dimension for %s.'%(', '.join([['a','b','c'][x] for x in np.where(options.grid == 0)[0]])))
if options.N > gridSize/10.: errors.append('seed count exceeds 0.1 of grid points.')
if options.selective and 4./3.*math.pi*(options.distance/2.)**3*options.N > 0.5:
errors.append('maximum recommended seed point count for given distance is {}.'.format(int(3./8./math.pi/(options.distance/2.)**3)))
if errors != []:
table.croak(errors)
sys.exit() sys.exit()
if options.N > gridSize:
file['croak'].write('accommodating only %i seeds on grid.\n'%gridSize) # --- do work ------------------------------------------------------------------------------------
options.N = gridSize
randomSeed = int(os.urandom(4).encode('hex'), 16) if options.randomSeed == None else options.randomSeed
np.random.seed(randomSeed) # init random generators
random.seed(randomSeed)
grainEuler = np.random.rand(3,options.N) # create random Euler triplets grainEuler = np.random.rand(3,options.N) # create random Euler triplets
grainEuler[0,:] *= 360.0 # phi_1 is uniformly distributed grainEuler[0,:] *= 360.0 # phi_1 is uniformly distributed
grainEuler[1,:] = np.arccos(2*grainEuler[1,:]-1)*180.0/math.pi # cos(Phi) is uniformly distributed grainEuler[1,:] = np.degrees(np.arccos(2*grainEuler[1,:]-1)) # cos(Phi) is uniformly distributed
grainEuler[2,:] *= 360.0 # phi_2 is uniformly distributed grainEuler[2,:] *= 360.0 # phi_2 is uniformly distributed
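Drawing phi1 and phi2 uniformly but Phi through arccos of a uniform variate makes the samples uniform with respect to the sin(Phi) volume element of Euler space. A quick numerical sanity check of that claim (thresholds are illustrative):

import numpy as np

N = 100000
Phi = np.degrees(np.arccos(2*np.random.rand(N)-1))          # same recipe as above
c = np.cos(np.radians(Phi))                                 # cos(Phi) should be uniform on [-1,1]
print abs(c.mean()) < 0.01, abs(c.var() - 1./3.) < 0.01     # mean ~0, variance ~1/3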
microstructure=np.arange(options.microstructure,options.microstructure+options.N).reshape(1,options.N) if not options.selective:
if options.selective == False : seeds = np.zeros((3,options.N),dtype=float) # seed positions array
seedpoints = -np.ones(options.N,dtype='int') # init grid positions of seed points gridpoints = random.sample(range(gridSize),options.N) # create random permutation of all grid positions and choose first N
if options.N * 1024 < gridSize: # heuristic limit for random search seeds[0,:] = (np.mod(gridpoints ,options.grid[0])\
i = 0
while i < options.N: # until all (unique) points determined
p = np.random.randint(gridSize) # pick a location
if p not in seedpoints: # not yet taken?
seedpoints[i] = p # take it
i += 1 # advance stepper
else:
seedpoints = np.array(random.sample(range(gridSize),options.N)) # create random permutation of all grid positions and choose first N
seeds = np.zeros((3,options.N),float) # init seed positions
seeds[0,:] = (np.mod(seedpoints ,options.grid[0])\
+np.random.random()) /options.grid[0] +np.random.random()) /options.grid[0]
seeds[1,:] = (np.mod(seedpoints// options.grid[0] ,options.grid[1])\ seeds[1,:] = (np.mod(gridpoints// options.grid[0] ,options.grid[1])\
+np.random.random()) /options.grid[1] +np.random.random()) /options.grid[1]
seeds[2,:] = (np.mod(seedpoints//(options.grid[1]*options.grid[0]),options.grid[2])\ seeds[2,:] = (np.mod(gridpoints//(options.grid[1]*options.grid[0]),options.grid[2])\
+np.random.random()) /options.grid[2] +np.random.random()) /options.grid[2]
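The mod/div cascade maps each sampled linear grid index back to (a,b,c) grid coordinates with a running fastest, before the shared per-axis jitter and normalization to the unit box. The decomposition round-trips exactly:

import numpy as np

grid = np.array([4,5,6])
p = np.arange(grid.prod())                                    # every linear index
a =  np.mod(p                    ,grid[0])
b =  np.mod(p// grid[0]          ,grid[1])
c =  np.mod(p//(grid[1]*grid[0]) ,grid[2])
assert np.all(a + grid[0]*(b + grid[1]*c) == p)               # inverse mapping recovers p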
table = np.transpose(np.concatenate((seeds,grainEuler,microstructure),axis = 0))
else :
samples = generatePoint().reshape(1,3)
while samples.shape[0] < options.N :
bestDistance = options.bestDistance
for i in xrange(options.numCandidates) :
c = generatePoint()
d = kdtree_search(samples, c)
if (d > bestDistance) :
bestDistance = d
bestCandidate = c
if kdtree_search(samples,bestCandidate) != 0.0 :
samples = np.append(samples,bestCandidate.reshape(1,3),axis=0)
else: else:
continue
table = np.transpose(np.concatenate((samples.T,grainEuler,microstructure),axis = 0)) seeds = np.zeros((options.N,3),dtype=float) # seed positions array
seeds[0] = np.random.random(3)*options.grid/max(options.grid)
i = 1 # start out with one given point
if i%(options.N/100.) < 1: table.croak('.',False)
while i < options.N:
candidates = np.random.random(options.numCandidates*3).reshape(options.numCandidates,3)
distances = kdtree_search(seeds[:i],candidates)
best = distances.argmax()
if distances[best] > options.distance: # require minimum separation
seeds[i] = candidates[best] # take candidate with maximum separation to existing point cloud
i += 1
if i%(options.N/100.) < 1: table.croak('.',False)
table.croak('')
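This rewritten branch is Mitchell's best-candidate algorithm: each round proposes numCandidates random points, keeps the one farthest from the existing cloud, and accepts it only if that distance clears options.distance. The per-round step, sketched with the kdtree_search helper defined above:

import numpy as np

def bestCandidate(seeds, i, numCandidates, minDistance):
  '''propose one more point for seeds[:i]; returns None if no candidate is far enough'''
  candidates = np.random.random((numCandidates,3))
  distances  = kdtree_search(seeds[:i], candidates)       # nearest-neighbor distance per candidate
  best = distances.argmax()                               # farthest-from-cloud candidate wins
  return candidates[best] if distances[best] > minDistance else None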
seeds = np.transpose(seeds) # prepare shape for stacking
if options.weights: if options.weights:
weight = np.random.normal(loc=options.mean, scale=options.sigma, size=options.N) seeds = np.transpose(np.vstack((seeds,
table = np.append(table, weight.reshape(options.N,1), axis=1) grainEuler,
labels += "\tweight" np.arange(options.microstructure,
options.microstructure + options.N),
np.random.normal(loc=options.mean, scale=options.sigma, size=options.N),
)))
else:
seeds = np.transpose(np.vstack((seeds,
grainEuler,
np.arange(options.microstructure,
options.microstructure + options.N),
)))
# -------------------------------------- Write Data -------------------------------------------------- # ------------------------------------------ assemble header ---------------------------------------
header = ["5\theader", table.info_clear()
scriptID + " " + " ".join(sys.argv[1:]), table.info_append([
"grid\ta {}\tb {}\tc {}".format(options.grid[0],options.grid[1],options.grid[2]), scriptID + ' ' + ' '.join(sys.argv[1:]),
"grid\ta {grid[0]}\tb {grid[1]}\tc {grid[2]}".format(grid=options.grid),
"microstructures\t{}".format(options.N), "microstructures\t{}".format(options.N),
"randomSeed\t{}".format(randomSeed), "randomSeed\t{}".format(options.randomSeed),
"%s"%labels, ])
] table.labels_clear()
table.labels_append( ['{dim}_{label}'.format(dim = 1+i,label = 'pos') for i in xrange(3)] +
['{dim}_{label}'.format(dim = 1+i,label = 'Euler') for i in xrange(3)] +
['microstructure'] +
(['weight'] if options.weights else []))
table.head_write()
table.output_flush()
for line in header: # --- write seeds information ------------------------------------------------------------
file['output'].write(line+"\n")
np.savetxt(file['output'], table, fmt='%10.6f', delimiter='\t') table.data = seeds
table.data_writeArray()
# --- output finalization --------------------------------------------------------------------------
table.close() # close ASCII table

View File

@@ -13,18 +13,6 @@ scriptName = os.path.splitext(scriptID.split()[1])[0]
#--------------------------------------------------------------------------------------------------
# MAIN
#--------------------------------------------------------------------------------------------------
-identifiers = {
-        'grid':    ['a','b','c'],
-        'size':    ['x','y','z'],
-        'origin':  ['x','y','z'],
-          }
-mappings = {
-        'grid':            lambda x: int(x),
-        'size':            lambda x: float(x),
-        'origin':          lambda x: float(x),
-        'homogenization':  lambda x: int(x),
-        'microstructures': lambda x: int(x),
-          }
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
Create seed file by taking microstructure indices from given ASCIItable column.
@ -36,16 +24,27 @@ Examples:
""", version = scriptID) """, version = scriptID)
-parser.add_option('-p', '--positions', dest = 'pos', metavar = 'string',
-                  help = 'coordinate label')
-parser.add_option('--boundingbox', dest = 'box', type = 'float', nargs = 6, metavar = ' '.join(['float']*6),
-                  help = 'min (x,y,z) and max (x,y,z) coordinates of bounding box [auto]')
-parser.add_option('-i', '--index', dest = 'index', type = 'string', metavar = 'string',
-                  help = 'microstructure index label')
-parser.add_option('-w','--white', dest = 'whitelist', action = 'extend',
-                  help = 'white list of microstructure indices', metavar = '<LIST>')
-parser.add_option('-b','--black', dest = 'blacklist', action = 'extend',
-                  help = 'black list of microstructure indices', metavar = '<LIST>')
+parser.add_option('-p', '--positions',
+                  dest = 'pos',
+                  type = 'string', metavar = 'string',
+                  help = 'coordinate label [%default]')
+parser.add_option('--boundingbox',
+                  dest = 'box',
+                  type = 'float', nargs = 6, metavar = ' '.join(['float']*6),
+                  help = 'min (x,y,z) and max (x,y,z) coordinates of bounding box [tight]')
+parser.add_option('-i', '--index',
+                  dest = 'index',
+                  type = 'string', metavar = 'string',
+                  help = 'microstructure index label [%default]')
+parser.add_option('-w','--white',
+                  dest = 'whitelist',
+                  action = 'extend', metavar = '<int LIST>',
+                  help = 'whitelist of microstructure indices')
+parser.add_option('-b','--black',
+                  dest = 'blacklist',
+                  action = 'extend', metavar = '<int LIST>',
+                  help = 'blacklist of microstructure indices')

 parser.set_defaults(pos = 'pos',
                     index ='microstructure',
                    )
@ -57,58 +56,51 @@ if options.blacklist != None: options.blacklist = map(int,options.blacklist)
# --- loop over input files -------------------------------------------------------------------------

-if filenames == []:
-  filenames = ['STDIN']
+if filenames == []: filenames = ['STDIN']

 for name in filenames:
-  if name == 'STDIN':
-    file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr}
-    file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
-  else:
-    if not os.path.exists(name): continue
-    file = {'name':name,
-            'input':open(name),
-            'output':open(os.path.splitext(name)[0]+ \
-                          ('' if options.label == None else '_'+options.label)+ \
-                          '.png','w'),
-            'croak':sys.stderr}
-    file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
-  table = damask.ASCIItable(file['input'],file['output'],
-                            buffered = False)                                       # make unbuffered ASCII_table
+  if not (name == 'STDIN' or os.path.exists(name)): continue
+  table = damask.ASCIItable(name = name, outname = os.path.splitext(name)[0]+'.seeds',
+                            buffered = False)
+  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
 table.head_read()                                                                   # read ASCII header info

-# ------------------------------------------ process data ------------------------------------------
+# ------------------------------------------ sanity checks ---------------------------------------

+missing_labels = table.data_readArray([options.pos,options.index])

 errors = []
-missing_labels = table.data_readArray(options.pos,options.label)
 if len(missing_labels) > 0:
-  errors.append('column%s %s not found'%('s' if len(missing_labels) > 1 else '',
-                                         ', '.join(missing_labels)))
+  errors.append('column{} {} not found'.format('s' if len(missing_labels) > 1 else '',
+                                               ', '.join(missing_labels)))
 for label, dim in {options.pos: 3,
-                   options.label: 1}.iteritems():
+                   options.index: 1}.iteritems():
   if table.label_dimension(label) != dim:
-    errors.append('column %s has wrong dimension'%label)
+    errors.append('column {} has wrong dimension'.format(label))

 if errors != []:
-  file['croak'].write('\n'.join(errors))
+  table.croak(errors)
   table.close(dismiss = True)                                                       # close ASCII table file handles and delete output file
   continue
-#--- finding bounding box ------------------------------------------------------------------------------------
+# ------------------------------------------ process data ------------------------------------------

+# --- finding bounding box -------------------------------------------------------------------------

 boundingBox = np.array((np.amin(table.data[:,0:3],axis = 0),np.amax(table.data[:,0:3],axis = 0)))
 if options.box:
   boundingBox[0,:] = np.minimum(options.box[0:3],boundingBox[0,:])
   boundingBox[1,:] = np.maximum(options.box[3:6],boundingBox[1,:])

-#--- rescaling coordinates ------------------------------------------------------------------------------------
+# --- rescaling coordinates ------------------------------------------------------------------------

 table.data[:,0:3] -= boundingBox[0,:]
 table.data[:,0:3] /= boundingBox[1,:]-boundingBox[0,:]
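# Worked example of the rescaling above (numbers invented): with
# boundingBox = [[0,0,0],[2,4,8]], a point at (1,1,1) maps to
# ((1-0)/2, (1-0)/4, (1-0)/8) = (0.5, 0.25, 0.125), so all seed
# coordinates land in the unit cube [0,1]^3.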
-#--- filtering of grain voxels ------------------------------------------------------------------------------------
+# --- filtering of grain voxels --------------------------------------------------------------------

 mask = np.logical_and(\
        np.ones_like(table.data[:,3],bool) \
        if options.whitelist == None \
@ -129,10 +121,8 @@ for name in filenames:
                                           map(str,boundingBox[1,:]-boundingBox[0,:])))))),
   ]
 table.labels_clear()
-table.labels_append(['1_coords','2_coords','3_coords','microstructure'])            # implicitly switching label processing/writing on
+table.labels_append(['1_pos','2_pos','3_pos','microstructure'])                     # implicitly switching label processing/writing on
 table.head_write()
 table.data_writeArray()
-table.output_flush()
 table.close()                                                                       # close ASCII tables