Outsourced multiple repetitive functions into the ASCIItable class.
Changed the ASCIItable API from file handles to filenames and adopted these changes in the pre- and post-processing scripts. Unified behavior and appearance, fixed assorted bugs, and improved functionality.
This commit is contained in:
parent
563d9e64dd
commit
d4e748b654
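
A minimal sketch of the API change described above (the constructor keywords are taken from the diff below; the filename 'data.txt' and the surrounding calls are hypothetical usage, not part of this commit):

    import damask

    # old API: the caller opened and managed file handles
    # table = damask.ASCIItable(open('data.txt'), open('data.txt_tmp','w'), False)

    # new API: pass filenames; the table opens/closes the files itself
    table = damask.ASCIItable(name = 'data.txt',                # input filename ('STDIN' uses sys.stdin)
                              outname = 'data.txt_tmp',         # output filename
                              buffered = False)                 # flush writes immediately
    table.head_read()                                           # parse header info and column labels
    table.croak('status reporting goes to stderr')              # unified progress reporting
    table.close()                                               # close input and output tables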
@@ -12,40 +12,60 @@ class ASCIItable():
   __slots__ = ['__IO__',
                'info',
                'labels',
                'labeled',
                'data',
               ]

 # ------------------------------------------------------------------
   def __init__(self,
-               fileIn = sys.stdin,
-               fileOut = sys.stdout,
-               buffered = False,                                           # flush writes
-               labels = True):                                             # assume table has labels
-    self.__IO__ = {'in': fileIn,
-                   'out':fileOut,
-                   'output':[],
-                   'buffered':buffered,
-                   'labels':labels,
-                   'validReadSize': 0,
-                   'readBuffer': [],                                       # buffer to hold non-advancing reads
-                  }
-    self.info = []
-    self.labels = []
-    self.data = []
+               name = 'STDIN',
+               outname = None,
+               buffered = False,                                           # flush writes
+               labeled = True,                                             # assume table has labels
+               readonly = False,                                           # no reading from file
+               writeonly = False,                                          # no writing to file
+              ):
+    self.__IO__ = {'output': [],
+                   'buffered': buffered,
+                   'labeled': labeled,                                     # header contains labels
+                   'labels': [],                                           # labels according to file info
+                   'readBuffer': [],                                       # buffer to hold non-advancing reads
+                   'dataStart': 0,
+                  }
+
+    self.__IO__.update({'in':  sys.stdin,
+                        'out': sys.stdout,
+                       } if name == 'STDIN' else
+                       {'in':  sys.stdin  if writeonly else open(name,'r'),
+                        'out': sys.stdout if readonly  else open(outname,'w'),
+                       }
+                      )
+    self.info = []
+    self.labels = []
+    self.data = []

 # ------------------------------------------------------------------
-  def _transliterateToFloat(self,x):
+  def _transliterateToFloat(self,
+                            x):
     try:
       return float(x)
     except:
       return 0.0

 # ------------------------------------------------------------------
-  def close(self,dismiss = False):
+  def croak(self,
+            what, newline = True):
+
+    sys.stderr.write(('\n'.join(map(str,what)) if not hasattr(what, "strip")
+                                                  and hasattr(what, "__getitem__")
+                                                  or  hasattr(what, "__iter__") else str(what))
+                     +('\n' if newline else '')),
+
+# ------------------------------------------------------------------
+  def close(self,
+            dismiss = False):
     self.input_close()
     self.output_flush()
     self.output_close(dismiss)

 # ------------------------------------------------------------------
@@ -86,7 +106,8 @@ class ASCIItable():
     self.__IO__['output'] = []

 # ------------------------------------------------------------------
-  def output_close(self, dismiss = False):
+  def output_close(self,
+                   dismiss = False):
     try:
       self.__IO__['out'].close()
     except:
@@ -96,51 +117,96 @@ class ASCIItable():
 # ------------------------------------------------------------------
   def head_read(self):
     '''
-       get column labels by either read the first row, or
-       --if keyword "head[*]" is present-- the last line of the header
+       get column labels by either reading
+       the first row or, if keyword "head[*]" is present,
+       the last line of the header
     '''
     import re

     try:
       self.__IO__['in'].seek(0)
     except:
       pass
-    firstline = self.__IO__['in'].readline()
-    m = re.search('(\d+)\s+head', firstline.lower())
-    if self.__IO__['labels']:                                               # table features labels
-      if m:                                                                 # found header info
-        self.info   = [self.__IO__['in'].readline().strip() for i in xrange(1,int(m.group(1)))]
-        self.labels =  self.__IO__['in'].readline().split()
-      else:                                                                 # no header info (but labels)
-        self.labels = firstline.split()
-
-      self.__IO__['validReadSize'] = len(self.labels)
+    firstline = self.__IO__['in'].readline()
+    m = re.search('(\d+)\s+head', firstline.lower())                        # search for "head" keyword
+    if self.__IO__['labeled']:                                              # table features labels
+      if m:                                                                 # found header info
+        self.info   = [self.__IO__['in'].readline().strip() for i in xrange(1,int(m.group(1)))]
+        self.labels =  self.__IO__['in'].readline().split()                 # store labels found in last line
+      else:                                                                 # no header info (but labels)
+        self.labels = firstline.split()                                     # store labels from first line
+
+      self.__IO__['labels'] = list(self.labels)                             # backup labels (make COPY, not link)

     else:                                                                   # no labels present in table
       if m:                                                                 # found header info
-        self.info = [self.__IO__['in'].readline().strip() for i in xrange(0,int(m.group(1)))]       # all header is info
+        self.info = [self.__IO__['in'].readline().strip() for i in xrange(0,int(m.group(1)))]       # all header is info ...
+                                                                                                    # ... without any labels
       else:                                                                 # otherwise file starts with data right away
         try:
           self.__IO__['in'].seek(0)                                         # try to rewind
         except:
           self.__IO__['readBuffer'] = firstline                             # or at least save data in buffer
     try:
       self.__IO__['dataStart'] = self.__IO__['in'].tell()                   # current file position is at start of data
     except(IOError):
       pass

 # ------------------------------------------------------------------
-  def head_write(self):
+  def head_write(self,
+                 header = True):
     '''
        write current header information (info + labels)
     '''
-    if self.__IO__['labels']:
-      return self.output_write ([
-                                 '%i\theader'%(len(self.info)+1),
-                                 self.info,
-                                 '\t'.join(self.labels),
-                                ])
-    else:
-      return self.output_write ([
-                                 '%i\theader'%(len(self.info)),
-                                 self.info,
-                                ])
+    head = ['{}\theader'.format(len(self.info)+self.__IO__['labeled'])] if header else []
+    head.append(self.info)
+    if self.__IO__['labeled']: head.append('\t'.join(self.labels))
+
+    return self.output_write(head)

 # ------------------------------------------------------------------
+  def head_getGeom(self):
+    '''
+       interpret geom header
+    '''
+    identifiers = {
+            'grid':   ['a','b','c'],
+            'size':   ['x','y','z'],
+            'origin': ['x','y','z'],
+                  }
+    mappings = {
+            'grid':            lambda x: int(x),
+            'size':            lambda x: float(x),
+            'origin':          lambda x: float(x),
+            'homogenization':  lambda x: int(x),
+            'microstructures': lambda x: int(x),
+               }
+    info = {
+            'grid':            np.zeros(3,'i'),
+            'size':            np.zeros(3,'d'),
+            'origin':          np.zeros(3,'d'),
+            'homogenization':  0,
+            'microstructures': 0,
+           }
+    extra_header = []
+
+    for header in self.info:
+      headitems = map(str.lower,header.split())
+      if len(headitems) == 0: continue                                      # skip blank lines
+      if headitems[0] in mappings.keys():
+        if headitems[0] in identifiers.keys():
+          for i in xrange(len(identifiers[headitems[0]])):
+            info[headitems[0]][i] = \
+              mappings[headitems[0]](headitems[headitems.index(identifiers[headitems[0]][i])+1])
+        else:
+          info[headitems[0]] = mappings[headitems[0]](headitems[1])
+      else:
+        extra_header.append(header)
+
+    return info,extra_header
+
+# ------------------------------------------------------------------
   def labels_append(self,
                     what):
@@ -155,7 +221,7 @@ class ASCIItable():
     else:
       self.labels += [what]

-    self.__IO__['labels'] = True                                            # switch on processing (in particular writing) of labels
+    self.__IO__['labeled'] = True                                           # switch on processing (in particular writing) of labels

 # ------------------------------------------------------------------
   def labels_clear(self):
@@ -163,7 +229,7 @@ class ASCIItable():
        delete existing labels and switch to no labeling
     '''
     self.labels = []
-    self.__IO__['labels'] = False
+    self.__IO__['labeled'] = False

 # ------------------------------------------------------------------
   def label_index(self,
@@ -175,20 +241,20 @@ class ASCIItable():
     '''
     from collections import Iterable

-    if isinstance(labels, Iterable) and not isinstance(labels, str):        # check whether list of labels is requested
+    if isinstance(labels, Iterable) and not isinstance(labels, str):       # check whether list of labels is requested
       idx = []
       for label in labels:
         if label != None:
           try:
-            idx.append(int(label))                                          # column given as integer number?
+            idx.append(int(label))                                         # column given as integer number?
           except ValueError:
             try:
-              idx.append(self.labels.index(label))                          # locate string in label list
+              idx.append(self.labels.index(label))                         # locate string in label list
             except ValueError:
               try:
-                idx.append(self.labels.index('1_'+label))                   # locate '1_'+string in label list
+                idx.append(self.labels.index('1_'+label))                  # locate '1_'+string in label list
               except ValueError:
-                idx.append(-1)                                              # not found...
+                idx.append(-1)                                             # not found...
     else:
       try:
         idx = int(labels)
@@ -197,7 +263,7 @@ class ASCIItable():
         idx = self.labels.index(labels)
       except ValueError:
         try:
-          idx = self.labels.index('1_'+labels)                              # locate '1_'+string in label list
+          idx = self.labels.index('1_'+labels)                             # locate '1_'+string in label list
         except ValueError:
           idx = None if labels == None else -1

@@ -214,47 +280,64 @@ class ASCIItable():

     from collections import Iterable

-    if isinstance(labels, Iterable) and not isinstance(labels, str):        # check whether list of labels is requested
+    if isinstance(labels, Iterable) and not isinstance(labels, str):       # check whether list of labels is requested
       dim = []
       for label in labels:
         if label != None:
           myDim = -1
-          try:                                                              # column given as number?
+          try:                                                             # column given as number?
             idx = int(label)
-            myDim = 1                                                       # if found has at least dimension 1
-            if self.labels[idx][:2] == '1_':                                # column has multidim indicator?
+            myDim = 1                                                      # if found has at least dimension 1
+            if self.labels[idx][:2] == '1_':                               # column has multidim indicator?
               while idx+myDim < len(self.labels) and self.labels[idx+myDim][:2] == "%i_"%(myDim+1):
-                myDim += 1                                                  # add while found
-          except ValueError:                                                # column has string label
-            if label in self.labels:                                        # can be directly found?
-              myDim = 1                                                     # scalar by definition
-            elif '1_'+label in self.labels:                                 # look for first entry of possible multidim object
-              idx = self.labels.index('1_'+label)                           # get starting column
-              myDim = 1                                                     # (at least) one-dimensional
+                myDim += 1                                                 # add while found
+          except ValueError:                                               # column has string label
+            if label in self.labels:                                       # can be directly found?
+              myDim = 1                                                    # scalar by definition
+            elif '1_'+label in self.labels:                                # look for first entry of possible multidim object
+              idx = self.labels.index('1_'+label)                          # get starting column
+              myDim = 1                                                    # (at least) one-dimensional
               while idx+myDim < len(self.labels) and self.labels[idx+myDim][:2] == "%i_"%(myDim+1):
-                myDim += 1                                                  # keep adding while going through object
+                myDim += 1                                                 # keep adding while going through object

           dim.append(myDim)
     else:
-      dim = -1                                                              # assume invalid label
+      dim = -1                                                             # assume invalid label
       idx = -1
-      try:                                                                  # column given as number?
+      try:                                                                 # column given as number?
         idx = int(labels)
-        dim = 1                                                             # if found has at least dimension 1
-        if self.labels[idx][:2] == '1_':                                    # column has multidim indicator?
+        dim = 1                                                            # if found has at least dimension 1
+        if self.labels[idx][:2] == '1_':                                   # column has multidim indicator?
           while idx+dim < len(self.labels) and self.labels[idx+dim][:2] == "%i_"%(dim+1):
-            dim += 1                                                        # add as long as found
-      except ValueError:                                                    # column has string label
-        if labels in self.labels:                                           # can be directly found?
-          dim = 1                                                           # scalar by definition
-        elif '1_'+labels in self.labels:                                    # look for first entry of possible multidim object
-          idx = self.labels.index('1_'+labels)                              # get starting column
-          dim = 1                                                           # is (at least) one-dimensional
+            dim += 1                                                       # add as long as found
+      except ValueError:                                                   # column has string label
+        if labels in self.labels:                                          # can be directly found?
+          dim = 1                                                          # scalar by definition
+        elif '1_'+labels in self.labels:                                   # look for first entry of possible multidim object
+          idx = self.labels.index('1_'+labels)                             # get starting column
+          dim = 1                                                          # is (at least) one-dimensional
           while idx+dim < len(self.labels) and self.labels[idx+dim][:2] == "%i_"%(dim+1):
-            dim += 1                                                        # keep adding while going through object
+            dim += 1                                                       # keep adding while going through object

     return np.array(dim) if isinstance(dim,list) else dim

 # ------------------------------------------------------------------
+  def label_indexrange(self,
+                       labels):
+    '''
+       tell index range for given label(s).
+       return numpy array if asked for list of labels.
+       transparently deals with label positions implicitly given as numbers or their headings given as strings.
+    '''
+
+    from collections import Iterable
+
+    start = self.label_index(labels)
+    dim   = self.label_dimension(labels)
+
+    return map(lambda a,b: xrange(a,a+b), zip(start,dim)) if isinstance(labels, Iterable) and not isinstance(labels, str) \
+      else xrange(start,start+dim)
+
+# ------------------------------------------------------------------
   def info_append(self,
                   what):
@@ -278,11 +361,14 @@ class ASCIItable():

 # ------------------------------------------------------------------
   def data_rewind(self):
-    self.__IO__['in'].seek(self.__IO__['dataStart'])                        # position file to start of data section
-    self.__IO__['readBuffer'] = []                                          # delete any non-advancing data reads
+    self.__IO__['in'].seek(self.__IO__['dataStart'])                       # position file to start of data section
+    self.__IO__['readBuffer'] = []                                         # delete any non-advancing data reads
+    self.labels = list(self.__IO__['labels'])                              # restore label info found in header (as COPY, not link)
+    self.__IO__['labeled'] = len(self.labels) > 0

 # ------------------------------------------------------------------
-  def data_skipLines(self,count):
+  def data_skipLines(self,
+                     count):
     '''
        wind forward by count number of lines
     '''
@@ -292,36 +378,27 @@ class ASCIItable():
     return alive

 # ------------------------------------------------------------------
-  def data_read(self,advance = True):
+  def data_read(self,
+                advance = True):
     '''
        read next line (possibly buffered) and parse it into data array
     '''
     if len(self.__IO__['readBuffer']) > 0:
-      line = self.__IO__['readBuffer'].pop(0)                               # take buffered content
+      line = self.__IO__['readBuffer'].pop(0)                              # take buffered content
     else:
-      line = self.__IO__['in'].readline()                                   # get next data row from file
+      line = self.__IO__['in'].readline()                                  # get next data row from file

     if not advance:
-      self.__IO__['readBuffer'].append(line)                                # keep line just read in buffer
+      self.__IO__['readBuffer'].append(line)                               # keep line just read in buffer

-    if self.__IO__['labels']:
-      items = line.split()[:self.__IO__['validReadSize']]                   # use up to valid size (label count)
-      self.data = items if len(items) == self.__IO__['validReadSize'] else []    # take if correct number of entries
+    if self.__IO__['labeled']:                                             # if table has labels
+      items = line.split()[:len(self.__IO__['labels'])]                    # use up to label count (from original file info)
+      self.data = items if len(items) == len(self.__IO__['labels']) else []      # take entries if correct number, i.e. not too few compared to label count
     else:
-      self.data = line.split()                                              # take all
+      self.data = line.split()                                             # otherwise take all

     return self.data != []

 # ------------------------------------------------------------------
   def data_readLine(self,line):
     '''
        seek beginning of data and wind forward to selected line
     '''
     self.__IO__['in'].seek(self.__IO__['dataStart'])
     for i in xrange(line-1):
       self.__IO__['in'].readline()
     self.data_read()

 # ------------------------------------------------------------------
   def data_readArray(self,
                      labels = []):
@@ -329,36 +406,37 @@ class ASCIItable():
        read whole data of all (given) labels as numpy array
     '''

-    if not isinstance(labels,list):
-      labels = [labels]
-    if labels == [None] or labels == []:
+    try:
+      self.data_rewind()                                                   # try to wind back to start of data
+    except:
+      pass                                                                 # assume/hope we are at data start already...
+
+    if labels == None or labels == []:
       use = None                                                           # use all columns (and keep labels intact)
       labels_missing = []
     else:
-      indices    = self.label_index(labels)                                # check requested labels
+      indices    = self.label_index(labels)                                # check requested labels ...
+      dimensions = self.label_dimension(labels)                            # ... and remember their dimension
       present  = np.where(indices >= 0)[0]                                 # positions in request list of labels that are present ...
       missing  = np.where(indices <  0)[0]                                 # ... and missing in table
       labels_missing = np.array(labels)[missing]                           # labels of missing data

       columns = []
-      for i,c in enumerate(indices[present]):                              # for all valid labels ...
+      for i,(c,d) in enumerate(zip(indices[present],dimensions[present])): # for all valid labels ...
         columns += range(c,c + \
-                         (self.label_dimension(c) if str(c) != str(labels[present[i]]) \
-                          else 1))                                         # ... transparently add all components unless column referenced by number
+                         (d if str(c) != str(labels[present[i]]) else \
+                          1))                                              # ... transparently add all components unless column referenced by number or with explicit dimension
       use = np.array(columns)

-      self.labels = list(np.array(self.labels)[use]) if use != [] else []  # ... for missing and present columns
-      self.__IO__['validReadSize'] = len(use)                              # update data width
+      self.labels = list(np.array(self.labels)[use])                       # update labels with valid subset

-    try:
-      self.data_rewind()                                                   # try to wind back to start of data
-    except:
-      pass                                                                 # assume/hope we are at data start already...
     self.data = np.loadtxt(self.__IO__['in'], usecols=use,ndmin=2)

     return labels_missing

 # ------------------------------------------------------------------
-  def data_write(self,delimiter = '\t'):
+  def data_write(self,
+                 delimiter = '\t'):
     '''
        write current data array and report alive output back
     '''
@@ -370,7 +448,8 @@ class ASCIItable():
     return self.output_write(delimiter.join(map(str,self.data)))

 # ------------------------------------------------------------------
-  def data_writeArray(self,format = '%g',delimiter = '\t'):
+  def data_writeArray(self,
+                      format = '%g', delimiter = '\t'):
     '''
        write whole numpy array data
     '''
@@ -389,10 +468,13 @@ class ASCIItable():

 # ------------------------------------------------------------------
   def data_set(self,
-               what,where):
+               what, where):
     '''
        update data entry in column "where". grows data array if needed.
     '''
     idx = -1
     try:
-      idx = self.labels.index(where)
+      idx = self.label_index(where)
       if len(self.data) <= idx:
         self.data_append(['n/a' for i in xrange(idx+1-len(self.data))])    # grow data if too short
       self.data[idx] = str(what)
@@ -408,3 +490,30 @@ class ASCIItable():
 # ------------------------------------------------------------------
   def data_asFloat(self):
     return map(self._transliterateToFloat,self.data)
+
+
+
+# ------------------------------------------------------------------
+  def microstructure_read(self,
+                          grid):
+    '''
+       read microstructure data (from .geom format)
+    '''
+
+    N = grid.prod()                                                         # expected number of microstructure indices in data
+    microstructure = np.zeros(N,'i')                                        # initialize as flat array
+
+    i = 0
+    while i < N and self.data_read():
+      items = self.data
+      if len(items) > 2:
+        if   items[1].lower() == 'of': items = [int(items[2])]*int(items[0])
+        elif items[1].lower() == 'to': items = range(int(items[0]),1+int(items[2]))
+        else:                          items = map(int,items)
+      else:                            items = map(int,items)
+
+      s = min(len(items), N-i)                                              # prevent overflow of microstructure array
+      microstructure[i:i+s] = items[:s]
+      i += s
+
+    return microstructure

@@ -18,104 +18,111 @@ def unravel(item):
 # --------------------------------------------------------------------

 parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
-Add column(s) with derived values according to user defined arithmetic operation between column(s).
-Columns can be specified either by label or index. Use ';' for ',' in functions.
+Add column(s) with derived values according to user-defined arithmetic operation between column(s).
+Column labels are tagged by '#label#' in formulas. Use ';' for ',' in functions.
 Numpy is available as np.

-Example: distance to IP coordinates -- "math.sqrt( #ip.x#**2 + #ip.y#**2 + round(#ip.z#;3)**2 )"
+Special variables: #_row_# -- row index
+Examples: (1) magnitude of vector -- "np.linalg.norm(#vec#)" (2) rounded root of row number -- "round(math.sqrt(#_row_#);3)"

 """, version = scriptID)

-parser.add_option('-l','--label', dest='labels', action='extend', metavar='<string LIST>',
-                  help='(list of) new column labels')
-parser.add_option('-f','--formula', dest='formulas', action='extend', metavar='<string LIST>',
-                  help='(list of) formulas corresponding to labels')
+parser.add_option('-l','--label',
+                  dest = 'labels',
+                  action = 'extend', metavar = '<string LIST>',
+                  help = '(list of) new column labels')
+parser.add_option('-f','--formula',
+                  dest = 'formulas',
+                  action = 'extend', metavar = '<string LIST>',
+                  help = '(list of) formulas corresponding to labels')

 (options,filenames) = parser.parse_args()

 if options.labels == None or options.formulas == None:
-  parser.error('no formulas and/or labels specified')
-elif len(options.labels) != len(options.formulas):
-  parser.error('number of labels (%i) and formulas (%i) do not match'%(len(options.labels),len(options.formulas)))
+  parser.error('no formulas and/or labels specified.')
+if len(options.labels) != len(options.formulas):
+  parser.error('number of labels ({}) and formulas ({}) do not match.'.format(len(options.labels),len(options.formulas)))

 for i in xrange(len(options.formulas)):
-  options.formulas[i]=options.formulas[i].replace(';',',')
+  options.formulas[i] = options.formulas[i].replace(';',',')

-# ------------------------------------------ setup file handles ------------------------------------
-files = []
-if filenames == []:
-  files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
-else:
-  for name in filenames:
-    if os.path.exists(name):
-      files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
+# --- loop over input files -------------------------------------------------------------------------

-#--- loop over input files ------------------------------------------------------------------------
-for file in files:
-  if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
-  else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
+if filenames == []: filenames = ['STDIN']
+
+for name in filenames:
+  if not (name == 'STDIN' or os.path.exists(name)): continue
+  table = damask.ASCIItable(name = name, outname = name+'_tmp',
+                            buffered = False)
+  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
+
+# ------------------------------------------ read header -------------------------------------------
+
+  table.head_read()
+
+# ------------------------------------------ build formulae ----------------------------------------

   specials = { \
               '_row_': 0,
              }

-  table = damask.ASCIItable(file['input'],file['output'],False)             # make unbuffered ASCII_table
-  table.head_read()                                                         # read ASCII header info
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-
   evaluator = {}
   brokenFormula = {}

   for label,formula in zip(options.labels,options.formulas):
-    interpolator = []
     for column in re.findall(r'#(.+?)#',formula):                           # loop over column labels in formula
-      formula = formula.replace('#'+column+'#','%f')
+      idx = table.label_index(column)
+      dim = table.label_dimension(column)
       if column in specials:
-        interpolator += ['specials["%s"]'%column]
-      elif column.isdigit():
-        if len(table.labels) > int(column):
-          interpolator += ['float(table.data[%i])'%(int(column))]
-        else:
-          file['croak'].write('column %s not found...\n'%column)
-          brokenFormula[label] = True
+        replacement = 'specials["{}"]'.format(column)
+      elif dim == 1:                                                        # scalar input
+        replacement = 'float(table.data[{}])'.format(idx)                   # take float value of data column
+      elif dim > 1:                                                         # multidimensional input (vector, tensor, etc.)
+        replacement = 'np.array(table.data[{}:{}],dtype=float)'.format(idx,idx+dim)    # use (flat) array representation
       else:
-        try:
-          interpolator += ['float(table.data[%i])'%table.labels.index(column)]
-        except:
-          file['croak'].write('column %s not found...\n'%column)
-          brokenFormula[label] = True
+        table.croak('column {} not found...'.format(column))
+        brokenFormula[label] = True
+        break
+
+      formula = formula.replace('#'+column+'#',replacement)

     if label not in brokenFormula:
-      evaluator[label] = "'" + formula + "'%(" + ','.join(interpolator) + ")"
+      evaluator[label] = formula

 # ------------------------------------------ process data ------------------------------------------
-  firstLine=True
+
+  firstLine   = True
+  outputAlive = True

   while outputAlive and table.data_read():                                  # read next data line of ASCII table
     specials['_row_'] += 1                                                  # count row

 # ------------------------------------------ calculate one result to get length of labels ---------

     if firstLine:
-      labelLen = {}
-      for label in options.labels:
-        labelLen[label] = np.size(eval(eval(evaluator[label])))
+      labelDim = {}
+      for label in [x for x in options.labels if x not in set(brokenFormula)]:
+        labelDim[label] = np.size(eval(evaluator[label]))
+        if labelDim[label] == 0: brokenFormula[label] = True

 # ------------------------------------------ assemble header ---------------------------------------
       for label,formula in zip(options.labels,options.formulas):
-        if labelLen[label] == 0:
-          brokenFormula[label] = True
         if label not in brokenFormula:
-          table.labels_append(['%i_%s'%(i+1,label) for i in xrange(labelLen[label])] if labelLen[label]>1
+          table.labels_append(['{}_{}'.format(i+1,label) for i in xrange(labelDim[label])] if labelDim[label] > 1
                               else label)
+
+      table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
       table.head_write()
       firstLine = False

-    for label in options.labels: table.data_append(unravel(eval(eval(evaluator[label]))))
+# ------------------------------------------ process data ------------------------------------------
+
+    for label in [x for x in options.labels if x not in set(brokenFormula)]:
+      table.data_append(unravel(eval(evaluator[label])))
+#      table.data_append(unravel(eval(eval(evaluator[label]))))
     outputAlive = table.data_write()                                        # output processed line

-# ------------------------------------------ output result -----------------------------------------
-  outputAlive and table.output_flush()                                      # just in case of buffered ASCII table
+# ------------------------------------------ output finalization -----------------------------------

-  table.input_close()                                                       # close input ASCII table (works for stdin)
-  table.output_close()                                                      # close output ASCII table (works for stdout)
-  if file['name'] != 'STDIN':
-    os.rename(file['name']+'_tmp',file['name'])                             # overwrite old one with tmp new
+  table.close()                                                             # close ASCII tables
+  if name != 'STDIN': os.rename(name+'_tmp',name)                           # overwrite old one with tmp new

@@ -18,59 +18,67 @@ Add column(s) containing Cauchy stress based on given column(s) of deformation g

 """, version = scriptID)

-parser.add_option('-f','--defgrad', dest='defgrad', metavar='string',
-                  help='heading of columns containing deformation gradient [%default]')
-parser.add_option('-p','--stress', dest='stress', metavar='string',
-                  help='heading of columns containing first Piola--Kirchhoff stress [%default]')
-parser.set_defaults(defgrad = 'f')
-parser.set_defaults(stress = 'p')
+parser.add_option('-f','--defgrad',
+                  dest = 'defgrad',
+                  type = 'string', metavar = 'string',
+                  help = 'heading of columns containing deformation gradient [%default]')
+parser.add_option('-p','--stress',
+                  dest = 'stress',
+                  type = 'string', metavar = 'string',
+                  help = 'heading of columns containing first Piola--Kirchhoff stress [%default]')
+
+parser.set_defaults(defgrad = 'f',
+                    stress = 'p',
+                   )

 (options,filenames) = parser.parse_args()

-# ------------------------------------------ setup file handles ------------------------------------
-files = []
-if filenames == []:
-  files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
-else:
-  for name in filenames:
-    if os.path.exists(name):
-      files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
+# --- loop over input files -------------------------------------------------------------------------

-# ------------------------------------------ loop over input files ---------------------------------
-for file in files:
-  if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
-  else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
+if filenames == []: filenames = ['STDIN']

-  table = damask.ASCIItable(file['input'],file['output'],False)             # make unbuffered ASCII_table
-  table.head_read()                                                         # read ASCII header info
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
+for name in filenames:
+  if not (name == 'STDIN' or os.path.exists(name)): continue
+  table = damask.ASCIItable(name = name, outname = name+'_tmp',
+                            buffered = False)
+  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

-# --------------- figure out columns to process ---------------------------------------------------
-  missingColumns = False
-  column={ 'defgrad': table.labels.index('1_'+options.defgrad),
-           'stress':  table.labels.index('1_'+options.stress)}
-  for key in column:
-    if column[key]<1:
-      file['croak'].write('column %s not found...\n'%key)
-      missingColumns=True
-  if missingColumns: continue
+# ------------------------------------------ read header ------------------------------------------
+
+  table.head_read()
+
+# ------------------------------------------ sanity checks ----------------------------------------
+
+  errors = []
+  column = {}
+
+  for tensor in [options.defgrad,options.stress]:
+    dim = table.label_dimension(tensor)
+    if   dim <  0: errors.append('column {} not found.'.format(tensor))
+    elif dim != 9: errors.append('column {} is not a tensor.'.format(tensor))
+    else:
+      column[tensor] = table.label_index(tensor)
+
+  if errors != []:
+    table.croak(errors)
+    table.close(dismiss = True)
+    continue
+
+# ------------------------------------------ assemble header --------------------------------------

+  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
   table.labels_append(['%i_Cauchy'%(i+1) for i in xrange(9)])               # extend ASCII header with new labels
   table.head_write()

 # ------------------------------------------ process data ------------------------------------------
   outputAlive = True
   while outputAlive and table.data_read():                                  # read next data line of ASCII table
-    F = np.array(map(float,table.data[column['defgrad']:column['defgrad']+9]),'d').reshape(3,3)
-    P = np.array(map(float,table.data[column['stress'] :column['stress']+9]),'d').reshape(3,3)
+    F = np.array(map(float,table.data[column[options.defgrad]:column[options.defgrad]+9]),'d').reshape(3,3)
+    P = np.array(map(float,table.data[column[options.stress ]:column[options.stress ]+9]),'d').reshape(3,3)
     table.data_append(list(1.0/np.linalg.det(F)*np.dot(P,F.T).reshape(9)))  # [Cauchy] = (1/det(F)) * [P].[F_transpose]
     outputAlive = table.data_write()                                        # output processed line

-# ------------------------------------------ output result -----------------------------------------
-  outputAlive and table.output_flush()                                      # just in case of buffered ASCII table
+# ------------------------------------------ output finalization -----------------------------------

-  table.input_close()                                                       # close input ASCII table (works for stdin)
-  table.output_close()                                                      # close output ASCII table (works for stdout)
-  if file['name'] != 'STDIN':
-    os.rename(file['name']+'_tmp',file['name'])                             # overwrite old one with tmp new
+  table.close()                                                             # close input ASCII table (works for stdin)
+  if name != 'STDIN': os.rename(name+'_tmp',name)                           # overwrite old one with tmp new

@@ -14,107 +14,110 @@ scriptName = os.path.splitext(scriptID.split()[1])[0]
 # --------------------------------------------------------------------

 parser = OptionParser(option_class=damask.extendableOption, usage='%prog options file[s]', description = """
-Add column containing debug information.
-Operates on periodic ordered three-dimensional data sets.
+Add column(s) containing the shape and volume mismatch resulting from given deformation gradient.
+Operates on periodic three-dimensional x,y,z-ordered data sets.

 """, version = scriptID)

-parser.add_option('--no-shape','-s', dest='noShape', action='store_false',
-                  help='do not calcuate shape mismatch')
-parser.add_option('--no-volume','-v', dest='noVolume', action='store_false',
-                  help='do not calculate volume mismatch')
-parser.add_option('-c','--coordinates', dest='coords', metavar='string',
-                  help='column heading for coordinates [%default]')
-parser.add_option('-f','--defgrad', dest='defgrad', metavar='string ',
-                  help='column heading for coordinates [%default]')
-parser.set_defaults(coords = 'ipinitialcoord')
-parser.set_defaults(defgrad = 'f')
+parser.add_option('-c','--coordinates',
+                  dest = 'coords',
+                  type = 'string', metavar = 'string',
+                  help = 'column heading of coordinates [%default]')
+parser.add_option('-f','--defgrad',
+                  dest = 'defgrad',
+                  type = 'string', metavar = 'string ',
+                  help = 'column heading of deformation gradient [%default]')
+parser.add_option('--no-shape','-s',
+                  dest = 'shape',
+                  action = 'store_false',
+                  help = 'omit shape mismatch')
+parser.add_option('--no-volume','-v',
+                  dest = 'volume',
+                  action = 'store_false',
+                  help = 'omit volume mismatch')
+parser.set_defaults(coords = 'ipinitialcoord',
+                    defgrad = 'f',
+                    shape = True,
+                    volume = True,
+                   )

 (options,filenames) = parser.parse_args()

-# ------------------------------------------ setup file handles ------------------------------------
-files = []
+# --- loop over input files -------------------------------------------------------------------------
+
+if filenames == []: filenames = ['STDIN']

 for name in filenames:
-  if os.path.exists(name):
-    files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
+  if not (name == 'STDIN' or os.path.exists(name)): continue
+  table = damask.ASCIItable(name = name, outname = name+'_tmp',
+                            buffered = False)
+  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

-#--- loop over input files -------------------------------------------------------------------------
-for file in files:
-  file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
+# ------------------------------------------ read header ------------------------------------------
+
+  table.head_read()
+
+# ------------------------------------------ sanity checks ----------------------------------------
+
+  errors  = []
+  remarks = []
+
+  if table.label_dimension(options.coords) != 3:  errors.append('coordinates {} are not a vector.'.format(options.coords))
+  else: colCoord = table.label_index(options.coords)
+
+  if table.label_dimension(options.defgrad) != 9: errors.append('deformation gradient {} is not a tensor.'.format(options.defgrad))
+  else: colF = table.label_index(options.defgrad)
+
+  if remarks != []: table.croak(remarks)
+  if errors  != []:
+    table.croak(errors)
+    table.close(dismiss = True)
+    continue
+
+# ------------------------------------------ assemble header --------------------------------------

-  table = damask.ASCIItable(file['input'],file['output'],False)             # make unbuffered ASCII_table
-  table.head_read()                                                         # read ASCII header info
   table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
+  if options.shape:  table.labels_append('shapeMismatch({})'.format(options.defgrad))
+  if options.volume: table.labels_append('volMismatch({})'.format(options.defgrad))
+  table.head_write()

 # --------------- figure out size and grid ---------------------------------------------------------
-  try:
-    locationCol = table.labels.index('1_%s'%options.coords)                 # columns containing location data
-  except ValueError:
-    try:
-      locationCol = table.labels.index('%s.x'%options.coords)               # columns containing location data (legacy naming scheme)
-    except ValueError:
-      file['croak'].write('no coordinate data (1_%s/%s.x) found...\n'%(options.coords,options.coords))
-      continue
+
+  table.data_readArray()

   coords = [{},{},{}]
-  while table.data_read():                                                  # read next data line of ASCII table
+  for i in xrange(len(table.data)):
     for j in xrange(3):
-      coords[j][str(table.data[locationCol+j])] = True                      # remember coordinate along x,y,z
-  grid = np.array([len(coords[0]),\
-                   len(coords[1]),\
-                   len(coords[2]),],'i')                                    # grid is number of distinct coordinates found
+      coords[j][str(table.data[i,colCoord+j])] = True
+  grid = np.array(map(len,coords),'i')
   size = grid/np.maximum(np.ones(3,'d'),grid-1.0)* \
          np.array([max(map(float,coords[0].keys()))-min(map(float,coords[0].keys())),\
                    max(map(float,coords[1].keys()))-min(map(float,coords[1].keys())),\
                    max(map(float,coords[2].keys()))-min(map(float,coords[2].keys())),\
                   ],'d')                                                    # size from bounding box, corrected for cell-centeredness

+  size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1]))       # spacing for grid==1 equal to smallest among other spacings
+
   N = grid.prod()

-# --------------- figure out columns to process ---------------------------------------------------
-  key = '1_%s'%options.defgrad
-  if key not in table.labels:
-    file['croak'].write('column %s not found...\n'%key)
-    continue
-  else:
-    column = table.labels.index(key)                                        # remember columns of requested data
-
-# ------------------------------------------ assemble header ---------------------------------------
-  if not options.noShape:  table.labels_append(['shapeMismatch(%s)' %options.defgrad])
-  if not options.noVolume: table.labels_append(['volMismatch(%s)'%options.defgrad])
-  table.head_write()
-
-# ------------------------------------------ read deformation gradient field -----------------------
-  table.data_rewind()
-  F = np.zeros(N*9,'d').reshape([3,3]+list(grid))
-  idx = 0
-  while table.data_read():
-    (x,y,z) = damask.util.gridLocation(idx,grid)                            # figure out (x,y,z) position from line count
-    idx += 1
-    F[0:3,0:3,x,y,z] = np.array(map(float,table.data[column:column+9]),'d').reshape(3,3)
-
-  Favg = damask.core.math.tensorAvg(F)
+# ------------------------------------------ process deformation gradient --------------------------
+
+  F = table.data[:,colF:colF+9].transpose().reshape([3,3]+list(options.dimension),order='F')
+  Favg = damask.core.math.tensorAvg(F)
   centres = damask.core.mesh.deformedCoordsFFT(size,F,Favg,[1.0,1.0,1.0])

   nodes = damask.core.mesh.nodesAroundCentres(size,Favg,centres)
-  if not options.noShape:  shapeMismatch  = damask.core.mesh.shapeMismatch( size,F,nodes,centres)
-  if not options.noVolume: volumeMismatch = damask.core.mesh.volumeMismatch(size,F,nodes)

-# ------------------------------------------ process data ------------------------------------------
-  table.data_rewind()
-  idx = 0
-  outputAlive = True
-  while outputAlive and table.data_read():                                  # read next data line of ASCII table
-    (x,y,z) = damask.util.gridLocation(idx,grid)                            # figure out (x,y,z) position from line count
-    idx += 1
-    if not options.noShape:  table.data_append( shapeMismatch[x,y,z])
-    if not options.noVolume: table.data_append(volumeMismatch[x,y,z])
-    outputAlive = table.data_write()                                        # output processed line
+  stack = [table.data]
+  if options.shape:  stack.append(damask.core.mesh.shapeMismatch( size,F,nodes,centres))
+  if options.volume: stack.append(damask.core.mesh.volumeMismatch(size,F,nodes))

-# ------------------------------------------ output result ---------------------------------------
-  outputAlive and table.output_flush()                                      # just in case of buffered ASCII table
+# ------------------------------------------ output result -----------------------------------------

-  table.input_close()                                                       # close input ASCII table
-  table.output_close()                                                      # close output ASCII table
-  os.rename(file['name']+'_tmp',file['name'])                               # overwrite old one with tmp new
+  if len(stack) > 1: table.data = np.hstack(tuple(stack))
+  table.data_writeArray('%.12g')
+
+# ------------------------------------------ output finalization -----------------------------------
+
+  table.close()                                                             # close ASCII tables
+  if name != 'STDIN': os.rename(name+'_tmp',name)                           # overwrite old one with tmp new

@@ -12,18 +12,20 @@ scriptName = os.path.splitext(scriptID.split()[1])[0]

 def curlFFT(geomdim,field):
   grid = np.array(np.shape(field)[0:3])
-  wgt = 1.0/np.array(grid).prod()
+  N = grid.prod()                                                           # field size
+  n = np.array(np.shape(field)[3:]).prod()                                  # data size
+  wgt = 1.0/N

-  if len(np.shape(field)) == 4:
+  if n == 3:
     dataType = 'vector'
-  elif len(np.shape(field)) == 5:
+  elif n == 9:
     dataType = 'tensor'

-  field_fourier=np.fft.fftpack.rfftn(field,axes=(0,1,2))
-  curl_fourier=np.zeros(field_fourier.shape,'c16')
+  field_fourier = np.fft.fftpack.rfftn(field,axes=(0,1,2))
+  curl_fourier  = np.zeros(field_fourier.shape,'c16')

 # differentiation in Fourier space
-  k_s=np.zeros([3],'i')
+  k_s = np.zeros([3],'i')
   TWOPIIMG = (0.0+2.0j*math.pi)
   for i in xrange(grid[0]):
     k_s[0] = i
@@ -34,7 +36,7 @@ def curlFFT(geomdim,field):
       for k in xrange(grid[2]/2+1):
         k_s[2] = k
         if(k > grid[2]/2 ): k_s[2] = k_s[2] - grid[2]
-        xi=np.array([k_s[2]/geomdim[2]+0.0j,k_s[1]/geomdim[1]+0.j,k_s[0]/geomdim[0]+0.j],'c16')
+        xi = np.array([k_s[2]/geomdim[2]+0.0j,k_s[1]/geomdim[1]+0.j,k_s[0]/geomdim[0]+0.j],'c16')
         if dataType == 'tensor':
           for l in xrange(3):
             curl_fourier[i,j,k,0,l] = ( field_fourier[i,j,k,l,2]*xi[1]\
@@ -50,11 +52,8 @@ def curlFFT(geomdim,field):
                                      +field_fourier[i,j,k,0]*xi[2]) *TWOPIIMG
           curl_fourier[i,j,k,2] = ( field_fourier[i,j,k,1]*xi[0]\
                                    -field_fourier[i,j,k,0]*xi[1]) *TWOPIIMG
-  curl=np.fft.fftpack.irfftn(curl_fourier,axes=(0,1,2))
-  if dataType == 'tensor':
-    return curl.reshape([grid.prod(),9])
-  if dataType == 'vector':
-    return curl.reshape([grid.prod(),3])
+
+  return np.fft.fftpack.irfftn(curl_fourier,axes=(0,1,2)).reshape([N,n])


 # --------------------------------------------------------------------
@@ -68,113 +67,108 @@ Deals with both vector- and tensor-valued fields.

 """, version = scriptID)

-parser.add_option('-c','--coordinates', dest='coords', metavar='string',
-                  help='column heading for coordinates [%default]')
-parser.add_option('-v','--vector', dest='vector', action='extend', metavar='<string LIST>',
-                  help='heading of columns containing vector field values')
-parser.add_option('-t','--tensor', dest='tensor', action='extend', metavar='<string LIST>',
-                  help='heading of columns containing tensor field values')
-parser.set_defaults(coords = 'ipinitialcoord')
+parser.add_option('-c','--coordinates',
+                  dest = 'coords',
+                  type = 'string', metavar='string',
+                  help = 'column heading for coordinates [%default]')
+parser.add_option('-v','--vector',
+                  dest = 'vector',
+                  action = 'extend', metavar = '<string LIST>',
+                  help = 'heading of columns containing vector field values')
+parser.add_option('-t','--tensor',
+                  dest = 'tensor',
+                  action = 'extend', metavar = '<string LIST>',
+                  help = 'heading of columns containing tensor field values')
+
+parser.set_defaults(coords = 'ipinitialcoord',
+                   )

 (options,filenames) = parser.parse_args()

 if options.vector == None and options.tensor == None:
-  parser.error('no data column specified...')
+  parser.error('no data column specified.')

-datainfo = {                                                                # list of requested labels per datatype
-             'vector':     {'shape':[3],
-                            'len':3,
-                            'label':[]},
-             'tensor':     {'shape':[3,3],
-                            'len':9,
-                            'label':[]},
-           }
+# --- loop over input files -------------------------------------------------------------------------

-if options.vector != None: datainfo['vector']['label'] = options.vector
-if options.tensor != None: datainfo['tensor']['label'] = options.tensor
+if filenames == []: filenames = ['STDIN']

-# ------------------------------------------ setup file handles ------------------------------------
-files = []
-if filenames == []:
-  files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
-else:
-  for name in filenames:
-    if os.path.exists(name):
-      files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
+for name in filenames:
+  if not (name == 'STDIN' or os.path.exists(name)): continue
+  table = damask.ASCIItable(name = name, outname = name+'_tmp',
+                            buffered = False)
+  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

-#--- loop over input files -------------------------------------------------------------------------
-for file in files:
-  if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
-  else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
+# ------------------------------------------ read header ------------------------------------------

-  table = damask.ASCIItable(file['input'],file['output'],False)             # make unbuffered ASCII_table
-  table.head_read()                                                         # read ASCII header info
-  table.data_readArray()
+  table.head_read()

-# --------------- figure out name of coordinate data (support for legacy .x notation) -------------
-  coordLabels=['%i_%s'%(i+1,options.coords) for i in xrange(3)]             # store labels for column keys
-  if not set(coordLabels).issubset(table.labels):
-    directions = ['x','y','z']
-    coordLabels=['%s.%s'%(options.coords,directions[i]) for i in xrange(3)] # store labels for column keys
-    if not set(coordLabels).issubset(table.labels):
-      file['croak'].write('no coordinate data (1_%s) found...\n'%options.coords)
-      continue
-  coordColumns = [table.labels.index(label) for label in coordLabels]
+# ------------------------------------------ sanity checks ----------------------------------------

-# --------------- figure out active columns -------------------------------------------------------
-  active = defaultdict(list)
-  for datatype,info in datainfo.items():
-    for label in info['label']:
-      key = '1_%s'%label
-      if key not in table.labels:
-        file['croak'].write('column %s not found...\n'%key)
+  items = {
+            'tensor': {'dim': 9, 'shape': [3,3], 'labels':options.tensor, 'active':[], 'column': []},
+            'vector': {'dim': 3, 'shape': [3],   'labels':options.vector, 'active':[], 'column': []},
+          }
+  errors  = []
+  remarks = []
+  column = {}
+
+  if table.label_dimension(options.coords) != 3: errors.append('coordinates {} are not a vector.'.format(options.coords))
+  else: coordCol = table.label_index(options.coords)
+
+  for type, data in items.iteritems():
+    for what in data['labels']:
+      dim = table.label_dimension(what)
+      if dim != data['dim']: remarks.append('column {} is not a {}.'.format(what,type))
       else:
-        active[datatype].append(label)
+        items[type]['active'].append(what)
+        items[type]['column'].append(table.label_index(what))
+
+  if remarks != []: table.croak(remarks)
+  if errors  != []:
+    table.croak(errors)
+    table.close(dismiss = True)
+    continue
+
+# ------------------------------------------ assemble header --------------------------------------

-# --------------- assemble new header (metadata and columns containing curl) ----------------------
   table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-  for datatype,labels in active.items():                                    # loop over vector,tensor
-    for label in labels:
-      table.labels_append(['%i_curlFFT(%s)'%(i+1,label) for i in xrange(datainfo[datatype]['len'])])  # extend ASCII header with new labels
+  for type, data in items.iteritems():
+    for label in data['active']:
+      table.labels_append(['{}_curlFFT({})'.format(i+1,label) for i in xrange(data['dim'])])          # extend ASCII header with new labels
   table.head_write()

 # --------------- figure out size and grid ---------------------------------------------------------

+  table.data_readArray()
+
   coords = [{},{},{}]
-  for i in xrange(table.data.shape[0]):
+  for i in xrange(len(table.data)):
     for j in xrange(3):
-      coords[j][str(table.data[i,coordColumns[j]])] = True
+      coords[j][str(table.data[i,coordCol+j])] = True
   grid = np.array(map(len,coords),'i')
   size = grid/np.maximum(np.ones(3,'d'),grid-1.0)* \
          np.array([max(map(float,coords[0].keys()))-min(map(float,coords[0].keys())),\
                    max(map(float,coords[1].keys()))-min(map(float,coords[1].keys())),\
                    max(map(float,coords[2].keys()))-min(map(float,coords[2].keys())),\
                   ],'d')                                                    # size from bounding box, corrected for cell-centeredness
-  for i, points in enumerate(grid):
-    if points == 1:
-      mask = np.ones(3,dtype=bool)
-      mask[i]=0
-      size[i] = min(size[mask]/grid[mask])                                  # third spacing equal to smaller of other spacing
+
+  size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1]))       # spacing for grid==1 equal to smallest among other spacings

 # ------------------------------------------ process value field -----------------------------------
-  curl = defaultdict(dict)
-  for datatype,labels in active.items():                                    # loop over vector,tensor
-    for label in labels:                                                    # loop over all requested curls
-      startColumn=table.labels.index('1_'+label)
-      curl[datatype][label] = curlFFT(size[::-1],                           # we need to reverse order here, because x is fastest,ie rightmost, but leftmost in our x,y,z notation
-                                      table.data[:,startColumn:startColumn+datainfo[datatype]['len']].\
-                                      reshape([grid[2],grid[1],grid[0]]+datainfo[datatype]['shape']))
-
-# ------------------------------------------ add data ------------------------------------------
-  for datatype,labels in active.items():                                    # loop over vector,tensor
-    for label in labels:                                                    # loop over all requested curls
-      for c in xrange(curl[datatype][label][0,:].shape[0]):                 # append column by column
-        lastRow = table.data.shape[1]
-        table.data=np.insert(table.data,lastRow,curl[datatype][label][:,c],1)
+
+  stack = [table.data]
+  for type, data in items.iteritems():
+    for i,label in enumerate(data['active']):
+      stack.append(curlFFT(size[::-1],                                      # we need to reverse order here, because x is fastest,ie rightmost, but leftmost in our x,y,z notation
+                           table.data[:,data['column'][i]:data['column'][i]+data['dim']].\
+                           reshape([grid[2],grid[1],grid[0]]+data['shape'])))

 # ------------------------------------------ output result -----------------------------------------

-  table.input_close()                                                       # close input ASCII table (works for stdin)
-  table.output_close()                                                      # close output ASCII table (works for stdout)
-  if file['name'] != 'STDIN':
-    os.rename(file['name']+'_tmp',file['name'])                             # overwrite old one with tmp new
+  if len(stack) > 1: table.data = np.hstack(tuple(stack))
+  table.data_writeArray('%.12g')
+
+# ------------------------------------------ output finalization -----------------------------------
+
+  table.close()                                                             # close input ASCII table (works for stdin)
+  if name != 'STDIN': os.rename(name+'_tmp',name)                           # overwrite old one with tmp new

@@ -16,16 +16,23 @@ scriptName = os.path.splitext(scriptID.split()[1])[0]

 parser = OptionParser(option_class=damask.extendableOption, usage='%prog options file[s]', description = """
 Add deformed configuration of given initial coordinates.
-Operates on periodic ordered three-dimensional data sets.
+Operates on periodic three-dimensional x,y,z-ordered data sets.

 """, version = scriptID)

-parser.add_option('-c','--coordinates', dest='coords', metavar='string',
-                  help='column label of coordinates [%default]')
-parser.add_option('-f','--defgrad', dest='defgrad', metavar='string',
-                  help='column label of deformation gradient [%default]')
-parser.add_option('--scaling', dest='scaling', type='float', nargs=3, metavar = ' '.join(['float']*3),
-                  help='x/y/z scaling of displacment fluctuation')
+parser.add_option('-c','--coordinates',
+                  dest = 'coords',
+                  type = 'string', metavar = 'string',
+                  help = 'column label of coordinates [%default]')
+parser.add_option('-f','--defgrad',
+                  dest = 'defgrad',
+                  type = 'string', metavar = 'string',
+                  help = 'column label of deformation gradient [%default]')
+parser.add_option('--scaling',
+                  dest = 'scaling',
+                  type = 'float', nargs = 3, metavar = ' '.join(['float']*3),
+                  help = 'x/y/z scaling of displacement fluctuation')

 parser.set_defaults(coords = 'ipinitialcoord',
                     defgrad = 'f',
                     scaling = [1.,1.,1.],
@@ -34,89 +41,75 @@ parser.set_defaults(coords = 'ipinitialcoord',

(options,filenames) = parser.parse_args()

# --- loop over input files -------------------------------------------------------------------------
if filenames == []:
  filenames = ['STDIN']

if filenames == []: filenames = ['STDIN']

for name in filenames:
  if name == 'STDIN':
    file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr}
    file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
  else:
    if not os.path.exists(name): continue
    file = {'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr}
    file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
  if not (name == 'STDIN' or os.path.exists(name)): continue
  table = damask.ASCIItable(name = name, outname = name+'_tmp',
                            buffered = False)
  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

  table = damask.ASCIItable(file['input'],file['output'],buffered=False)                            # make unbuffered ASCII_table
  table.head_read()                                                                                 # read ASCII header info
  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
# ------------------------------------------ read header ------------------------------------------

# --------------- figure out columns to process ---------------------------------------------------
  table.head_read()

  if table.label_dimension(options.coords) != 3:
    file['croak'].write('no coordinate vector (1/2/3_%s) found...\n'%options.coords)
    continue
  if table.label_dimension(options.defgrad) != 9:
    file['croak'].write('no deformation gradient tensor (1..9_%s) found...\n'%options.defgrad)
# ------------------------------------------ sanity checks ----------------------------------------

  errors = []
  remarks = []

  if table.label_dimension(options.coords) != 3: errors.append('coordinates {} are not a vector.'.format(options.coords))
  else: colCoord = table.label_index(options.coords)

  if table.label_dimension(options.defgrad) != 9: errors.append('deformation gradient {} is not a tensor.'.format(options.defgrad))
  else: colF = table.label_index(options.defgrad)

  if remarks != []: table.croak(remarks)
  if errors != []:
    table.croak(errors)
    table.close(dismiss = True)
    continue

# --------------- figure out size and grid ---------------------------------------------------------

  colCoords = table.label_index(options.coords)                                                     # starting column of location data
  colDefGrad = table.label_index(options.defgrad)                                                   # remember columns of requested data
  table.data_readArray()

  coords = [{},{},{}]
  while table.data_read():                                                                          # read next data line of ASCII table
  for i in xrange(len(table.data)):
    for j in xrange(3):
      coords[j][str(table.data[colCoords+j])] = True                                               # remember coordinate along x,y,z
  grid = np.array([len(coords[0]),\
                   len(coords[1]),\
                   len(coords[2]),],'i')                                                            # grid is number of distinct coordinates found
      coords[j][str(table.data[i,colCoord+j])] = True
  grid = np.array(map(len,coords),'i')
  size = grid/np.maximum(np.ones(3,'d'),grid-1.0)* \
         np.array([max(map(float,coords[0].keys()))-min(map(float,coords[0].keys())),\
                   max(map(float,coords[1].keys()))-min(map(float,coords[1].keys())),\
                   max(map(float,coords[2].keys()))-min(map(float,coords[2].keys())),\
                  ],'d')                                                                            # size from bounding box, corrected for cell-centeredness

  for i, points in enumerate(grid):
    if points == 1:
      options.packing[i] = 1
      options.shift[i] = 0
      mask = np.ones(3,dtype=bool)
      mask[i] = 0
      size[i] = min(size[mask]/grid[mask])                                                          # third spacing equal to smaller of other spacing

  size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1]))                               # spacing for grid==1 equal to smallest among other spacings

  N = grid.prod()

# ------------------------------------------ assemble header ---------------------------------------
  table.labels_append(['%s_%s%s'%(coord+1,options.defgrad,options.coords) for coord in xrange(3)])  # extend ASCII header with new labels

  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
  table.labels_append(['{}_{}.{}'.format(coord+1,options.defgrad,options.coords) for coord in xrange(3)]) # extend ASCII header with new labels
  table.head_write()

# ------------------------------------------ read deformation gradient field -----------------------
  table.data_rewind()
  F = np.array([0.0 for i in xrange(N*9)]).reshape([3,3]+list(grid))
  idx = 0
  while table.data_read():
    (x,y,z) = damask.util.gridLocation(idx,grid)                                                    # figure out (x,y,z) position from line count
    idx += 1
    F[0:3,0:3,x,y,z] = np.array(map(float,table.data[colDefGrad:colDefGrad+9]),'d').reshape(3,3)
# ------------------------------------------ process deformation gradient --------------------------

  F = table.data[:,colF:colF+9].transpose().reshape([3,3]+list(options.dimension),order='F')
  Favg = damask.core.math.tensorAvg(F)
  centres = damask.core.mesh.deformedCoordsFFT(size,F,Favg,[1.0,1.0,1.0])

  stack = [table.data,centres]

# ------------------------------------------ calculate coordinates ---------------------------------
  Favg = damask.core.math.tensorAvg(F)
  centroids = damask.core.mesh.deformedCoordsFFT(size,F,Favg,options.scaling)
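tensorAvg supplies the volume-averaged deformation gradient and deformedCoordsFFT reconstructs cell-center positions as the affine map Favg.x plus a periodic fluctuation that options.scaling can stretch per axis. A hedged numpy sketch of just the averaging step, assuming F is laid out as (3,3,Nz,Ny,Nx) as built above:

  import numpy as np
  F = np.tile(np.eye(3).reshape(3,3,1,1,1),(1,1,2,2,2))   # toy field: identity everywhere
  Favg = F.mean(axis=(2,3,4))                             # average over all grid points
  print Favg                                              # 3x3 identity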

# ------------------------------------------ process data ------------------------------------------
  table.data_rewind()
  idx = 0
  outputAlive = True
  while outputAlive and table.data_read():                                                          # read next data line of ASCII table
    (x,y,z) = damask.util.gridLocation(idx,grid)                                                    # figure out (x,y,z) position from line count
    idx += 1
    table.data_append(list(centroids[:,x,y,z]))
    outputAlive = table.data_write()                                                                # output processed line

# ------------------------------------------ output result -----------------------------------------
  outputAlive and table.output_flush()                                                              # just in case of buffered ASCII table

  table.close()                                                                                     # close tables
  os.rename(file['name']+'_tmp',file['name'])                                                       # overwrite old one with tmp new
  if len(stack) > 1: table.data = np.hstack(tuple(stack))
  table.data_writeArray('%.12g')

# ------------------------------------------ output finalization -----------------------------------

  table.close()                                                                                     # close ASCII tables
  if name != 'STDIN': os.rename(name+'_tmp',name)                                                   # overwrite old one with tmp new

@@ -26,67 +26,68 @@ Add column(s) containing determinant of requested tensor column(s).

""", version = scriptID)

parser.add_option('-t','--tensor', dest='tensor', action='extend', metavar='<string LIST>',
                  help='heading of columns containing tensor field values')
parser.add_option('-t','--tensor',
                  dest = 'tensor',
                  action = 'extend', metavar = '<string LIST>',
                  help = 'heading of columns containing tensor field values')

(options,filenames) = parser.parse_args()

if options.tensor == None:
  parser.error('no data column specified...')
  parser.error('no data column specified.')

datainfo = {                                                                                        # list of requested labels per datatype
             'tensor': {'len':9,
                        'label':[]},
           }
# --- loop over input files -------------------------------------------------------------------------

datainfo['tensor']['label'] += options.tensor
if filenames == []: filenames = ['STDIN']

# ------------------------------------------ setup file handles ------------------------------------
files = []
if filenames == []:
  files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
else:
  for name in filenames:
    if os.path.exists(name):
      files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
for name in filenames:
  if not (name == 'STDIN' or os.path.exists(name)): continue
  table = damask.ASCIItable(name = name, outname = name+'_tmp',
                            buffered = False)
  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

#--- loop over input files -------------------------------------------------------------------------
for file in files:
  if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
  else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
# ------------------------------------------ read header ------------------------------------------

  table.head_read()

# ------------------------------------------ sanity checks ----------------------------------------

  items = {
           'tensor': {'dim': 9, 'shape': [3,3], 'labels':options.tensor, 'column': []},
          }
  errors = []
  remarks = []

  for type, data in items.iteritems():
    for what in data['labels']:
      dim = table.label_dimension(what)
      if dim != data['dim']: remarks.append('column {} is not a {}...'.format(what,type))
      else:
        items[type]['column'].append(table.label_index(what))
        table.labels_append('det({})'.format(what))                                                 # extend ASCII header with new labels

  if remarks != []: table.croak(remarks)
  if errors != []:
    table.croak(errors)
    table.close(dismiss = True)
    continue

# ------------------------------------------ assemble header --------------------------------------

  table = damask.ASCIItable(file['input'],file['output'],False)                                     # make unbuffered ASCII_table
  table.head_read()                                                                                 # read ASCII header info
  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))

# --------------- figure out columns to process ---------------------------------------------------
  active = []
  column = defaultdict(dict)
  for label in datainfo['tensor']['label']:
    key = '1_%s'%label
    if key not in table.labels:
      file['croak'].write('column %s not found...\n'%key)
    else:
      active.append(label)
      column[label] = table.labels.index(key)                                                       # remember columns of requested data

# ------------------------------------------ assemble header ---------------------------------------
  for label in active:
    table.labels_append('det(%s)'%label)                                                            # extend ASCII header with new labels
  table.head_write()

# ------------------------------------------ process data ------------------------------------------

  outputAlive = True
  while outputAlive and table.data_read():                                                          # read next data line of ASCII table
    for label in active:
      table.data_append(determinant(map(float,table.data[column[label]:
                                                         column[label]+datainfo['tensor']['len']])))
    for type, data in items.iteritems():
      for column in data['column']:
        table.data_append(determinant(map(float,table.data[column:
                                                           column+data['dim']])))
    outputAlive = table.data_write()                                                                # output processed line
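The determinant helper is defined earlier in the script and not shown in this hunk; on a flat row-major 9-list it amounts to the rule of Sarrus. A minimal, hypothetical equivalent (not the committed code):

  def determinant(m):
    return  m[0]*m[4]*m[8] + m[1]*m[5]*m[6] + m[2]*m[3]*m[7] \
          - m[2]*m[4]*m[6] - m[1]*m[3]*m[8] - m[0]*m[5]*m[7]

  print determinant([1.,0.,0., 0.,2.,0., 0.,0.,3.])       # 6.0 for diag(1,2,3)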

# ------------------------------------------ output result -----------------------------------------
  outputAlive and table.output_flush()                                                              # just in case of buffered ASCII table
# ------------------------------------------ output finalization -----------------------------------

  table.input_close()                                                                               # close input ASCII table (works for stdin)
  table.output_close()                                                                              # close output ASCII table (works for stdout)
  if file['name'] != 'STDIN':
    os.rename(file['name']+'_tmp',file['name'])                                                     # overwrite old one with tmp new
  table.close()                                                                                     # close input ASCII table (works for stdin)
  if name != 'STDIN': os.rename(name+'_tmp',name)                                                   # overwrite old one with tmp new

@@ -11,11 +11,14 @@ scriptName = os.path.splitext(scriptID.split()[1])[0]

oneThird = 1.0/3.0

def deviator(m):                                                                                    # Careful, do not change the value of m (it's intent(inout)!)
def deviator(m,spherical = False):                                                                  # Careful, do not change the value of m (it's intent(inout)!)
  sph = oneThird*(m[0]+m[4]+m[8])
  return [m[0]-sph, m[1],     m[2],
  dev = [
          m[0]-sph, m[1],     m[2],
          m[3],     m[4]-sph, m[5],
          m[6],     m[7],     m[8]-sph]
          m[6],     m[7],     m[8]-sph,
        ]
  return (dev,sph) if spherical else dev
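Note the parenthesized return: without it, `dev,sph if spherical else dev` parses as a tuple whose second element is conditional, so the function would always return a pair. A quick standalone check of the decomposition (illustrative values):

  oneThird = 1.0/3.0
  def deviator(m,spherical = False):
    sph = oneThird*(m[0]+m[4]+m[8])
    dev = [m[0]-sph, m[1],     m[2],
           m[3],     m[4]-sph, m[5],
           m[6],     m[7],     m[8]-sph]
    return (dev,sph) if spherical else dev

  dev,sph = deviator([1.,0.,0., 0.,2.,0., 0.,0.,3.],spherical = True)
  print sph                                               # 2.0 (mean of the trace)
  print dev[0], dev[4], dev[8]                            # -1.0 0.0 1.0 -> traceless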

# --------------------------------------------------------------------
# MAIN
@@ -26,72 +29,77 @@ Add column(s) containing deviator of requested tensor column(s).

""", version = scriptID)

parser.add_option('-t','--tensor', dest='tensor', action='extend', metavar='<string LIST>',
                  help='heading of columns containing tensor field values')
parser.add_option('-s','--spherical', dest='hydrostatic', action='store_true',
                  help='also add spherical part of tensor (hydrostatic component, pressure)')
parser.add_option('-t','--tensor',
                  dest = 'tensor',
                  action = 'extend', metavar='<string LIST>',
                  help = 'heading of columns containing tensor field values')
parser.add_option('-s','--spherical',
                  dest = 'spherical',
                  action = 'store_true',
                  help = 'report spherical part of tensor (hydrostatic component, pressure)')

(options,filenames) = parser.parse_args()

if options.tensor == None:
  parser.error('no data column specified...')

datainfo = {                                                                                        # list of requested labels per datatype
             'tensor': {'len':9,
                        'label':[]},
           }
# --- loop over input files -------------------------------------------------------------------------

datainfo['tensor']['label'] += options.tensor
if filenames == []: filenames = ['STDIN']

# ------------------------------------------ setup file handles ------------------------------------
files = []
if filenames == []:
  files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
else:
  for name in filenames:
    if os.path.exists(name):
      files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
for name in filenames:
  if not (name == 'STDIN' or os.path.exists(name)): continue
  table = damask.ASCIItable(name = name, outname = name+'_tmp',
                            buffered = False)
  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

# ------------------------------------------ loop over input files ---------------------------------
for file in files:
  if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
  else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
# ------------------------------------------ read header ------------------------------------------

  table.head_read()

# ------------------------------------------ sanity checks ----------------------------------------

  items = {
           'tensor': {'dim': 9, 'shape': [3,3], 'labels':options.tensor, 'active':[], 'column': []},
          }
  errors = []
  remarks = []
  column = {}

  for type, data in items.iteritems():
    for what in data['labels']:
      dim = table.label_dimension(what)
      if dim != data['dim']: remarks.append('column {} is not a {}.'.format(what,type))
      else:
        items[type]['active'].append(what)
        items[type]['column'].append(table.label_index(what))

  if remarks != []: table.croak(remarks)
  if errors != []:
    table.croak(errors)
    table.close(dismiss = True)
    continue

# ------------------------------------------ assemble header --------------------------------------

  table = damask.ASCIItable(file['input'],file['output'],False)                                     # make unbuffered ASCII_table
  table.head_read()                                                                                 # read ASCII header info
  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))

  active = []
  column = defaultdict(dict)

  for label in datainfo['tensor']['label']:
    key = '1_%s'%label
    if key not in table.labels:
      file['croak'].write('column %s not found...\n'%key)
    else:
      active.append(label)
      column[label] = table.labels.index(key)                                                       # remember columns of requested data

# ------------------------------------------ assemble header ---------------------------------------
  for label in active:
    table.labels_append(['%i_dev(%s)'%(i+1,label) for i in xrange(9)])                              # extend ASCII header with new labels
    if(options.hydrostatic): table.labels_append('sph(%s)'%label)
  for type, data in items.iteritems():
    for label in data['active']:
      table.labels_append(['{}_dev({})'.format(i+1,label) for i in xrange(data['dim'])] + \
                          (['sph({})'.format(label)] if options.spherical else []))                 # extend ASCII header with new labels
  table.head_write()

# ------------------------------------------ process data ------------------------------------------

  outputAlive = True
  while outputAlive and table.data_read():                                                          # read next data line of ASCII table
    for label in active:
      myTensor = map(float,table.data[column[label]:
                                      column[label]+datainfo['tensor']['len']])
      table.data_append(deviator(myTensor))
      if(options.hydrostatic): table.data_append(oneThird*(myTensor[0]+myTensor[4]+myTensor[8]))
    for type, data in items.iteritems():
      for column in data['column']:
        table.data_append(deviator(map(float,table.data[column:
                                                        column+data['dim']]),options.spherical))
    outputAlive = table.data_write()                                                                # output processed line

# ------------------------------------------ output result -----------------------------------------
  outputAlive and table.output_flush()                                                              # just in case of buffered ASCII table
# ------------------------------------------ output finalization -----------------------------------

  table.input_close()                                                                               # close input ASCII table (works for stdin)
  table.output_close()                                                                              # close output ASCII table (works for stdout)
  if file['name'] != 'STDIN':
    os.rename(file['name']+'_tmp',file['name'])                                                     # overwrite old one with tmp new
  table.close()                                                                                     # close input ASCII table (works for stdin)
  if name != 'STDIN': os.rename(name+'_tmp',name)                                                   # overwrite old one with tmp new

@@ -12,10 +12,12 @@ scriptName = os.path.splitext(scriptID.split()[1])[0]

def divFFT(geomdim,field):
  grid = np.array(np.shape(field)[0:3])
  wgt = 1.0/np.array(grid).prod()
  N = grid.prod()                                                                                   # field size
  n = np.array(np.shape(field)[3:]).prod()                                                          # data size
  wgt = 1.0/N

  field_fourier=np.fft.fftpack.rfftn(field,axes=(0,1,2))
  div_fourier=np.zeros(field_fourier.shape[0:len(np.shape(field))-1],'c16')                         # size depends on whether tensor or vector
  field_fourier = np.fft.fftpack.rfftn(field,axes=(0,1,2))
  div_fourier = np.zeros(field_fourier.shape[0:len(np.shape(field))-1],'c16')                       # size depends on whether tensor or vector

# differentiation in Fourier space
  k_s=np.zeros([3],'i')

@@ -30,18 +32,13 @@ def divFFT(geomdim,field):
        k_s[2] = k
        if(k > grid[2]/2 ): k_s[2] = k_s[2] - grid[2]
        xi=np.array([k_s[2]/geomdim[2]+0.0j,k_s[1]/geomdim[1]+0.j,k_s[0]/geomdim[0]+0.j],'c16')
        if len(np.shape(field)) == 5:                                                               # tensor, 3x3 -> 3
        if n == 9:                                                                                  # tensor, 3x3 -> 3
          for l in xrange(3):
            div_fourier[i,j,k,l] = sum(field_fourier[i,j,k,l,0:3]*xi) *TWOPIIMG
        elif len(np.shape(field)) == 4:                                                             # vector, 3 -> 1
        elif n == 3:                                                                                # vector, 3 -> 1
          div_fourier[i,j,k] = sum(field_fourier[i,j,k,0:3]*xi) *TWOPIIMG

  div=np.fft.fftpack.irfftn(div_fourier,axes=(0,1,2))

  if len(np.shape(field)) == 5:                                                                     # tensor, 3x3 -> 3
    return div.reshape([grid.prod(),3])
  elif len(np.shape(field)) == 4:                                                                   # vector, 3 -> 1
    return div.reshape([grid.prod(),1])
  return np.fft.fftpack.irfftn(div_fourier,axes=(0,1,2)).reshape([N,n/3])
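divFFT differentiates in Fourier space: each mode is multiplied by 2*pi*i times its wave vector, and the k_s folding above mirrors the negative-frequency convention of np.fft.fftfreq. A one-dimensional check of the identity (np.fft used directly; not part of the script):

  import numpy as np
  N, L = 64, 1.0
  x = np.arange(N)/float(N)*L
  f = np.sin(2.0*np.pi*x)                                 # test signal
  k = np.fft.fftfreq(N,d=L/N)                             # cycles per unit length
  dfdx = np.fft.ifft(2.0*np.pi*1j*k*np.fft.fft(f)).real   # spectral derivative
  print np.allclose(dfdx, 2.0*np.pi*np.cos(2.0*np.pi*x))  # True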

# --------------------------------------------------------------------

@@ -55,113 +52,109 @@ Deals with both vector- and tensor-valued fields.

""", version = scriptID)

parser.add_option('-c','--coordinates', dest='coords', metavar='string',
                  help='column heading for coordinates [%default]')
parser.add_option('-v','--vector', dest='vector', action='extend', metavar='<string LIST>',
                  help='heading of columns containing vector field values')
parser.add_option('-t','--tensor', dest='tensor', action='extend', metavar='<string LIST>',
                  help='heading of columns containing tensor field values')
parser.set_defaults(coords = 'ipinitialcoord')
parser.add_option('-c','--coordinates',
                  dest = 'coords',
                  type = 'string', metavar = 'string',
                  help = 'column heading for coordinates [%default]')
parser.add_option('-v','--vector',
                  dest = 'vector',
                  action = 'extend', metavar = '<string LIST>',
                  help = 'heading of columns containing vector field values')
parser.add_option('-t','--tensor',
                  dest = 'tensor',
                  action = 'extend', metavar = '<string LIST>',
                  help = 'heading of columns containing tensor field values')

parser.set_defaults(coords = 'ipinitialcoord',
                   )

(options,filenames) = parser.parse_args()

if options.vector == None and options.tensor == None:
  parser.error('no data column specified...')
  parser.error('no data column specified.')

datainfo = {                                                                                        # list of requested labels per datatype
             'vector': {'shape':[3],
                        'len':3,
                        'label':[]},
             'tensor': {'shape':[3,3],
                        'len':9,
                        'label':[]},
           }
# --- loop over input files -------------------------------------------------------------------------

if options.vector != None: datainfo['vector']['label'] = options.vector
if options.tensor != None: datainfo['tensor']['label'] = options.tensor
if filenames == []: filenames = ['STDIN']

# ------------------------------------------ setup file handles ------------------------------------
files = []
if filenames == []:
  files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
else:
  for name in filenames:
    if os.path.exists(name):
      files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
for name in filenames:
  if not (name == 'STDIN' or os.path.exists(name)): continue
  table = damask.ASCIItable(name = name, outname = name+'_tmp',
                            buffered = False)
  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

#--- loop over input files -------------------------------------------------------------------------
for file in files:
  if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
  else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
# ------------------------------------------ read header ------------------------------------------

  table = damask.ASCIItable(file['input'],file['output'],False)                                     # make unbuffered ASCII_table
  table.head_read()                                                                                 # read ASCII header info
  table.data_readArray()
  table.head_read()

# --------------- figure out name of coordinate data (support for legacy .x notation) -------------
  coordLabels=['%i_%s'%(i+1,options.coords) for i in xrange(3)]                                     # store labels for column keys
  if not set(coordLabels).issubset(table.labels):
    directions = ['x','y','z']
    coordLabels=['%s.%s'%(options.coords,directions[i]) for i in xrange(3)]                         # store labels for column keys
    if not set(coordLabels).issubset(table.labels):
      file['croak'].write('no coordinate data (1_%s) found...\n'%options.coords)
      continue
  coordColumns = [table.labels.index(label) for label in coordLabels]
# ------------------------------------------ sanity checks ----------------------------------------

# --------------- figure out active columns -------------------------------------------------------
  active = defaultdict(list)
  for datatype,info in datainfo.items():
    for label in info['label']:
      key = '1_%s'%label
      if key not in table.labels:
        file['croak'].write('column %s not found...\n'%key)
  items = {
           'tensor': {'dim': 9, 'shape': [3,3], 'labels':options.tensor, 'active':[], 'column': []},
           'vector': {'dim': 3, 'shape': [3],   'labels':options.vector, 'active':[], 'column': []},
          }
  errors = []
  remarks = []
  column = {}

  if table.label_dimension(options.coords) != 3: errors.append('coordinates {} are not a vector.'.format(options.coords))
  else: coordCol = table.label_index(options.coords)

  for type, data in items.iteritems():
    for what in data['labels']:
      dim = table.label_dimension(what)
      if dim != data['dim']: remarks.append('column {} is not a {}.'.format(what,type))
      else:
        active[datatype].append(label)
        items[type]['active'].append(what)
        items[type]['column'].append(table.label_index(what))

  if remarks != []: table.croak(remarks)
  if errors != []:
    table.croak(errors)
    table.close(dismiss = True)
    continue

# ------------------------------------------ assemble header --------------------------------------

# --------------- assemble new header (metadata and columns containing divergence) ----------------
  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
  for datatype,labels in active.items():                                                            # loop over vector,tensor
    for label in labels:
      table.labels_append(['divFFT(%s)'%(label) if datatype == 'vector' else
                           '%i_divFFT(%s)'%(i+1,label) for i in xrange(datainfo[datatype]['len']//3)]) # extend ASCII header with new labels
  for type, data in items.iteritems():
    for label in data['active']:
      table.labels_append(['divFFT({})'.format(label) if type == 'vector' else
                           '{}_divFFT({})'.format(i+1,label) for i in xrange(data['dim']//3)])      # extend ASCII header with new labels
  table.head_write()

# --------------- figure out size and grid ---------------------------------------------------------

  table.data_readArray()

  coords = [{},{},{}]
  for i in xrange(table.data.shape[0]):
  for i in xrange(len(table.data)):
    for j in xrange(3):
      coords[j][str(table.data[i,coordColumns[j]])] = True
      coords[j][str(table.data[i,coordCol+j])] = True
  grid = np.array(map(len,coords),'i')
  size = grid/np.maximum(np.ones(3,'d'),grid-1.0)* \
         np.array([max(map(float,coords[0].keys()))-min(map(float,coords[0].keys())),\
                   max(map(float,coords[1].keys()))-min(map(float,coords[1].keys())),\
                   max(map(float,coords[2].keys()))-min(map(float,coords[2].keys())),\
                  ],'d')                                                                            # size from bounding box, corrected for cell-centeredness
  for i, points in enumerate(grid):
    if points == 1:
      mask = np.ones(3,dtype=bool)
      mask[i] = 0
      size[i] = min(size[mask]/grid[mask])                                                          # third spacing equal to smaller of other spacing

  size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1]))                               # spacing for grid==1 equal to smallest among other spacings

# ------------------------------------------ process value field -----------------------------------
  div = defaultdict(dict)
  for datatype,labels in active.items():                                                            # loop over vector,tensor
    for label in labels:                                                                            # loop over all requested divergences
      startColumn = table.labels.index('1_'+label)
      div[datatype][label] = divFFT(size[::-1],                                                     # we need to reverse order here, because x is fastest, i.e. rightmost, but leftmost in our x,y,z notation
                                    table.data[:,startColumn:startColumn+datainfo[datatype]['len']].\
                                    reshape([grid[2],grid[1],grid[0]]+datainfo[datatype]['shape']))

# ------------------------------------------ add data ------------------------------------------
  for datatype,labels in active.items():                                                            # loop over vector,tensor
    for label in labels:                                                                            # loop over all requested divergences
      for c in xrange(div[datatype][label][0,:].shape[0]):                                          # append column by column
        lastRow = table.data.shape[1]
        table.data=np.insert(table.data,lastRow,div[datatype][label][:,c],1)
  stack = [table.data]
  for type, data in items.iteritems():
    for i,label in enumerate(data['active']):
      stack.append(divFFT(size[::-1],                                                               # we need to reverse order here, because x is fastest, i.e. rightmost, but leftmost in our x,y,z notation
                          table.data[:,data['column'][i]:data['column'][i]+data['dim']].\
                          reshape([grid[2],grid[1],grid[0]]+data['shape'])))

# ------------------------------------------ output result -----------------------------------------

  if len(stack) > 1: table.data = np.hstack(tuple(stack))
  table.data_writeArray('%.12g')
  table.input_close()                                                                               # close input ASCII table (works for stdin)
  table.output_close()                                                                              # close output ASCII table (works for stdout)
  if file['name'] != 'STDIN':
    os.rename(file['name']+'_tmp',file['name'])                                                     # overwrite old one with tmp new

# ------------------------------------------ output finalization -----------------------------------

  table.close()                                                                                     # close input ASCII table (works for stdin)
  if name != 'STDIN': os.rename(name+'_tmp',name)                                                   # overwrite old one with tmp new

@@ -20,10 +20,10 @@ def E_hkl(stiffness,vec):                                                       # stiffness = (c11,c12,c44)
  S44 = 1.0/stiffness[2]

  invE = S11-(S11-S12-0.5*S44)* (1.0 - \
                                 (v[0]**4+v[1]**4+v[2]**4) \
                                /#------------------------------------
                                 np.inner(v,v)**2 \
                                )

  return 1.0/invE
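For v along <100> the quartic term equals 1 and the modulus reduces to 1/S11; a sketch using the standard cubic compliance relations (the copper constants below are illustrative only):

  import numpy as np
  C11,C12,C44 = 168.4e9, 121.4e9, 75.4e9                  # roughly copper, Pa (illustrative)
  S11 = (C11+C12)/((C11-C12)*(C11+2.0*C12))
  S12 = -C12/((C11-C12)*(C11+2.0*C12))
  S44 = 1.0/C44

  def invE(v):
    v = np.array(v,'d')
    return S11-(S11-S12-0.5*S44)*(1.0-(v[0]**4+v[1]**4+v[2]**4)/np.inner(v,v)**2)

  print 1.0/invE([1,0,0])                                 # E_100 = 1/S11 (soft direction)
  print 1.0/invE([1,1,1])                                 # E_111 (stiff direction for Cu)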

@@ -36,73 +36,62 @@ Add column(s) containing directional stiffness based on given cubic stiffness va

""", version = scriptID)

parser.add_option('-c','--stiffness', dest='vector', action='extend', metavar='<string LIST>',
                  help='heading of column containing C11 (followed by C12, C44) field values')
parser.add_option('-d','--direction','--hkl', dest='hkl', type='int', nargs=3, metavar='int int int',
                  help='direction of elastic modulus [%default]')
parser.set_defaults(hkl = (1,1,1))
parser.add_option('-c','--stiffness',
                  dest = 'stiffness',
                  action = 'extend', metavar = '<string LIST>',
                  help = 'heading of column containing C11 (followed by C12, C44) field values')
parser.add_option('-d','--direction','--hkl',
                  dest = 'hkl',
                  type = 'int', nargs = 3, metavar = 'int int int',
                  help = 'direction of elastic modulus [%default]')
parser.set_defaults(hkl = (1,1,1),
                   )

(options,filenames) = parser.parse_args()

if options.vector == None:
if options.stiffness == None:
  parser.error('no data column specified...')

datainfo = {                                                                                        # list of requested labels per datatype
             'vector': {'len':3,
                        'label':[]},
           }
# --- loop over input files -------------------------------------------------------------------------

datainfo['vector']['label'] += options.vector
if filenames == []: filenames = ['STDIN']

# ------------------------------------------ setup file handles ------------------------------------
for name in filenames:
  if not (name == 'STDIN' or os.path.exists(name)): continue
  table = damask.ASCIItable(name = name, outname = name+'_tmp',
                            buffered = False)
  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

files = []
if filenames == []:
  files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
else:
  for name in filenames:
    if os.path.exists(name):
      files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
# ------------------------------------------ read header ------------------------------------------

# ------------------------------------------ loop over input files ---------------------------------
for file in files:
  if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
  else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
  table.head_read()

  table = damask.ASCIItable(file['input'],file['output'],False)                                     # make unbuffered ASCII_table
  table.head_read()                                                                                 # read ASCII header info
  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
# ------------------------------------------ sanity checks ----------------------------------------

  active = []
  column = defaultdict(dict)

  for label in datainfo['vector']['label']:
    key = '1_%s'%label
    if key not in table.labels:
      file['croak'].write('column %s not found...\n'%key)
  remarks = []
  columns = []

  for i,column in enumerate(table.label_index(options.stiffness)):
    if column < 0: remarks.append('column {} not found.'.format(options.stiffness[i]))
    else:
      active.append(label)
      column[label] = table.labels.index(key)                                                       # remember columns of requested data
      columns.append(column)
      table.labels_append('E{}{}{}({})'.format(options.hkl[0],options.hkl[1],options.hkl[2],
                                               options.stiffness[i]))                               # extend ASCII header with new labels

# ------------------------------------------ assemble header ---------------------------------------
  for label in active:
    table.labels_append('E%i%i%i(%s)'%(options.hkl[0],
                                       options.hkl[1],
                                       options.hkl[2],label))                                       # extend ASCII header with new labels
  if remarks != []: table.croak(remarks)

# ------------------------------------------ assemble header --------------------------------------

  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
  table.head_write()

# ------------------------------------------ process data ------------------------------------------
  outputAlive = True
  while outputAlive and table.data_read():                                                          # read next data line of ASCII table
    for label in active:
      table.data_append(E_hkl(map(float,table.data[column[label]:\
                                                   column[label]+datainfo['vector']['len']]),options.hkl))
    for column in columns:
      table.data_append(E_hkl(map(float,table.data[column:column+3]),options.hkl))
    outputAlive = table.data_write()                                                                # output processed line

# ------------------------------------------ output result -----------------------------------------
  outputAlive and table.output_flush()                                                              # just in case of buffered ASCII table
# ------------------------------------------ output finalization -----------------------------------

  table.input_close()                                                                               # close input ASCII table (works for stdin)
  table.output_close()                                                                              # close output ASCII table (works for stdout)
  if file['name'] != 'STDIN':
    os.rename(file['name']+'_tmp',file['name'])                                                     # overwrite old one with tmp new
  table.close()                                                                                     # close ASCII tables
  if name != 'STDIN': os.rename(name+'_tmp',name)                                                   # overwrite old one with tmp new

@@ -106,11 +106,11 @@ parser.set_defaults(scale = 1.0)
(options,filenames) = parser.parse_args()

if options.type == None:
  parser.error('no feature type selected...')
  parser.error('no feature type selected.')
if not set(options.type).issubset(set(list(itertools.chain(*map(lambda x: x['names'],features))))):
  parser.error('type must be chosen from (%s)...'%(', '.join(map(lambda x:'|'.join(x['names']),features))) )
  parser.error('type must be chosen from (%s).'%(', '.join(map(lambda x:'|'.join(x['names']),features))) )
if 'biplane' in options.type and 'boundary' in options.type:
  parser.error("both aliases 'biplane' and 'boundary' are selected...")
  parser.error("only one from aliases 'biplane' and 'boundary' possible.")

feature_list = []
for i,feature in enumerate(features):

@@ -120,55 +120,69 @@ for i,feature in enumerate(features):
      feature_list.append(i)                                                                        # remember valid features
      break

files = []
# --- loop over input files -------------------------------------------------------------------------

if filenames == []: filenames = ['STDIN']

for name in filenames:
  if os.path.exists(name):
    files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
  if not (name == 'STDIN' or os.path.exists(name)): continue
  table = damask.ASCIItable(name = name, outname = name+'_tmp',
                            buffered = False)
  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

# ------------------------------------------ loop over input files ---------------------------------
for file in files:
  file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
# ------------------------------------------ read header ------------------------------------------

  table = damask.ASCIItable(file['input'],file['output'],False)                                     # make unbuffered ASCII_table
  table.head_read()                                                                                 # read ASCII header info
  table.data_readArray()
  table.head_read()

# --------------- figure out name of coordinate data (support for legacy .x notation) ------------
  coordLabels=['%i_%s'%(i+1,options.coords) for i in xrange(3)]                                     # store labels for column keys
  if not set(coordLabels).issubset(table.labels):
    directions = ['x','y','z']
    coordLabels=['%s.%s'%(options.coords,directions[i]) for i in xrange(3)]                         # store labels for column keys
    if not set(coordLabels).issubset(table.labels):
      file['croak'].write('no coordinate data (1_%s) found...\n'%options.coords)
      continue
  coordColumns = [table.labels.index(label) for label in coordLabels]
# ------------------------------------------ sanity checks ----------------------------------------

  errors = []
  remarks = []
  column = {}

# --------------- figure out active column --------------------------------------------------------
  if options.id not in table.labels:
    file['croak'].write('column %s not found...\n'%options.id)
  if table.label_dimension(options.coords) != 3: errors.append('coordinates {} are not a vector.'.format(options.coords))
  else: coordCol = table.label_index(options.coords)

  if table.label_dimension(options.id) != 1: errors.append('grain identifier {} not found.'.format(options.id))
  else: idCol = table.label_index(options.id)

  if remarks != []: table.croak(remarks)
  if errors != []:
    table.croak(errors)
    table.close(dismiss = True)
    continue

# ------------------------------------------ assemble header ---------------------------------------

  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
  for feature in feature_list:
    table.labels_append('ED_%s(%s)'%(features[feature]['names'][0],options.id))                     # extend ASCII header with new labels
    table.labels_append('ED_{}({})'.format(features[feature]['names'][0],options.id))               # extend ASCII header with new labels
  table.head_write()

# --------------- figure out grid -----------------------------------------------------------------
# --------------- figure out size and grid ---------------------------------------------------------

  table.data_readArray()

  coords = [{},{},{}]
  for i in xrange(len(table.data)):
    for j in xrange(3):
      coords[j][str(table.data[i,coordColumns[j]])] = True
      coords[j][str(table.data[i,coordCol+j])] = True
  grid = np.array(map(len,coords),'i')
  size = grid/np.maximum(np.ones(3,'d'),grid-1.0)* \
         np.array([max(map(float,coords[0].keys()))-min(map(float,coords[0].keys())),\
                   max(map(float,coords[1].keys()))-min(map(float,coords[1].keys())),\
                   max(map(float,coords[2].keys()))-min(map(float,coords[2].keys())),\
                  ],'d')                                                                            # size from bounding box, corrected for cell-centeredness

  size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1]))                               # spacing for grid==1 equal to smallest among other spacings

# ------------------------------------------ process value field -----------------------------------
  unitlength = 0.0
  for i,r in enumerate(grid):
    if r > 1: unitlength = max(unitlength,(max(map(float,coords[i].keys()))-min(map(float,coords[i].keys())))/(r-1.0))

  stack = [table.data]

  neighborhood = neighborhoods[options.neighborhood]
  convoluted = np.empty([len(neighborhood)]+list(grid+2),'i')
  microstructure = periodic_3Dpad(np.array(table.data[:,table.labels.index(options.id)].reshape(grid),'i'))
  microstructure = periodic_3Dpad(np.array(table.data[:,idCol].reshape(grid),'i'))

  for i,p in enumerate(neighborhood):
    stencil = np.zeros((3,3,3),'i')

@@ -181,29 +195,28 @@ for file in files:
  distance = np.ones((len(feature_list),grid[0],grid[1],grid[2]),'d')

  convoluted = np.sort(convoluted,axis = 0)
  uniques = np.where(convoluted[0,1:-1,1:-1,1:-1] != 0, 1,0)                                        # initialize unique value counter (exclude myself [= 0])

  for i in xrange(1,len(neighborhood)):                                                             # check remaining points in neighborhood
    uniques += np.where(np.logical_and(
                        convoluted[i,1:-1,1:-1,1:-1] != convoluted[i-1,1:-1,1:-1,1:-1],             # flip of ID difference detected?
                        convoluted[i,1:-1,1:-1,1:-1] != 0),                                         # not myself?
                        1,0)                                                                        # count flip

  for i,feature_id in enumerate(feature_list):
    distance[i,:,:,:] = np.where(uniques >= features[feature_id]['aliens'],0.0,1.0)                 # seed with 0.0 when enough unique neighbor IDs are present

  for i in xrange(len(feature_list)):
    distance[i,:,:,:] = ndimage.morphology.distance_transform_edt(distance[i,:,:,:])*[options.scale]*3

  distance.shape = (len(feature_list),grid.prod())
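distance_transform_edt returns, for every nonzero voxel, the Euclidean distance to the nearest zero, so seeding boundary voxels with 0.0 turns the field into a distance-to-feature map. A tiny example (toy array, not script data):

  import numpy as np
  from scipy import ndimage
  a = np.ones((1,1,7),'d')
  a[0,0,3] = 0.0                                          # a single "boundary" voxel
  print ndimage.morphology.distance_transform_edt(a)[0,0] # [ 3.  2.  1.  0.  1.  2.  3.]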

# ------------------------------------------ add data ------------------------------------------
  for i in xrange(len(feature_list)):
    lastRow = table.data.shape[1]
    table.data=np.insert(table.data,lastRow,distance[i,:],1)
    stack.append(distance[i,:])

# ------------------------------------------ output result -----------------------------------------

  if len(stack) > 1: table.data = np.hstack(tuple(stack))
  table.data_writeArray('%.12g')
  table.input_close()                                                                               # close input ASCII table (works for stdin)
  table.output_close()                                                                              # close output ASCII table (works for stdout)
  if file['name'] != 'STDIN':
    os.rename(file['name']+'_tmp',file['name'])                                                     # overwrite old one with tmp new

# ------------------------------------------ output finalization -----------------------------------

  table.close()                                                                                     # close input ASCII table (works for stdin)
  if name != 'STDIN': os.rename(name+'_tmp',name)                                                   # overwrite old one with tmp new


@@ -10,25 +10,6 @@ from optparse import OptionParser, OptionGroup, Option, SUPPRESS_HELP
scriptID = string.replace('$Id: addGrainID.py 2549 2013-07-10 09:13:21Z MPIE\p.eisenlohr $','\n','\\n')
scriptName = os.path.splitext(scriptID.split()[1])[0]

#--------------------------------------------------------------------------------------------------
class extendedOption(Option):
#--------------------------------------------------------------------------------------------------
# used for definition of new option parser action 'extend', which enables to take multiple option arguments
# taken from online tutorial http://docs.python.org/library/optparse.html

  ACTIONS = Option.ACTIONS + ("extend",)
  STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
  TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
  ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)

  def take_action(self, action, dest, opt, value, values, parser):
    if action == "extend":
      lvalue = value.split(",")
      values.ensure_value(dest, []).extend(lvalue)
    else:
      Option.take_action(self, action, dest, opt, value, values, parser)


# -----------------------------
class backgroundMessage(threading.Thread):
# -----------------------------

@@ -67,117 +48,121 @@ class backgroundMessage(threading.Thread):
    self.print_message()


parser = OptionParser(option_class=extendedOption, usage='%prog options [file[s]]', description = """
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
Add grain index based on similarity of crystal lattice orientation.
""" + string.replace(scriptID,'\n','\\n')
)

parser.add_option('-r', '--radius', dest='radius', type='float',
parser.add_option('-r', '--radius',
                  dest = 'radius',
                  type = 'float', metavar = 'float',
                  help = 'search radius')
parser.add_option('-d', '--disorientation', dest='disorientation', type='float', metavar='ANGLE',
parser.add_option('-d', '--disorientation',
                  dest = 'disorientation',
                  type = 'float', metavar = 'float',
                  help = 'disorientation threshold per grain [%default] (degrees)')
parser.add_option('-s', '--symmetry', dest='symmetry', type='string',
parser.add_option('-s', '--symmetry',
                  dest = 'symmetry',
                  type = 'string', metavar = 'string',
                  help = 'crystal symmetry [%default]')
parser.add_option('-e', '--eulers', dest='eulers', type='string', metavar='LABEL',
parser.add_option('-e', '--eulers',
                  dest = 'eulers',
                  type = 'string', metavar = 'string',
                  help = 'Euler angles')
parser.add_option( '--degrees', dest='degrees', action='store_true',
parser.add_option( '--degrees',
                  dest = 'degrees',
                  action = 'store_true',
                  help = 'Euler angles are given in degrees [%default]')
parser.add_option('-m', '--matrix', dest='matrix', type='string', metavar='LABEL',
parser.add_option('-m', '--matrix',
                  dest = 'matrix',
                  type = 'string', metavar = 'string',
                  help = 'orientation matrix')
parser.add_option('-a', dest='a', type='string', metavar='LABEL',
parser.add_option('-a',
                  dest = 'a',
                  type = 'string', metavar = 'string',
                  help = 'crystal frame a vector')
parser.add_option('-b', dest='b', type='string', metavar='LABEL',
parser.add_option('-b',
                  dest = 'b',
                  type = 'string', metavar = 'string',
                  help = 'crystal frame b vector')
parser.add_option('-c', dest='c', type='string', metavar='LABEL',
parser.add_option('-c',
                  dest = 'c',
                  type = 'string', metavar = 'string',
                  help = 'crystal frame c vector')
parser.add_option('-q', '--quaternion', dest='quaternion', type='string', metavar='LABEL',
parser.add_option('-q', '--quaternion',
                  dest = 'quaternion',
                  type = 'string', metavar = 'string',
                  help = 'quaternion')
parser.add_option('-p', '--position', dest='position', type='string', metavar='LABEL',
parser.add_option('-p', '--position',
                  dest = 'coords',
                  type = 'string', metavar = 'string',
                  help = 'spatial position of voxel [%default]')

parser.set_defaults(symmetry = 'cubic')
parser.set_defaults(position = 'pos')
parser.set_defaults(degrees = False)
parser.set_defaults(symmetry = 'cubic',
                    coords = 'pos',
                    degrees = False,
                   )

(options, filenames) = parser.parse_args()

if options.radius == None:
  parser.error('no radius specified.')

datainfo = {                                                                                        # list of requested labels per datatype
             'tensor': {'len':9,
                        'label':[]},
             'vector': {'len':3,
                        'label':[]},
             'quaternion': {'len':4,
                            'label':[]},
           }
input = [options.eulers != None,
         options.a != None and \
         options.b != None and \
         options.c != None,
         options.matrix != None,
         options.quaternion != None,
        ]

if options.eulers != None: datainfo['vector']['label'] += [options.eulers]; input = 'eulers'
if options.a != None and \
   options.b != None and \
   options.c != None: datainfo['vector']['label'] += [options.a,options.b,options.c]; input = 'frame'
if options.matrix != None: datainfo['tensor']['label'] += [options.matrix]; input = 'matrix'
if options.quaternion != None: datainfo['quaternion']['label'] += [options.quaternion]; input = 'quaternion'
if np.sum(input) != 1: parser.error('needs exactly one input format.')

datainfo['vector']['label'] += [options.position]
(label,dim,inputtype) = [(options.eulers,3,'eulers'),
                         ([options.a,options.b,options.c],[3,3,3],'frame'),
                         (options.matrix,9,'matrix'),
                         (options.quaternion,4,'quaternion'),
                        ][np.where(input)[0][0]]                                                    # select input label that was requested
toRadians = math.pi/180.0 if options.degrees else 1.0                                               # rescale degrees to radians

toRadians = np.pi/180.0 if options.degrees else 1.0                                                 # rescale degrees to radians
cos_disorientation = np.cos(options.disorientation/2.0*toRadians)
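Comparing the quaternion dot product against cos(theta/2) spares an arccos per candidate pair, since |q1 . q2| = cos(theta/2) for the misorientation angle theta between two unit quaternions (crystal symmetry is ignored in this sketch):

  import numpy as np
  theta = np.radians(10.0)
  q1 = np.array([1.0,0.0,0.0,0.0])                        # identity orientation
  q2 = np.array([np.cos(theta/2),0.0,0.0,np.sin(theta/2)])# 10 deg rotation about z
  cos_disorientation = np.cos(np.radians(5.0)/2)          # hypothetical 5-degree threshold
  print abs(np.dot(q1,q2)) >= cos_disorientation          # False: 10 deg exceeds 5 deg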
|
||||
# --- loop over input files -------------------------------------------------------------------------
|
||||
|
||||
# ------------------------------------------ setup file handles ---------------------------------------
|
||||
if filenames == []: filenames = ['STDIN']
|
||||
|
||||
files = []
|
||||
if filenames == []:
|
||||
files.append({'name':'STDIN',
|
||||
'input':sys.stdin,
|
||||
'output':sys.stdout,
|
||||
'croak':sys.stderr})
|
||||
else:
|
||||
for name in filenames:
|
||||
if os.path.exists(name):
|
||||
files.append({'name':name,
|
||||
'input':open(name),
|
||||
'output':open(name+'_tmp','w'),
|
||||
'croak':sys.stderr})
|
||||
for name in filenames:
|
||||
if not (name == 'STDIN' or os.path.exists(name)): continue
|
||||
table = damask.ASCIItable(name = name, outname = name+'_tmp',
|
||||
buffered = False)
|
||||
table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
|
||||
#--- loop over input files ------------------------------------------------------------------------

# ------------------------------------------ read header -------------------------------------------

for file in files:
  if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
  else:                       file['croak'].write('\033[1m'+scriptName+'\033[0m\n')

  table.head_read()

  table = damask.ASCIItable(file['input'],file['output'],buffered = False)   # make unbuffered ASCII_table
  table.head_read()                                                          # read ASCII header info

# ------------------------------------------ sanity checks -----------------------------------------

# --------------- figure out columns to process

  errors  = []
  remarks = []

  if table.label_dimension(options.coords) != 3: errors.append('coordinates {} are not a vector.'.format(options.coords))
  if not np.all(table.label_dimension(label) == dim): errors.append('input {} has wrong dimension {}.'.format(label,dim))
  else: column = table.label_index(label)

  column = {}
  missingColumns = False

  for datatype,info in datainfo.items():
    for label in info['label']:
      key = list(set([label, '1_'+label]) & set(table.labels))               # check for intersection with table labels
      if key == []:
        file['croak'].write('column %s not found...\n'%label)
        missingColumns = True                                                # break if label not found
      else:
        column[label] = table.labels.index(key[0])                           # remember columns of requested data

  if missingColumns:
  if remarks != []: table.croak(remarks)
  if errors  != []:
    table.croak(errors)
    table.close(dismiss = True)
    continue

  table.labels_append('grainID_%g'%options.disorientation)

# ------------------------------------------ assemble header ---------------------------------------

  table.info_append(string.replace(scriptID,'\n','\\n') + '\t' + ' '.join(sys.argv[1:]))
  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
  table.labels_append('grainID_{}@{}'.format(','.join(labels),options.disorientation/toRadians))   # report orientation source and disorientation in degrees
  table.head_write()

# ------------------------------------------ process data ------------------------------------------

# ------------------------------------------ build KD tree -----------------------------------------

# --- start background messaging
@ -186,178 +171,129 @@ for file in files:
    bg.set_message('reading positions...')

    backup_readSize = table.__IO__['validReadSize']                        # bad hack to circumvent overwriting by readArray...
    backup_labels   = table.labels                                         # bad hack...
    table.data_rewind()
    table.data_readArray(range(column[options.position],
                               column[options.position]+datainfo['vector']['len']))   # read position vectors
#    file['croak'].write('%i\n'%(len(table.data)))
  table.data_readArray(options.coords)                                     # read position vectors
  grainID = -np.ones(len(table.data),dtype=int)

  start = tick = time.clock()
  bg.set_message('building KD tree...')
  kdtree = spatial.KDTree(copy.deepcopy(table.data))
#  neighborhood = kdtree.query_ball_tree(kdtree,options.radius)
#  file['croak'].write('%.2f seconds\n'%(time.clock()-tick))
#  file['croak'].write('%i points\n'%(len(neighborhood)))

# ------------------------------------------ assign grain IDs ---------------------------------------

    orientations = []                                                      # quaternions found for grain
    memberCounts = []                                                      # number of voxels in grain

    table.data_rewind()
    table.__IO__['validReadSize'] = backup_readSize                        # bad hack to circumvent overwriting by readArray...
    table.labels = backup_labels                                           # bad hack...
    p = 0                                                                  # point counter
    g = 0                                                                  # grain counter
    matchedID = -1
    lastDistance = np.dot(kdtree.data[-1]-kdtree.data[0],kdtree.data[-1]-kdtree.data[0])   # (arbitrarily) use diagonal of cloud

# ------------------------------------------ assign grain IDs --------------------------------------

    tick = time.clock()
    while table.data_read():                                               # read next data line of ASCII table

  orientations = []                                                        # quaternions found for grain
  memberCounts = []                                                        # number of voxels in grain
  p = 0                                                                    # point counter
  g = 0                                                                    # grain counter
  matchedID = -1
  lastDistance = np.dot(kdtree.data[-1]-kdtree.data[0],kdtree.data[-1]-kdtree.data[0])   # (arbitrarily) use diagonal of cloud

  table.data_rewind()
  while table.data_read():                                                 # read next data line of ASCII table

    if p > 0 and p % 1000 == 0:

      time_delta = (time.clock()-tick) * (len(grainID) - p) / p
      bg.set_message('(%02i:%02i:%02i) processing point %i of %i (grain count %i)...'%(time_delta//3600,time_delta%3600//60,time_delta%60,p,len(grainID),len(orientations)))

    if input == 'eulers':
      o = damask.Orientation(Eulers=toRadians*\
                             np.array(map(float,table.data[column[options.eulers]:\
                                                           column[options.eulers]+datainfo['vector']['len']])),
                             symmetry=options.symmetry).reduced()
    elif input == 'matrix':
      o = damask.Orientation(matrix=\
                             np.array([map(float,table.data[column[options.matrix]:\
                                                            column[options.matrix]+datainfo['tensor']['len']])]).reshape(np.sqrt(datainfo['tensor']['len']),
                                                                                                                         np.sqrt(datainfo['tensor']['len'])).transpose(),
                             symmetry=options.symmetry).reduced()
    elif input == 'frame':
      o = damask.Orientation(matrix=\
                             np.array([map(float,table.data[column[options.a]:\
                                                            column[options.a]+datainfo['vector']['len']] + \
                                                 table.data[column[options.b]:\
                                                            column[options.b]+datainfo['vector']['len']] + \
                                                 table.data[column[options.c]:\
                                                            column[options.c]+datainfo['vector']['len']]
                                           )]).reshape(3,3),
                             symmetry=options.symmetry).reduced()
    elif input == 'quaternion':
      o = damask.Orientation(quaternion=\
                             np.array(map(float,table.data[column[options.quaternion]:\
                                                           column[options.quaternion]+datainfo['quaternion']['len']])),
                             symmetry=options.symmetry).reduced()

    if inputtype == 'eulers':
      o = damask.Orientation(Eulers   = np.array(map(float,table.data[column:column+3]))*toRadians,
                             symmetry = options.symmetry).reduced()
    elif inputtype == 'matrix':
      o = damask.Orientation(matrix   = np.array(map(float,table.data[column:column+9])).reshape(3,3).transpose(),
                             symmetry = options.symmetry).reduced()
    elif inputtype == 'frame':
      o = damask.Orientation(matrix   = np.array(map(float,table.data[column[0]:column[0]+3] + \
                                                           table.data[column[1]:column[1]+3] + \
                                                           table.data[column[2]:column[2]+3])).reshape(3,3),
                             symmetry = options.symmetry).reduced()
    elif inputtype == 'quaternion':
      o = damask.Orientation(quaternion = np.array(map(float,table.data[column:column+4])),
                             symmetry   = options.symmetry).reduced()
    matched = False

# check against last matched needs to be really picky. best would be to exclude jumps across the poke (checking distance between last and me?)
# when walking through neighborhood first check whether grainID of that point has already been tested, if yes, skip!

    if matchedID != -1:                                                    # has matched before?
      matched = (o.quaternion.conjugated() * orientations[matchedID].quaternion).w > cos_disorientation

#    if matchedID > 0:                                                     # has matched before?
#      thisDistance = np.dot(kdtree.data[p]-kdtree.data[p-1],kdtree.data[p]-kdtree.data[p-1],)
#      if thisDistance < 4.*lastDistance:                                  # about as close as last point pair?
#        disorientation = o.disorientation(orientations[matchedID-1]).quaternion.w   # check whether former grainID matches now again
#        matched = disorientation > cos_disorientation
#      lastDistance = thisDistance
#

    if not matched:
      alreadyChecked = {}
      bestDisorientation = damask.Orientation(quaternion = np.array([0,0,0,1]),
                                              symmetry = options.symmetry)   # initialize to 180 deg rotation as worst case
      for i in kdtree.query_ball_point(kdtree.data[p],options.radius):     # check all neighboring points
        gID = grainID[i]
        if gID != -1 and gID not in alreadyChecked:                        # an already indexed point belonging to a grain not yet tested?
          alreadyChecked[gID] = True                                       # remember not to check again
          disorientation = o.disorientation(orientations[gID])             # compare against that grain's orientation
          if disorientation.quaternion.w >  cos_disorientation and \
             disorientation.quaternion.w >= bestDisorientation.quaternion.w:   # within disorientation threshold and better than current best?
            matched = True
            matchedID = gID                                                # remember that grain
#            file['croak'].write('%i %f '%(matchedID,disorientation.quaternion.w))
            bestDisorientation = disorientation

    if not matched:                                                        # no match -> new grain found
      memberCounts += [1]                                                  # start new membership counter
      orientations += [o]                                                  # initialize with current orientation
      matchedID = g
      g += 1                                                               # increment grain counter
#      file['croak'].write('+')

    else:                                                                  # did match existing grain
      memberCounts[matchedID] += 1
#      file['croak'].write('got back %s is close by %f to %s\n'%(np.degrees(bestQ.asEulers()),np.degrees(2*np.arccos(bestDisorientation.quaternion.w)),np.degrees(bestFormerQ.asEulers())))
#      file['croak'].write('.%i %s'%(matchedID, orientations[matchedID-1].quaternion))
#      M = (1. - 1./memberCounts[matchedID-1]) * bestFormerQ.asM() + 1./memberCounts[matchedID-1] * bestQ.asM()   # 4x4 matrix holding weighted quaternion outer products per grain
#      w,v = np.linalg.eigh(M)
#      avgQ = damask.Orientation(quaternion=v[:,w.argmax()],symmetry=options.symmetry)
#      file['croak'].write('new avg has misori of %f\n'%np.degrees(2*np.arccos(orientations[matchedID-1].disorientation(avgQ)[0].quaternion.w)))
#      orientations[matchedID-1].quaternion = damask.Quaternion(v[:,w.argmax()])
#      orientations[matchedID-1] = damask.Orientation(quaternion = bestDisorientation.quaternion**(1./memberCounts[matchedID-1]) \
#                                                                * orientations[matchedID-1].quaternion,
#                                                     symmetry = options.symmetry)   # adjust average orientation taking newest member into account
#      file['croak'].write(' stored --> %s\n'%(np.degrees(orientations[matchedID-1].quaternion.asEulers())))
#      file['croak'].write('.')

    grainID[p] = matchedID                                                 # remember grain index assigned to point
    p += 1                                                                 # increment point
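The test `... .w > cos_disorientation` above works because the scalar part of a unit rotation quaternion equals cos(theta/2), so a larger w means a smaller misorientation angle. A standalone sketch of that check, assuming quaternions stored as (w,x,y,z); all names here are illustrative, not the script's:

import numpy as np

def quat_conj(q):
  return np.array([q[0], -q[1], -q[2], -q[3]])

def quat_mul(a, b):                                             # Hamilton product
  w1,x1,y1,z1 = a; w2,x2,y2,z2 = b
  return np.array([w1*w2 - x1*x2 - y1*y2 - z1*z2,
                   w1*x2 + x1*w2 + y1*z2 - z1*y2,
                   w1*y2 - x1*z2 + y1*w2 + z1*x2,
                   w1*z2 + x1*y2 - y1*x2 + z1*w2])

def within_disorientation(q_point, q_grain, cos_threshold):
  # relative rotation q_point^-1 * q_grain; its w component is cos(theta/2),
  # abs() guards against the quaternion double cover
  w = abs(quat_mul(quat_conj(q_point), q_grain)[0])
  return w > cos_threshold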
  bg.set_message('identifying similar orientations among %i grains...'%(len(orientations)))
  bg.set_message('identifying similar orientations among {} grains...'.format(len(orientations)))

  memberCounts = np.array(memberCounts)
  similarOrientations = [[] for i in xrange(len(orientations))]

  for i,orientation in enumerate(orientations):                            # compare each identified orientation...
    for j in xrange(i+1,len(orientations)):                                # ...against all others that were defined afterwards
      if orientation.disorientation(orientations[j]).quaternion.w > cos_disorientation:   # similar orientations in both grainIDs?
        similarOrientations[i].append(j)                                   # remember in upper triangle...
        similarOrientations[j].append(i)                                   # ...and lower triangle of matrix

    if similarOrientations[i] != []:
      bg.set_message('grainID %i is as: %s'%(i,' '.join(map(lambda x:str(x),similarOrientations[i]))))
      bg.set_message('grainID {} is as: {}'.format(i,' '.join(map(lambda x:str(x),similarOrientations[i]))))

  stillShifting = True
  while stillShifting:
    stillShifting = False
    tick = time.clock()

    for p,gID in enumerate(grainID):                                       # walk through all points
      if p > 0 and p % 1000 == 0:

        time_delta = (time.clock()-tick) * (len(grainID) - p) / p
        bg.set_message('(%02i:%02i:%02i) shifting ID of point %i out of %i (grain count %i)...'%(time_delta//3600,time_delta%3600//60,time_delta%60,p,len(grainID),len(orientations)))
      if similarOrientations[gID] != []:                                   # orientation of my grainID is similar to someone else?
        similarNeighbors = defaultdict(int)                                # dict holding frequency of neighboring grainIDs that share my orientation (freq info not used...)
        for i in kdtree.query_ball_point(kdtree.data[p],options.radius):   # check all neighboring points
          if grainID[i] in similarOrientations[gID]:                       # neighboring point shares my orientation?
            similarNeighbors[grainID[i]] += 1                              # remember its grainID
        if similarNeighbors != {}:                                         # found similar orientation(s) in neighborhood
          candidates = np.array([gID]+similarNeighbors.keys())             # possible replacement grainIDs for me
          grainID[p] = candidates[np.argsort(memberCounts[candidates])[-1]]   # adopt ID that is most frequent in overall dataset
          memberCounts[gID]        -= 1                                    # my former ID loses one fellow
          memberCounts[grainID[p]] += 1                                    # my new ID gains one fellow
          bg.set_message('%i:%i --> %i'%(p,gID,grainID[p]))                # report switch of grainID
          bg.set_message('{}:{} --> {}'.format(p,gID,grainID[p]))          # report switch of grainID
          stillShifting = True

  table.data_rewind()
  p = 0
  while table.data_read():                                                 # read next data line of ASCII table
    table.data_append(1+grainID[p])                                        # add grain ID
    table.data_write()                                                     # output processed line
    p += 1

  bg.set_message('done after %i seconds'%(time.clock()-start))
  bg.set_message('done after {} seconds'.format(time.clock()-start))

#  for i,o in enumerate(orientations):                                     # croak about average grain orientations
#    file['croak'].write('%i: %s\n'%(i,' '.join(map(str,o.quaternion.asEulers()))))

# ------------------------------------------ output finalization -----------------------------------

# ------------------------------------------ output result ---------------------------------------

  table.output_flush()                                                     # just in case of buffered ASCII table
  table.close()                                                            # close ASCII tables
  if file['name'] != 'STDIN':
    os.rename(file['name']+'_tmp',file['name'])                            # overwrite old one with tmp new
  if name != 'STDIN': os.rename(name+'_tmp',name)                          # overwrite old one with tmp new
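Putting the second pass together: each point repeatedly adopts the most populous similar grain found within its search radius until no point changes any more. A self-contained toy version of that pass (scipy assumed; all names are illustrative stand-ins for the script's variables, and member_counts is a NumPy integer array):

import numpy as np
from scipy import spatial
from collections import defaultdict

def shift_ids(points, grain_id, similar, member_counts, radius):
  kdtree = spatial.cKDTree(points)
  shifted = True
  while shifted:                                                # iterate until no point changes its ID
    shifted = False
    for p, gID in enumerate(grain_id):
      if not similar[gID]: continue                             # this grain resembles no other
      neighbors = defaultdict(int)
      for i in kdtree.query_ball_point(points[p], radius):
        if grain_id[i] in similar[gID]:                         # neighbor belongs to a similar grain
          neighbors[grain_id[i]] += 1
      if neighbors:
        candidates = np.array([gID] + list(neighbors.keys()))
        best = candidates[np.argmax(member_counts[candidates])] # most populous grain wins
        if best != gID:
          member_counts[gID]  -= 1
          member_counts[best] += 1
          grain_id[p] = best
          shifted = True
  return grain_id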
@ -18,28 +18,45 @@ Add RGB color value corresponding to TSL-OIM scheme for inverse pole figures.
""", version = scriptID)
|
||||
|
||||
parser.add_option('-p', '--pole', dest='pole', type='float', nargs=3, metavar='float float float',
|
||||
parser.add_option('-p', '--pole',
|
||||
dest = 'pole',
|
||||
type = 'float', nargs = 3, metavar = 'float float float',
|
||||
help = 'lab frame direction for inverse pole figure [%default]')
|
||||
parser.add_option('-s', '--symmetry', dest='symmetry', type='choice',
|
||||
choices=damask.Symmetry.lattices[1:], metavar='string',
|
||||
help = 'crystal symmetry [cubic] {%s} '%(', '.join(damask.Symmetry.lattices[1:])))
|
||||
parser.add_option('-e', '--eulers', dest='eulers', metavar='string',
|
||||
parser.add_option('-s', '--symmetry',
|
||||
dest = 'symmetry',
|
||||
type = 'choice', choices = damask.Symmetry.lattices[1:], metavar='string',
|
||||
help = 'crystal symmetry [%default] {{{}}} '.format(', '.join(damask.Symmetry.lattices[1:])))
|
||||
parser.add_option('-e', '--eulers',
|
||||
dest = 'eulers',
|
||||
type = 'string', metavar = 'string',
|
||||
help = 'Euler angles label')
|
||||
parser.add_option('-d', '--degrees', dest='degrees', action='store_true',
|
||||
parser.add_option('-d', '--degrees',
|
||||
dest = 'degrees',
|
||||
action = 'store_true',
|
||||
help = 'Euler angles are given in degrees [%default]')
|
||||
parser.add_option('-m', '--matrix', dest='matrix', metavar='string',
|
||||
parser.add_option('-m', '--matrix',
|
||||
dest = 'matrix',
|
||||
type = 'string', metavar = 'string',
|
||||
help = 'orientation matrix label')
|
||||
parser.add_option('-a', dest='a', metavar='string',
|
||||
parser.add_option('-a',
|
||||
dest = 'a',
|
||||
type = 'string', metavar = 'string',
|
||||
help = 'crystal frame a vector label')
|
||||
parser.add_option('-b', dest='b', metavar='string',
|
||||
parser.add_option('-b',
|
||||
dest = 'b',
|
||||
type = 'string', metavar = 'string',
|
||||
help = 'crystal frame b vector label')
|
||||
parser.add_option('-c', dest='c', metavar='string',
|
||||
parser.add_option('-c',
|
||||
dest = 'c',
|
||||
type = 'string', metavar = 'string',
|
||||
help = 'crystal frame c vector label')
|
||||
parser.add_option('-q', '--quaternion', dest='quaternion', metavar='string',
|
||||
parser.add_option('-q', '--quaternion',
|
||||
dest = 'quaternion',
|
||||
type = 'string', metavar = 'string',
|
||||
help = 'quaternion label')
|
||||
|
||||
parser.set_defaults(pole = (0.0,0.0,1.0),
|
||||
symmetry = 'cubic',
|
||||
symmetry = damask.Symmetry.lattices[-1],
|
||||
degrees = False,
|
||||
)
|
||||
|
||||
|
@ -53,75 +70,69 @@ input = [options.eulers != None,
         options.quaternion != None,
        ]

if np.sum(input) != 1: parser.error('needs exactly one input format...')
if np.sum(input) != 1: parser.error('needs exactly one input format.')

(label,dim,inputtype) = [(options.eulers,3,'eulers'),
                         ([options.a,options.b,options.c],[3,3,3],'frame'),
                         (options.matrix,9,'matrix'),
                         (options.quaternion,4,'quaternion'),
                        ][np.where(input)[0][0]]                           # select input label that was requested
toRadians = math.pi/180.0 if options.degrees else 1.0                      # rescale degrees to radians
pole = np.array(options.pole)
pole /= np.linalg.norm(pole)

# --- loop over input files ------------------------------------------------------------------------

if filenames == []: filenames = ['STDIN']

for name in filenames:
  if name == 'STDIN':
    file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr}
    file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
  else:
    if not os.path.exists(name): continue
    file = {'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr}
    file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
  if not (name == 'STDIN' or os.path.exists(name)): continue
  table = damask.ASCIItable(name = name, outname = name+'_tmp',
                            buffered = False)
  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

  table = damask.ASCIItable(file['input'],file['output'],buffered=False)   # make unbuffered ASCII_table
  table.head_read()                                                        # read ASCII header info

# ------------------------------------------ read header ------------------------------------------

  table.head_read()

# ------------------------------------------ sanity checks ----------------------------------------

  if not np.all(table.label_dimension(label) == dim):
    file['croak'].write('input %s has wrong dimension %i...\n'%(label,dim))
    table.croak('input {} has wrong dimension {}.'.format(label,dim))
    table.close(dismiss = True)                                            # close ASCIItable and remove empty file
    continue

  column = table.label_index(label)

# ------------------------------------------ assemble header ---------------------------------------

  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
  table.labels_append(['%i_IPF_%g%g%g_%s'%(i+1,options.pole[0],options.pole[1],options.pole[2],options.symmetry.lower()) for i in xrange(3)])
  table.labels_append(['{}_IPF_{}{}{}_{sym}'.format(i+1,*options.pole,sym = options.symmetry.lower()) for i in xrange(3)])
  table.head_write()

# ------------------------------------------ process data ------------------------------------------

  outputAlive = True
  while outputAlive and table.data_read():                                 # read next data line of ASCII table
    if inputtype == 'eulers':
      o = damask.Orientation(Eulers=toRadians*\
                             np.array(map(float,table.data[column:column+3])),
                             symmetry=options.symmetry).reduced()
      o = damask.Orientation(Eulers   = np.array(map(float,table.data[column:column+3]))*toRadians,
                             symmetry = options.symmetry).reduced()
    elif inputtype == 'matrix':
      o = damask.Orientation(matrix=\
                             np.array([map(float,table.data[column:column+9])]).reshape(3,3).transpose(),
                             symmetry=options.symmetry).reduced()
      o = damask.Orientation(matrix   = np.array(map(float,table.data[column:column+9])).reshape(3,3).transpose(),
                             symmetry = options.symmetry).reduced()
    elif inputtype == 'frame':
      o = damask.Orientation(matrix=\
                             np.array([map(float,table.data[column[0]:column[0]+3] + \
                                                 table.data[column[1]:column[1]+3] + \
                                                 table.data[column[2]:column[2]+3]
                                           )]).reshape(3,3),
                             symmetry=options.symmetry).reduced()
      o = damask.Orientation(matrix   = np.array(map(float,table.data[column[0]:column[0]+3] + \
                                                           table.data[column[1]:column[1]+3] + \
                                                           table.data[column[2]:column[2]+3])).reshape(3,3),
                             symmetry = options.symmetry).reduced()
    elif inputtype == 'quaternion':
      o = damask.Orientation(quaternion=\
                             np.array(map(float,table.data[column:column+4])),
                             symmetry=options.symmetry).reduced()
      o = damask.Orientation(quaternion = np.array(map(float,table.data[column:column+4])),
                             symmetry   = options.symmetry).reduced()

    table.data_append(o.IPFcolor(pole))
    outputAlive = table.data_write()                                       # output processed line

# ------------------------------------------ output result -----------------------------------------
  outputAlive and table.output_flush()                                     # just in case of buffered ASCII table

# ------------------------------------------ output finalization -----------------------------------

  table.input_close()                                                      # close input ASCII table (works for stdin)
  table.output_close()                                                     # close output ASCII table (works for stdout)
  if file['name'] != 'STDIN':
    os.rename(file['name']+'_tmp',file['name'])                            # overwrite old one with tmp new
  table.close()                                                            # close ASCII tables
  if name != 'STDIN': os.rename(name+'_tmp',name)                          # overwrite old one with tmp new
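The sanity-check idiom above keys on the two new ASCIItable helpers: label_dimension reports how many columns sit behind a label (negative when absent) and label_index reports where they start. A hedged sketch wrapping that idiom into a hypothetical helper (the helper name and error text are illustrative, not part of the commit):

def check_column(table, label, dim):
  # hypothetical helper: return the start column of `label`, or None after
  # reporting the problem and dismissing the half-written output file
  if table.label_dimension(label) != dim:
    table.croak('input {} does not have dimension {}.'.format(label,dim))
    table.close(dismiss = True)                                 # dismiss removes the tmp file
    return None
  return table.label_index(label)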
@ -17,102 +17,89 @@ Add data in column(s) of second ASCIItable selected from row that is given by th
""", version = scriptID)
|
||||
|
||||
parser.add_option('-a','--asciitable', dest='asciitable', metavar='string',
|
||||
help='mapped ASCIItable')
|
||||
parser.add_option('-c','--map', dest='map', metavar='string',
|
||||
help='heading of column containing row mapping')
|
||||
parser.add_option('-o','--offset', dest='offset', type='int', metavar='int',
|
||||
help='offset between mapped column value and row [%default]')
|
||||
parser.add_option('-v','--vector', dest='vector', action='extend', metavar='<string LIST>',
|
||||
help='heading of columns containing vector field values')
|
||||
parser.add_option('-t','--tensor', dest='tensor', action='extend', metavar='<string LIST>',
|
||||
help='heading of columns containing tensor field values')
|
||||
parser.add_option('-s','--special', dest='special', action='extend', metavar='<string LIST>',
|
||||
help='heading of columns containing field values of special dimension')
|
||||
parser.add_option('-d','--dimension', dest='N', type='int', metavar='int',
|
||||
help='dimension of special field values [%default]')
|
||||
parser.set_defaults(offset = 0)
|
||||
parser.set_defaults(N = 1)
|
||||
parser.add_option('-c','--map',
|
||||
dest = 'map',
|
||||
type = 'string', metavar = 'string',
|
||||
help = 'heading of column containing row mapping')
|
||||
parser.add_option('-o','--offset',
|
||||
dest = 'offset',
|
||||
type = 'int', metavar = 'int',
|
||||
help = 'offset between mapping column value and actual row in mapped table [%default]')
|
||||
parser.add_option('-l','--label',
|
||||
dest = 'label',
|
||||
action = 'extend', metavar = '<string LIST>',
|
||||
help='heading of column(s) to be mapped')
|
||||
parser.add_option('-a','--asciitable',
|
||||
dest = 'asciitable',
|
||||
type = 'string', metavar = 'string',
|
||||
help = 'mapped ASCIItable')
|
||||
|
||||
parser.set_defaults(offset = 0,
|
||||
)
|
||||
|
||||
(options,filenames) = parser.parse_args()
|
||||
|
||||
if (not None) in [options.vector,options.tensor,options.special]:
|
||||
parser.error('no data column specified...')
|
||||
if options.label == None:
|
||||
parser.error('no data columns specified.')
|
||||
if options.map == None:
|
||||
parser.error('missing mapping column...')
|
||||
parser.error('no mapping column given.')
|
||||
|
||||
datainfo = { # list of requested labels per datatype
|
||||
'vector': {'len':3,
|
||||
'label':[]},
|
||||
'tensor': {'len':9,
|
||||
'label':[]},
|
||||
'special': {'len':options.N,
|
||||
'label':[]},
|
||||
}
|
||||
# ------------------------------------------ process mapping ASCIItable ---------------------------
|
||||
|
||||
if options.vector != None: datainfo['vector']['label'] += options.vector
|
||||
if options.tensor != None: datainfo['tensor']['label'] += options.tensor
|
||||
if options.special != None: datainfo['special']['label'] += options.special
|
||||
|
||||
# ------------------------------------------ processing mapping ASCIItable -------------------------
|
||||
if options.asciitable != None and os.path.isfile(options.asciitable):
|
||||
mappedTable = damask.ASCIItable(open(options.asciitable),None,False)
|
||||
|
||||
mappedTable = damask.ASCIItable(name = options.asciitable,buffered = False, readonly = True)
|
||||
mappedTable.head_read() # read ASCII header info of mapped table
|
||||
missing_labels = mappedTable.data_readArray(options.label)
|
||||
|
||||
labels = []
|
||||
for datatype,info in datainfo.items():
|
||||
for label in info['label']:
|
||||
keys = ['%i_'%(i+1)+label for i in xrange(info['len'])] if info['len'] > 1 else [label]
|
||||
if set(keys).issubset(mappedTable.labels):
|
||||
labels+=keys # extend labels
|
||||
else:
|
||||
sys.stderr.write('column %s not found...\n'%label)
|
||||
break
|
||||
|
||||
mappedTable.data_readArray(labels)
|
||||
mappedTable.input_close() # close mapped input ASCII table
|
||||
if len(missing_labels) > 0:
|
||||
mappedTable.croak('column{} {} not found...'.format('s' if len(missing_labels) > 1 else '',', '.join(missing_labels)))
|
||||
|
||||
else:
|
||||
parser.error('missing mapped ASCIItable...')
|
||||
parser.error('no mapped ASCIItable given.')
|
||||
|
||||
# ------------------------------------------ setup file handles ------------------------------------
|
||||
files = []
|
||||
if filenames == []:
|
||||
files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
|
||||
else:
|
||||
for name in filenames:
|
||||
if os.path.exists(name):
|
||||
files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
|
||||
# --- loop over input files -------------------------------------------------------------------------
|
||||
|
||||
# ------------------------------------------ loop over input files ---------------------------------
|
||||
for file in files:
|
||||
if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
|
||||
else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
|
||||
if filenames == []: filenames = ['STDIN']
|
||||
|
||||
table = damask.ASCIItable(file['input'],file['output'],False) # make unbuffered ASCII_table
|
||||
table.head_read() # read ASCII header info
|
||||
|
||||
if options.map not in table.labels:
|
||||
file['croak'].write('column %s not found...\n'%options.map)
|
||||
for name in filenames:
|
||||
if not (name == 'STDIN' or os.path.exists(name)): continue
|
||||
table = damask.ASCIItable(name = name, outname = name+'_tmp',
|
||||
buffered = False)
|
||||
table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
|
||||
|
||||
# ------------------------------------------ read header ------------------------------------------
|
||||
|
||||
table.head_read()
|
||||
|
||||
# ------------------------------------------ sanity checks ----------------------------------------
|
||||
|
||||
errors = []
|
||||
|
||||
mappedColumn = table.label_index(options.map)
|
||||
if mappedColumn < 0: errors.append('mapping column {} not found.'.format(options.map))
|
||||
|
||||
if errors != []:
|
||||
table.croak(errors)
|
||||
table.close(dismiss = True)
|
||||
continue
|
||||
|
||||
# ------------------------------------------ assemble header --------------------------------------
|
||||
|
||||
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
|
||||
for label in mappedTable.labels:
|
||||
table.labels_append(label)
|
||||
table.labels_append(mappedTable.labels) # extend ASCII header with new labels
|
||||
table.head_write()
|
||||
|
||||
# ------------------------------------------ process data ------------------------------------------
|
||||
mappedColumn = table.labels.index(options.map)
|
||||
|
||||
outputAlive = True
|
||||
while outputAlive and table.data_read(): # read next data line of ASCII table
|
||||
table.data_append(mappedTable.data[int(table.data[mappedColumn])+options.offset-1]) # add all mapped data types
|
||||
outputAlive = table.data_write() # output processed line
|
||||
|
||||
# ------------------------------------------ output result -----------------------------------------
|
||||
outputAlive and table.output_flush() # just in case of buffered ASCII table
|
||||
# ------------------------------------------ output finalization -----------------------------------
|
||||
|
||||
table.input_close() # close input ASCII table (works for stdin)
|
||||
table.output_close() # close output ASCII table (works for stdout)
|
||||
if file['name'] != 'STDIN':
|
||||
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new
|
||||
table.close() # close ASCII tables
|
||||
if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new
|
||||
|
||||
mappedTable.close() # close mapped input ASCII table
|
||||
|
|
|
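The data loop above resolves each row's mapping value into a row of the mapped table as `int(value)+offset-1`. A toy sketch of just that arithmetic (names illustrative):

def mapped_row(mapped, key, offset = 0):
  # mapped: 2-D array read from the secondary table; key: value found in the
  # mapping column; table rows count from 1, hence the -1 into the 0-based array
  return mapped[int(key) + offset - 1]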
@ -29,76 +29,78 @@ Add vonMises equivalent values for symmetric part of requested strains and/or st
""", version = scriptID)
|
||||
|
||||
parser.add_option('-e','--strain', dest='strain', action='extend', metavar='<string LIST>',
|
||||
help='heading(s) of columns containing strain tensors')
|
||||
parser.add_option('-s','--stress', dest='stress', action='extend', metavar='<string LIST>',
|
||||
help='heading(s) of columns containing stress tensors')
|
||||
parser.add_option('-e','--strain',
|
||||
dest = 'strain',
|
||||
action = 'extend', metavar = '<string LIST>',
|
||||
help = 'heading(s) of columns containing strain tensors')
|
||||
parser.add_option('-s','--stress',
|
||||
dest = 'stress',
|
||||
action = 'extend', metavar = '<string LIST>',
|
||||
help = 'heading(s) of columns containing stress tensors')
|
||||
|
||||
parser.set_defaults(strain = [],
|
||||
stress = [],
|
||||
)
|
||||
|
||||
(options,filenames) = parser.parse_args()
|
||||
|
||||
if (not None) in [options.strain,options.stress]:
|
||||
if len(options.stress+options.strain) == 0:
|
||||
parser.error('no data column specified...')
|
||||
|
||||
datainfo = { # list of requested labels per datatype
|
||||
'strain': {'len':9,
|
||||
'label':[]},
|
||||
'stress': {'len':9,
|
||||
'label':[]},
|
||||
}
|
||||
# --- loop over input files -------------------------------------------------------------------------
|
||||
|
||||
if options.strain != None: datainfo['strain']['label'] += options.strain
|
||||
if options.stress != None: datainfo['stress']['label'] += options.stress
|
||||
if filenames == []: filenames = ['STDIN']
|
||||
|
||||
# ------------------------------------------ setup file handles ------------------------------------
|
||||
files = []
|
||||
if filenames == []:
|
||||
files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
|
||||
else:
|
||||
for name in filenames:
|
||||
if os.path.exists(name):
|
||||
files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
|
||||
for name in filenames:
|
||||
if not (name == 'STDIN' or os.path.exists(name)): continue
|
||||
table = damask.ASCIItable(name = name, outname = name+'_tmp',
|
||||
buffered = False)
|
||||
table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
|
||||
|
||||
# ------------------------------------------ loop over input files ---------------------------------
|
||||
for file in files:
|
||||
if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
|
||||
else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
|
||||
# ------------------------------------------ read header ------------------------------------------
|
||||
|
||||
table = damask.ASCIItable(file['input'],file['output'],False) # make unbuffered ASCII_table
|
||||
table.head_read() # read ASCII header info
|
||||
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
|
||||
table.head_read()
|
||||
|
||||
active = defaultdict(list)
|
||||
column = defaultdict(dict)
|
||||
# ------------------------------------------ sanity checks ----------------------------------------
|
||||
|
||||
for datatype,info in datainfo.items():
|
||||
for label in info['label']:
|
||||
key = '1_%s'%label
|
||||
if key not in table.labels:
|
||||
file['croak'].write('column %s not found...\n'%key)
|
||||
items = {
|
||||
'strain': {'dim': 9, 'shape': [3,3], 'labels':options.strain, 'active':[], 'column': []},
|
||||
'stress': {'dim': 9, 'shape': [3,3], 'labels':options.stress, 'active':[], 'column': []},
|
||||
}
|
||||
errors = []
|
||||
remarks = []
|
||||
|
||||
for type, data in items.iteritems():
|
||||
for what in data['labels']:
|
||||
dim = table.label_dimension(what)
|
||||
if dim != data['dim']: remarks.append('column {} is not a {}...'.format(what,type))
|
||||
else:
|
||||
active[datatype].append(label)
|
||||
column[datatype][label] = table.labels.index(key) # remember columns of requested data
|
||||
items[type]['active'].append(what)
|
||||
items[type]['column'].append(table.label_index(what))
|
||||
table.labels_append('Mises({})'.format(what)) # extend ASCII header with new labels
|
||||
|
||||
# ------------------------------------------ assemble header ---------------------------------------
|
||||
for datatype,labels in active.items(): # loop over vector,tensor
|
||||
for label in labels: # loop over all requested determinants
|
||||
table.labels_append('Mises(%s)'%label) # extend ASCII header with new labels
|
||||
if remarks != []: table.croak(remarks)
|
||||
if errors != []:
|
||||
table.croak(errors)
|
||||
table.close(dismiss = True)
|
||||
continue
|
||||
|
||||
# ------------------------------------------ assemble header --------------------------------------
|
||||
|
||||
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
|
||||
table.head_write()
|
||||
|
||||
# ------------------------------------------ process data ------------------------------------------
|
||||
|
||||
outputAlive = True
|
||||
while outputAlive and table.data_read(): # read next data line of ASCII table
|
||||
for datatype,labels in active.items(): # loop over vector,tensor
|
||||
for label in labels: # loop over all requested norms
|
||||
table.data_append(Mises(datatype,
|
||||
np.array(map(float,table.data[column[datatype][label]:
|
||||
column[datatype][label]+datainfo[datatype]['len']]),'d').reshape(3,3)))
|
||||
for type, data in items.iteritems():
|
||||
for column in data['column']:
|
||||
table.data_append(Mises(type,
|
||||
np.array(table.data[column:column+data['dim']],'d').reshape(data['shape'])))
|
||||
outputAlive = table.data_write() # output processed line
|
||||
|
||||
# ------------------------------------------ output result -----------------------------------------
|
||||
outputAlive and table.output_flush() # just in case of buffered ASCII table
|
||||
# ------------------------------------------ output finalization -----------------------------------
|
||||
|
||||
table.input_close() # close input ASCII table (works for stdin)
|
||||
table.output_close() # close output ASCII table (works for stdout)
|
||||
if file['name'] != 'STDIN':
|
||||
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new
|
||||
table.close() # close input ASCII table (works for stdin)
|
||||
if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new
|
||||
|
|
|
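Mises(type, tensor) reduces a 3x3 tensor to its von Mises equivalent. A hedged stand-in for what such a helper computes under the textbook definitions, sqrt(3/2 s:s) for the stress deviator s and sqrt(2/3 e:e) for the strain deviator e (the differing factor makes uniaxial cases come out right); this is a sketch, not necessarily the script's exact implementation:

import numpy as np

def Mises(what, tensor):
  dev    = tensor - np.trace(tensor)/3.0*np.eye(3)              # deviatoric part
  symdev = 0.5*(dev + dev.T)                                    # symmetric part of deviator
  return np.sqrt({'stress': 1.5, 'strain': 2.0/3.0}[what]
                 * np.sum(symdev*symdev))                       # sqrt(factor * s:s)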
@ -10,14 +10,15 @@ scriptID = string.replace('$Id$','\n','\\n')
scriptName = os.path.splitext(scriptID.split()[1])[0]
# definition of element-wise p-norms for matrices

def normAbs(object):                                                       # p = 1
  return sum(map(abs, object))

def normFrobenius(object):                                                 # p = 2
  return math.sqrt(sum([x*x for x in object]))

def normMax(object):                                                       # p = infinity
  return max(map(abs, object))

def norm(which,object):

  if which == 'Abs':                                                       # p = 1
    return sum(map(abs, object))
  elif which == 'Frobenius':                                               # p = 2
    return math.sqrt(sum([x*x for x in object]))
  elif which == 'Max':                                                     # p = inf
    return max(map(abs, object))

# --------------------------------------------------------------------
#                                MAIN
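The refactor folds the three separate helpers into one dispatcher selected by name, which is what lets the main loop below drop its eval() construction. A small usage sketch (the data row is illustrative):

row = ['1.0','-2.0','2.0']                                      # a 3-vector as read from a table row
print(norm('Frobenius', map(float, row)))                       # sqrt(1+4+4) = 3.0
print(norm('Max',       map(float, row)))                       # 2.0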
@ -29,86 +30,73 @@ Add column(s) containing norm of requested column(s) being either vectors or ten
""", version = scriptID)
|
||||
|
||||
normChoices = ['abs','frobenius','max']
|
||||
parser.add_option('-n','--norm', dest='norm', type='choice', choices=normChoices, metavar='string',
|
||||
help='type of element-wise p-norm [frobenius] {%s}'%(','.join(map(str,normChoices))))
|
||||
parser.add_option('-v','--vector', dest='vector', action='extend', metavar='<string LIST>',
|
||||
help='heading of columns containing vector field values')
|
||||
parser.add_option('-t','--tensor', dest='tensor', action='extend', metavar='<string LIST>',
|
||||
help='heading of columns containing tensor field values')
|
||||
parser.add_option('-s','--special', dest='special', action='extend', metavar='<string LIST>',
|
||||
help='heading of columns containing field values of special dimension')
|
||||
parser.add_option('-d','--dimension', dest='N', type='int', metavar='int',
|
||||
help='dimension of special field values [%default]')
|
||||
parser.set_defaults(norm = 'frobenius')
|
||||
parser.set_defaults(N = 12)
|
||||
parser.add_option('-n','--norm',
|
||||
dest = 'norm',
|
||||
type = 'choice', choices = normChoices, metavar='string',
|
||||
help = 'type of element-wise p-norm [frobenius] {%s}'%(','.join(map(str,normChoices))))
|
||||
parser.add_option('-l','--label',
|
||||
dest = 'label',
|
||||
action = 'extend', metavar = '<string LIST>',
|
||||
help = 'heading of column(s) to calculate norm of')
|
||||
|
||||
parser.set_defaults(norm = 'frobenius',
|
||||
)
|
||||
|
||||
(options,filenames) = parser.parse_args()
|
||||
|
||||
if (not None) in [options.vector,options.tensor,options.special]:
|
||||
parser.error('no data column specified...')
|
||||
if options.label == None:
|
||||
parser.error('no data column specified.')
|
||||
|
||||
datainfo = { # list of requested labels per datatype
|
||||
'vector': {'len':3,
|
||||
'label':[]},
|
||||
'tensor': {'len':9,
|
||||
'label':[]},
|
||||
'special': {'len':options.N,
|
||||
'label':[]},
|
||||
}
|
||||
# --- loop over input files -------------------------------------------------------------------------
|
||||
|
||||
if options.vector != None: datainfo['vector']['label'] += options.vector
|
||||
if options.tensor != None: datainfo['tensor']['label'] += options.tensor
|
||||
if options.special != None: datainfo['special']['label'] += options.special
|
||||
if filenames == []: filenames = ['STDIN']
|
||||
|
||||
# ------------------------------------------ setup file handles ------------------------------------
|
||||
files = []
|
||||
if filenames == []:
|
||||
files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
|
||||
else:
|
||||
for name in filenames:
|
||||
if os.path.exists(name):
|
||||
files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
|
||||
for name in filenames:
|
||||
if not (name == 'STDIN' or os.path.exists(name)): continue
|
||||
table = damask.ASCIItable(name = name, outname = name+'_tmp',
|
||||
buffered = False)
|
||||
table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
|
||||
|
||||
#--- loop over input files -------------------------------------------------------------------------
|
||||
for file in files:
|
||||
if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
|
||||
else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
|
||||
# ------------------------------------------ read header ------------------------------------------
|
||||
|
||||
table.head_read()
|
||||
|
||||
# ------------------------------------------ sanity checks ----------------------------------------
|
||||
|
||||
errors = []
|
||||
remarks = []
|
||||
columns = []
|
||||
dims = []
|
||||
|
||||
for what in options.label:
|
||||
dim = table.label_dimension(what)
|
||||
if dim < 0: remarks.append('column {} not found...'.format(what))
|
||||
else:
|
||||
dims.append(dim)
|
||||
columns.append(table.label_index(what))
|
||||
table.labels_append('norm{}({})'.format(options.norm.capitalize(),what)) # extend ASCII header with new labels
|
||||
|
||||
if remarks != []: table.croak(remarks)
|
||||
if errors != []:
|
||||
table.croak(errors)
|
||||
table.close(dismiss = True)
|
||||
continue
|
||||
|
||||
# ------------------------------------------ assemble header --------------------------------------
|
||||
|
||||
table = damask.ASCIItable(file['input'],file['output'],False) # make unbuffered ASCII_table
|
||||
table.head_read() # read ASCII header info
|
||||
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
|
||||
|
||||
active = defaultdict(list)
|
||||
column = defaultdict(dict)
|
||||
|
||||
for datatype,info in datainfo.items():
|
||||
for label in info['label']:
|
||||
key = '1_'+label if info['len'] > 1 else label # columns of non-scalars need to start with '1_'
|
||||
if key not in table.labels:
|
||||
file['croak'].write('column %s not found...\n'%key)
|
||||
else:
|
||||
active[datatype].append(label)
|
||||
column[datatype][label] = table.labels.index(key) # remember columns of requested data
|
||||
|
||||
# ------------------------------------------ assemble header ---------------------------------------
|
||||
for datatype,labels in active.items(): # loop over vector,tensor
|
||||
for label in labels: # loop over all requested determinants
|
||||
table.labels_append('norm%s(%s)'%(options.norm.capitalize(),label)) # extend ASCII header with new labels
|
||||
table.head_write()
|
||||
|
||||
# ------------------------------------------ process data ------------------------------------------
|
||||
|
||||
outputAlive = True
|
||||
while outputAlive and table.data_read(): # read next data line of ASCII table
|
||||
for datatype,labels in active.items(): # loop over vector,tensor
|
||||
for label in labels: # loop over all requested norms
|
||||
eval("table.data_append(norm%s(map(float,table.data[column[datatype][label]:"\
|
||||
"column[datatype][label]+datainfo[datatype]['len']])))"%options.norm.capitalize())
|
||||
for column,dim in zip(columns,dims):
|
||||
table.data_append(norm(options.norm.capitalize(),
|
||||
map(float,table.data[column:column+dim])))
|
||||
outputAlive = table.data_write() # output processed line
|
||||
|
||||
# ------------------------------------------ output result -----------------------------------------
|
||||
outputAlive and table.output_flush() # just in case of buffered ASCII table
|
||||
# ------------------------------------------ output finalization -----------------------------------
|
||||
|
||||
table.input_close() # close input ASCII table (works for stdin)
|
||||
table.output_close() # close output ASCII table (works for stdout)
|
||||
if file['name'] != 'STDIN':
|
||||
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new
|
||||
table.close() # close input ASCII table (works for stdin)
|
||||
if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new
|
||||
|
|
|
@ -20,151 +20,141 @@ Orientation is given by quaternion, Euler angles, rotation matrix, or crystal fr
""", version = scriptID)
|
||||
|
||||
outputChoices = ['quaternion','eulers']
|
||||
parser.add_option('-o', '--output', dest='output', action='extend', metavar='<string LIST>',
|
||||
parser.add_option('-o', '--output',
|
||||
dest = 'output',
|
||||
action = 'extend', metavar = '<string LIST>',
|
||||
help = 'output orientation formats {%s}'%(','.join(outputChoices)))
|
||||
parser.add_option('-s', '--symmetry', dest='symmetry', type='choice',
|
||||
choices=damask.Symmetry.lattices[1:], metavar='string',
|
||||
help = 'crystal symmetry [cubic] {%s}'%(', '.join(damask.Symmetry.lattices[1:])))
|
||||
parser.add_option('-r', '--rotation', dest='rotation', type='float', nargs=4, metavar='float float float float',
|
||||
parser.add_option('-r', '--rotation',
|
||||
dest='rotation',
|
||||
type = 'float', nargs = 4, metavar = ' '.join(['float']*4),
|
||||
help = 'angle and axis to (pre)rotate orientation')
|
||||
parser.add_option('-e', '--eulers', dest='eulers', metavar='string',
|
||||
|
||||
parser.add_option('-s', '--symmetry',
|
||||
dest = 'symmetry',
|
||||
type = 'choice', choices = damask.Symmetry.lattices[1:], metavar='string',
|
||||
help = 'crystal symmetry [%default] {{{}}} '.format(', '.join(damask.Symmetry.lattices[1:])))
|
||||
parser.add_option('-e', '--eulers',
|
||||
dest = 'eulers',
|
||||
type = 'string', metavar = 'string',
|
||||
help = 'Euler angles label')
|
||||
parser.add_option('-d', '--degrees', dest='degrees', action='store_true',
|
||||
parser.add_option('-d', '--degrees',
|
||||
dest = 'degrees',
|
||||
action = 'store_true',
|
||||
help = 'Euler angles are given in degrees [%default]')
|
||||
parser.add_option('-m', '--matrix', dest='matrix', metavar='string',
|
||||
parser.add_option('-m', '--matrix',
|
||||
dest = 'matrix',
|
||||
type = 'string', metavar = 'string',
|
||||
help = 'orientation matrix label')
|
||||
parser.add_option('-a', dest='a', metavar='string',
|
||||
parser.add_option('-a',
|
||||
dest = 'a',
|
||||
type = 'string', metavar = 'string',
|
||||
help = 'crystal frame a vector label')
|
||||
parser.add_option('-b', dest='b', metavar='string',
|
||||
parser.add_option('-b',
|
||||
dest = 'b',
|
||||
type = 'string', metavar = 'string',
|
||||
help = 'crystal frame b vector label')
|
||||
parser.add_option('-c', dest='c', metavar='string',
|
||||
parser.add_option('-c',
|
||||
dest = 'c',
|
||||
type = 'string', metavar = 'string',
|
||||
help = 'crystal frame c vector label')
|
||||
parser.add_option('-q', '--quaternion', dest='quaternion', metavar='string',
|
||||
parser.add_option('-q', '--quaternion',
|
||||
dest = 'quaternion',
|
||||
type = 'string', metavar = 'string',
|
||||
help = 'quaternion label')
|
||||
parser.set_defaults(symmetry = 'cubic')
|
||||
parser.set_defaults(rotation = (0.,1.,1.,1.)) # no rotation about 1,1,1
|
||||
parser.set_defaults(degrees = False)
|
||||
|
||||
parser.set_defaults(output = [],
|
||||
symmetry = damask.Symmetry.lattices[-1],
|
||||
rotation = (0.,1.,1.,1.), # no rotation about 1,1,1
|
||||
degrees = False,
|
||||
)
|
||||
|
||||
(options, filenames) = parser.parse_args()
|
||||
|
||||
datainfo = { # list of requested labels per datatype
|
||||
'tensor': {'len':9,
|
||||
'label':[]},
|
||||
'vector': {'len':3,
|
||||
'label':[]},
|
||||
'quaternion': {'len':4,
|
||||
'label':[]},
|
||||
}
|
||||
|
||||
if options.output == None or (not set(options.output).issubset(set(outputChoices))):
|
||||
parser.error('output must be chosen from %s...'%(', '.join(outputChoices)))
|
||||
|
||||
input=[]
|
||||
if options.eulers != None:
|
||||
datainfo['vector']['label'] += [options.eulers]
|
||||
input.append('eulers')
|
||||
if options.a != None and \
|
||||
options.b != None and \
|
||||
options.c != None:
|
||||
datainfo['vector']['label'] += [options.a,options.b,options.c]
|
||||
input.append('frame')
|
||||
if options.matrix != None:
|
||||
datainfo['tensor']['label'] += [options.matrix]
|
||||
input.append('matrix')
|
||||
if options.quaternion != None:
|
||||
datainfo['quaternion']['label'] += [options.quaternion]
|
||||
input.append('quaternion')
|
||||
|
||||
if len(input) != 1: parser.error('needs exactly one input format...')
|
||||
input = input[0]
|
||||
|
||||
toRadians = math.pi/180.0 if options.degrees else 1.0 # rescale degrees to radians
|
||||
options.output = map(lambda x: x.lower(), options.output)
|
||||
if options.output == [] or (not set(options.output).issubset(set(outputChoices))):
|
||||
parser.error('output must be chosen from {}.'.format(', '.join(outputChoices)))
|
||||
|
||||
r = damask.Quaternion().fromAngleAxis(toRadians*options.rotation[0],options.rotation[1:])
|
||||
input = [options.eulers != None,
|
||||
options.a != None and \
|
||||
options.b != None and \
|
||||
options.c != None,
|
||||
options.matrix != None,
|
||||
options.quaternion != None,
|
||||
]
|
||||
|
||||
# --- loop over input files -------------------------------------------------------------------------
|
||||
if filenames == []:
|
||||
filenames = ['STDIN']
|
||||
if np.sum(input) != 1: parser.error('needs exactly one input format.')
|
||||
|
||||
(label,dim,inputtype) = [(options.eulers,3,'eulers'),
|
||||
([options.a,options.b,options.c],[3,3,3],'frame'),
|
||||
(options.matrix,9,'matrix'),
|
||||
(options.quaternion,4,'quaternion'),
|
||||
][np.where(input)[0][0]] # select input label that was requested
|
||||
toRadians = math.pi/180.0 if options.degrees else 1.0 # rescale degrees to radians
|
||||
r = damask.Quaternion().fromAngleAxis(toRadians*options.rotation[0],options.rotation[1:]) # pre-rotation
|
||||
|
||||
# --- loop over input files ------------------------------------------------------------------------
|
||||
|
||||
if filenames == []: filenames = ['STDIN']
|
||||
|
||||
for name in filenames:
|
||||
if name == 'STDIN':
|
||||
file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr}
|
||||
file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
|
||||
else:
|
||||
if not os.path.exists(name): continue
|
||||
file = {'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr}
|
||||
file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
|
||||
if not (name == 'STDIN' or os.path.exists(name)): continue
|
||||
table = damask.ASCIItable(name = name, outname = name+'_tmp',
|
||||
buffered = False)
|
||||
table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
|
||||
|
||||
table = damask.ASCIItable(file['input'],file['output'],buffered=False) # make unbuffered ASCII_table
|
||||
table.head_read() # read ASCII header info
|
||||
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
|
||||
# ------------------------------------------ read header ------------------------------------------
|
||||
|
||||
column = {}
|
||||
missingColumns = False
|
||||
table.head_read()
|
||||
|
||||
for datatype,info in datainfo.items():
|
||||
for label in info['label']:
|
||||
key = '1_'+label if info['len'] > 1 else label # non-special labels have to start with '1_'
|
||||
if key not in table.labels:
|
||||
file['croak'].write('column %s not found...\n'%key)
|
||||
missingColumns = True # break if label not found
|
||||
else:
|
||||
column[label] = table.labels.index(key) # remember columns of requested data
|
||||
# ------------------------------------------ sanity checks -----------------------------------------
|
||||
|
||||
if missingColumns:
|
||||
errors = []
|
||||
remarks = []
|
||||
|
||||
if not np.all(table.label_dimension(label) == dim): errors.append('input {} has wrong dimension {}.'.format(label,dim))
|
||||
else: column = table.label_index(label)

if remarks != []: table.croak(remarks)
if errors != []:
table.croak(errors)
table.close(dismiss = True)
continue

# ------------------------------------------ assemble header ---------------------------------------

table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
for output in options.output:
if output == 'quaternion':
table.labels_append(['%i_quaternion_%s'%(i+1,options.symmetry) for i in xrange(4)])
if output == 'eulers':
table.labels_append(['%i_eulers_%s'%(i+1,options.symmetry) for i in xrange(3)])
if output == 'quaternion': table.labels_append(['{}_quat({})'.format( i+1,options.symmetry) for i in xrange(4)])
if output == 'eulers': table.labels_append(['{}_eulers({})'.format(i+1,options.symmetry) for i in xrange(3)])
table.head_write()

# ------------------------------------------ process data ------------------------------------------

outputAlive = True
while outputAlive and table.data_read(): # read next data line of ASCII table
if input == 'eulers':
o = damask.Orientation(Eulers=toRadians*\
np.array(map(float,table.data[column[options.eulers]:\
column[options.eulers]+datainfo['vector']['len']])),
symmetry=options.symmetry).reduced()
elif input == 'matrix':
o = damask.Orientation(matrix=\
np.array([map(float,table.data[column[options.matrix]:\
column[options.matrix]+datainfo['tensor']['len']])]).reshape(3,3),
symmetry=options.symmetry).reduced()
elif input == 'frame':
o = damask.Orientation(matrix=\
np.array([map(float,table.data[column[options.a]:\
column[options.a]+datainfo['vector']['len']] + \
table.data[column[options.b]:\
column[options.b]+datainfo['vector']['len']] + \
table.data[column[options.c]:\
column[options.c]+datainfo['vector']['len']]
)]).reshape(3,3),
symmetry=options.symmetry).reduced()
elif input == 'quaternion':
o = damask.Orientation(quaternion=\
np.array(map(float,table.data[column[options.quaternion]:\
column[options.quaternion]+datainfo['quaternion']['len']])),
symmetry=options.symmetry).reduced()
if inputtype == 'eulers':
o = damask.Orientation(Eulers = np.array(map(float,table.data[column:column+3]))*toRadians,
symmetry = options.symmetry).reduced()
elif inputtype == 'matrix':
o = damask.Orientation(matrix = np.array(map(float,table.data[column:column+9])).reshape(3,3).transpose(),
symmetry = options.symmetry).reduced()
elif inputtype == 'frame':
o = damask.Orientation(matrix = np.array(map(float,table.data[column[0]:column[0]+3] + \
table.data[column[1]:column[1]+3] + \
table.data[column[2]:column[2]+3])).reshape(3,3),
symmetry = options.symmetry).reduced()
elif inputtype == 'quaternion':
o = damask.Orientation(quaternion = np.array(map(float,table.data[column:column+4])),
symmetry = options.symmetry).reduced()

o.quaternion = r*o.quaternion

for output in options.output:
if output == 'quaternion':
table.data_append(o.asQuaternion())
if output == 'eulers':
table.data_append(o.asEulers('Bunge'))
if output == 'quaternion': table.data_append(o.asQuaternion())
if output == 'eulers': table.data_append(o.asEulers('Bunge'))
outputAlive = table.data_write() # output processed line

# ------------------------------------------ output result -----------------------------------------
outputAlive and table.output_flush() # just in case of buffered ASCII table
# ------------------------------------------ output finalization -----------------------------------

table.input_close() # close input ASCII table (works for stdin)
table.output_close() # close output ASCII table (works for stdout)
if file['name'] != 'STDIN':
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new
table.close() # close ASCII tables
if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new
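
The rotation step above multiplies each row's symmetry-reduced orientation quaternion by a fixed rotation r. A minimal numpy sketch of that Hamilton product, assuming unit quaternions in (w,x,y,z) order; quat_mul is a hypothetical helper, not part of damask:

import numpy as np

def quat_mul(p, q):                                # Hamilton product p*q
    pw,px,py,pz = p
    qw,qx,qy,qz = q
    return np.array([pw*qw - px*qx - py*qy - pz*qz,
                     pw*qx + px*qw + py*qz - pz*qy,
                     pw*qy - px*qz + py*qw + pz*qx,
                     pw*qz + px*qy - py*qx + pz*qw])

r = np.array([np.cos(0.1), np.sin(0.1), 0.0, 0.0]) # hypothetical rotation quaternion
o = np.array([1.0, 0.0, 0.0, 0.0])                 # identity orientation
rotated = quat_mul(r, o)                           # corresponds to r*o.quaternion above
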
@@ -19,59 +19,67 @@ Add column(s) containing Second Piola--Kirchhoff stress based on given column(s)

""", version = scriptID)

parser.add_option('-f','--defgrad', dest='defgrad', metavar='string',
help='heading of columns containing deformation gradient [%default]')
parser.add_option('-p','--stress', dest='stress', metavar='string',
help='heading of columns containing first Piola--Kirchhoff stress [%default]')
parser.set_defaults(defgrad = 'f')
parser.set_defaults(stress = 'p')
parser.add_option('-f','--defgrad',
dest = 'defgrad',
type = 'string', metavar = 'string',
help = 'heading of columns containing deformation gradient [%default]')
parser.add_option('-p','--stress',
dest = 'stress',
type = 'string', metavar = 'string',
help = 'heading of columns containing first Piola--Kirchhoff stress [%default]')

parser.set_defaults(defgrad = 'f',
stress = 'p',
)

(options,filenames) = parser.parse_args()

# ------------------------------------------ setup file handles ------------------------------------
files = []
if filenames == []:
files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
else:
for name in filenames:
if os.path.exists(name):
files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
# --- loop over input files -------------------------------------------------------------------------

# ------------------------------------------ loop over input files ---------------------------------
for file in files:
if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
if filenames == []: filenames = ['STDIN']

table = damask.ASCIItable(file['input'],file['output'],False) # make unbuffered ASCII_table
table.head_read() # read ASCII header info
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
for name in filenames:
if not (name == 'STDIN' or os.path.exists(name)): continue
table = damask.ASCIItable(name = name, outname = name+'_tmp',
buffered = False)
table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

# --------------- figure out columns to process ---------------------------------------------------
missingColumns = False
column={ 'defgrad': table.labels.index('1_'+options.defgrad),
'stress': table.labels.index('1_'+options.stress)}
for key in column:
if column[key]<1:
file['croak'].write('column %s not found...\n'%key)
missingColumns=True
if missingColumns: continue
# ------------------------------------------ read header ------------------------------------------

table.head_read()

# ------------------------------------------ sanity checks ----------------------------------------

errors = []
column = {}

for tensor in [options.defgrad,options.stress]:
dim = table.label_dimension(tensor)
if dim < 0: errors.append('column {} not found.'.format(tensor))
elif dim != 9: errors.append('column {} is not a tensor.'.format(tensor))
else:
column[tensor] = table.label_index(tensor)

if errors != []:
table.croak(errors)
table.close(dismiss = True)
continue

# ------------------------------------------ assemble header --------------------------------------

table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.labels_append(['%i_S'%(i+1) for i in xrange(9)]) # extend ASCII header with new labels
table.head_write()

# ------------------------------------------ process data ------------------------------------------
outputAlive = True
while outputAlive and table.data_read(): # read next data line of ASCII table
F = np.array(map(float,table.data[column['defgrad']:column['defgrad']+9]),'d').reshape(3,3)
P = np.array(map(float,table.data[column['stress'] :column['stress']+9]),'d').reshape(3,3)
F = np.array(map(float,table.data[column[options.defgrad]:column[options.defgrad]+9]),'d').reshape(3,3)
P = np.array(map(float,table.data[column[options.stress ]:column[options.stress ]+9]),'d').reshape(3,3)
table.data_append(list(np.dot(np.linalg.inv(F),P).reshape(9))) # S = [F^-1].[P]
outputAlive = table.data_write() # output processed line

# ------------------------------------------ output result -----------------------------------------
outputAlive and table.output_flush() # just in case of buffered ASCII table
# ------------------------------------------ output finalization -----------------------------------

table.input_close() # close input ASCII table (works for stdin)
table.output_close() # close output ASCII table (works for stdout)
if file['name'] != 'STDIN':
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new
table.close() # close input ASCII table (works for stdin)
if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new
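
The kernel of this script is a single tensor identity: with P = F.S, the second Piola-Kirchhoff stress follows as S = F^-1.P. A minimal numpy sketch with hypothetical test data:

import numpy as np

F = np.eye(3) + 1e-3*np.random.rand(3,3)           # hypothetical deformation gradient
S = np.random.rand(3,3); S = 0.5*(S + S.T)         # hypothetical symmetric 2nd PK stress
P = np.dot(F, S)                                   # first PK stress, P = F.S
assert np.allclose(np.dot(np.linalg.inv(F), P), S) # recovers S as in the script
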
@@ -18,127 +18,116 @@ Add x,y coordinates of stereographic projection of given direction (pole) in cry

""", version = scriptID)

parser.add_option('-p', '--pole', dest='pole', type='float', nargs=3, metavar='float float float',
parser.add_option('-p', '--pole',
dest = 'pole',
type = 'float', nargs = 3, metavar = 'float float float',
help = 'crystal frame direction for pole figure [%default]')
parser.add_option('--polar', dest='polar', action='store_true',
parser.add_option('--polar',
dest = 'polar',
action = 'store_true',
help = 'output polar coordinates r,phi [%default]')
parser.add_option('-e', '--eulers', dest='eulers', metavar='string',
parser.add_option('-e', '--eulers',
dest = 'eulers',
type = 'string', metavar = 'string',
help = 'Euler angles label')
parser.add_option('-d', '--degrees', dest='degrees', action='store_true',
parser.add_option('-d', '--degrees',
dest = 'degrees',
action = 'store_true',
help = 'Euler angles are given in degrees [%default]')
parser.add_option('-m', '--matrix', dest='matrix', metavar='string',
parser.add_option('-m', '--matrix',
dest = 'matrix',
type = 'string', metavar = 'string',
help = 'orientation matrix label')
parser.add_option('-a', dest='a', metavar='string',
parser.add_option('-a',
dest = 'a',
type = 'string', metavar = 'string',
help = 'crystal frame a vector label')
parser.add_option('-b', dest='b', metavar='string',
parser.add_option('-b',
dest = 'b',
type = 'string', metavar = 'string',
help = 'crystal frame b vector label')
parser.add_option('-c', dest='c', metavar='string',
parser.add_option('-c',
dest = 'c',
type = 'string', metavar = 'string',
help = 'crystal frame c vector label')
parser.add_option('-q', '--quaternion', dest='quaternion', metavar='string',
parser.add_option('-q', '--quaternion',
dest = 'quaternion',
type = 'string', metavar = 'string',
help = 'quaternion label')
parser.set_defaults(pole = (1.0,0.0,0.0))
parser.set_defaults(degrees = False)
parser.set_defaults(polar = False)

parser.set_defaults(pole = (1.0,0.0,0.0),
degrees = False,
polar = False,
)

(options, filenames) = parser.parse_args()

datainfo = { # list of requested labels per datatype
'tensor': {'len':9,
'label':[]},
'vector': {'len':3,
'label':[]},
'quaternion': {'len':4,
'label':[]},
}
input = [options.eulers != None,
options.a != None and \
options.b != None and \
options.c != None,
options.matrix != None,
options.quaternion != None,
]

input = []
if options.eulers != None:
datainfo['vector']['label'] += [options.eulers]
input += ['eulers']
if options.a != None and \
options.b != None and \
options.c != None:
datainfo['vector']['label'] += [options.a,options.b,options.c]
input += ['frame']
if options.matrix != None:
datainfo['tensor']['label'] += [options.matrix]
input += ['matrix']
if options.quaternion != None:
datainfo['quaternion']['label'] += [options.quaternion]
input += ['quaternion']
if np.sum(input) != 1: parser.error('needs exactly one input format.')

if len(input) != 1: parser.error('needs exactly one input format...')
input = input[0]

toRadians = np.pi/180.0 if options.degrees else 1.0 # rescale degrees to radians
(label,dim,inputtype) = [(options.eulers,3,'eulers'),
([options.a,options.b,options.c],[3,3,3],'frame'),
(options.matrix,9,'matrix'),
(options.quaternion,4,'quaternion'),
][np.where(input)[0][0]] # select input label that was requested
toRadians = math.pi/180.0 if options.degrees else 1.0 # rescale degrees to radians
pole = np.array(options.pole)
pole /= np.linalg.norm(pole)

# --- loop over input files -------------------------------------------------------------------------
if filenames == []:
filenames = ['STDIN']
# --- loop over input files ------------------------------------------------------------------------

if filenames == []: filenames = ['STDIN']

for name in filenames:
if name == 'STDIN':
file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr}
file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
else:
if not os.path.exists(name): continue
file = {'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr}
file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
if not (name == 'STDIN' or os.path.exists(name)): continue
table = damask.ASCIItable(name = name, outname = name+'_tmp',
buffered = False)
table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

table = damask.ASCIItable(file['input'],file['output'],buffered = False) # make unbuffered ASCII_table
table.head_read() # read ASCII header info
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
# ------------------------------------------ read header ------------------------------------------

column = {}
missingColumns = False
table.head_read()

for datatype,info in datainfo.items():
for label in info['label']:
key = list(set([label, '1_'+label]) & set(table.labels))
if key == []:
file['croak'].write('column %s not found...\n'%label)
missingColumns = True # break if label not found
else:
column[label] = table.labels.index(key[0]) # remember columns of requested data
# ------------------------------------------ sanity checks ----------------------------------------

if missingColumns:
errors = []
remarks = []

if not np.all(table.label_dimension(label) == dim): errors.append('input {} has wrong dimension {}.'.format(label,dim))
else: column = table.label_index(label)

if remarks != []: table.croak(remarks)
if errors != []:
table.croak(errors)
table.close(dismiss = True)
continue

# ------------------------------------------ assemble header ---------------------------------------
table.labels_append(['%i_pole_%g%g%g'%(i+1,options.pole[0],options.pole[1],options.pole[2]) for i in xrange(2)])

table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.labels_append(['{}_pole_{}{}{}'.format(i+1,*options.pole) for i in xrange(2)])
table.head_write()

# ------------------------------------------ process data ------------------------------------------
outputAlive = True
while outputAlive and table.data_read(): # read next data line of ASCII table
if input == 'eulers':
o = damask.Orientation(Eulers=toRadians*\
np.array(map(float,table.data[column[options.eulers]:\
column[options.eulers]+datainfo['vector']['len']])),
)
elif input == 'matrix':
o = damask.Orientation(matrix=\
np.array([map(float,table.data[column[options.matrix]:\
column[options.matrix]+datainfo['tensor']['len']])]).reshape(np.sqrt(datainfo['tensor']['len']),
np.sqrt(datainfo['tensor']['len'])).transpose(),
)
elif input == 'frame':
o = damask.Orientation(matrix=\
np.array([map(float,table.data[column[options.a]:\
column[options.a]+datainfo['vector']['len']] + \
table.data[column[options.b]:\
column[options.b]+datainfo['vector']['len']] + \
table.data[column[options.c]:\
column[options.c]+datainfo['vector']['len']]
)]).reshape(3,3),
)
elif input == 'quaternion':
o = damask.Orientation(quaternion=\
np.array(map(float,table.data[column[options.quaternion]:\
column[options.quaternion]+datainfo['quaternion']['len']])),
)
if inputtype == 'eulers':
o = damask.Orientation(Eulers = np.array(map(float,table.data[column:column+3]))*toRadians)
elif inputtype == 'matrix':
o = damask.Orientation(matrix = np.array(map(float,table.data[column:column+9])).reshape(3,3).transpose())
elif inputtype == 'frame':
o = damask.Orientation(matrix = np.array(map(float,table.data[column[0]:column[0]+3] + \
table.data[column[1]:column[1]+3] + \
table.data[column[2]:column[2]+3])).reshape(3,3))
elif inputtype == 'quaternion':
o = damask.Orientation(quaternion = np.array(map(float,table.data[column:column+4])))

rotatedPole = o.quaternion*pole # rotate pole according to crystal orientation
(x,y) = rotatedPole[0:2]/(1.+abs(pole[2])) # stereographic projection

@@ -147,10 +136,7 @@ for name in filenames:

outputAlive = table.data_write() # output processed line

# ------------------------------------------ output result -----------------------------------------
outputAlive and table.output_flush() # just in case of buffered ASCII table
# ------------------------------------------ output finalization -----------------------------------

table.input_close() # close input ASCII table (works for stdin)
table.output_close() # close output ASCII table (works for stdout)
if file['name'] != 'STDIN':
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new
table.close() # close ASCII tables
if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new
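
For reference, the projection math in compact form: a unit pole is rotated into the specimen frame and its x,y components are scaled by 1/(1+|z|), mapping a hemisphere onto the unit disc. A minimal sketch, here using the rotated vector's own z component:

import numpy as np

p = np.array([1.0, 1.0, 1.0])/np.sqrt(3.0)   # hypothetical rotated pole (unit length)
x, y = p[0:2]/(1.0 + abs(p[2]))              # stereographic projection onto equatorial plane
r, phi = np.hypot(x, y), np.arctan2(y, x)    # polar form as reported with --polar
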
@@ -19,67 +19,70 @@ Add column(s) containing eigenvalues and eigenvectors of requested tensor column

""", version = scriptID)

parser.add_option('-t','--tensor', dest='tensor', action='extend', metavar='<string LIST>',
help='heading of columns containing tensor field values')
parser.add_option('-t','--tensor',
dest = 'tensor',
action = 'extend', metavar = '<string LIST>',
help = 'heading of columns containing tensor field values')

(options,filenames) = parser.parse_args()

if options.tensor == None:
parser.error('no data column specified...')
parser.error('no data column specified.')

datainfo = { # list of requested labels per datatype
'tensor': {'len':9,
'label':[]},
}
# --- loop over input files -------------------------------------------------------------------------

datainfo['tensor']['label'] += options.tensor
if filenames == []: filenames = ['STDIN']

# ------------------------------------------ setup file handles ------------------------------------
files = []
for name in filenames:
if os.path.exists(name):
files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
if not (name == 'STDIN' or os.path.exists(name)): continue
table = damask.ASCIItable(name = name, outname = name+'_tmp',
buffered = False)
table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

#--- loop over input files -------------------------------------------------------------------------
for file in files:
file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
# ------------------------------------------ read header ------------------------------------------

table = damask.ASCIItable(file['input'],file['output'],True) # make unbuffered ASCII_table
table.head_read() # read ASCII header info
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.head_read()

active = []
column = defaultdict(dict)
# ------------------------------------------ sanity checks ----------------------------------------

items = {
'tensor': {'dim': 9, 'shape': [3,3], 'labels':options.tensor, 'column': []},
}
errors = []
remarks = []

for label in datainfo['tensor']['label']:
key = '1_%s'%label
if key not in table.labels:
file['croak'].write('column %s not found...\n'%key)
else:
active.append(label)
column[label] = table.labels.index(key) # remember columns of requested data
for type, data in items.iteritems():
for what in data['labels']:
dim = table.label_dimension(what)
if dim != data['dim']: remarks.append('column {} is not a {}...'.format(what,type))
else:
items[type]['column'].append(table.label_index(what))
table.labels_append(['{}_eigval({})'.format(i+1,what) for i in xrange(3)]) # extend ASCII header with new labels
table.labels_append(['{}_eigvec({})'.format(i+1,what) for i in xrange(9)]) # extend ASCII header with new labels

# ------------------------------------------ assemble header ---------------------------------------
for label in active:
table.labels_append(['%i_eigval(%s)'%(i+1,label) for i in xrange(3)]) # extend ASCII header with new labels
table.labels_append(['%i_eigvec(%s)'%(i+1,label) for i in xrange(9)]) # extend ASCII header with new labels
if remarks != []: table.croak(remarks)
if errors != []:
table.croak(errors)
table.close(dismiss = True)
continue

# ------------------------------------------ assemble header --------------------------------------

table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.head_write()

# ------------------------------------------ process data ------------------------------------------

outputAlive = True
while outputAlive and table.data_read(): # read next data line of ASCII table
for label in active: # loop over requested data
tensor = np.array(map(float,table.data[column[label]:column[label]+datainfo['tensor']['len']])).\
reshape((datainfo['tensor']['len']//3,3))
(u,v) = np.linalg.eig(tensor)
table.data_append(list(u))
table.data_append(list(v.transpose().reshape(datainfo['tensor']['len'])))
for type, data in items.iteritems():
for column in data['column']:
(u,v) = np.linalg.eig(np.array(map(float,table.data[column:column+data['dim']])).reshape(data['shape']))
table.data_append(list(u))
table.data_append(list(v.transpose().reshape(data['dim'])))
outputAlive = table.data_write() # output processed line

# ------------------------------------------ output result -----------------------------------------
outputAlive and table.output_flush() # just in case of buffered ASCII table
# ------------------------------------------ output finalization -----------------------------------

table.input_close() # close input ASCII table (works for stdin)
table.output_close() # close output ASCII table (works for stdout)
if file['name'] != 'STDIN':
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new
table.close() # close input ASCII table (works for stdin)
if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new
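
The per-row work reduces to one numpy call: eigenvalues and eigenvectors of the 3x3 tensor reassembled from nine columns, appended in the order the new labels promise. A minimal sketch with hypothetical data:

import numpy as np

t = np.random.rand(3,3); t = 0.5*(t + t.T)      # hypothetical symmetric tensor from 9 columns
(u,v) = np.linalg.eig(t)                        # u: eigenvalues, v: eigenvectors as columns
row = list(u) + list(v.transpose().reshape(9))  # 3 eigval + 9 eigvec entries per data line
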
@@ -11,12 +11,13 @@ scriptID = string.replace('$Id$','\n','\\n')
scriptName = os.path.splitext(scriptID.split()[1])[0]

def operator(stretch,strain,eigenvalues):
''' Albrecht Bertram: Elasticity and Plasticity of Large Deformations An Introduction (3rd Edition, 2012), p. 102 '''
return {
'V#ln': np.log(eigenvalues) ,
'U#ln': np.log(eigenvalues) ,
'V#Biot': ( np.ones(3,'d') - 1.0/eigenvalues ) ,
'U#Biot': ( eigenvalues - np.ones(3,'d') ) ,
'V#Green': ( np.ones(3,'d') - 1.0/eigenvalues*eigenvalues) *0.5,
'V#Green': ( np.ones(3,'d') - 1.0/eigenvalues/eigenvalues) *0.5,
'U#Green': ( eigenvalues*eigenvalues - np.ones(3,'d')) *0.5,
}[stretch+'#'+strain]

@@ -30,29 +31,43 @@ Add column(s) containing given strains based on given stretches of requested def

""", version = scriptID)

parser.add_option('-u','--right', dest='right', action='store_true',
help='material strains based on right Cauchy--Green deformation, i.e., C and U')
parser.add_option('-v','--left', dest='left', action='store_true',
help='spatial strains based on left Cauchy--Green deformation, i.e., B and V')
parser.add_option('-0','--logarithmic', dest='logarithmic', action='store_true',
help='calculate logarithmic strain tensor')
parser.add_option('-1','--biot', dest='biot', action='store_true',
help='calculate biot strain tensor')
parser.add_option('-2','--green', dest='green', action='store_true',
help='calculate green strain tensor')
parser.add_option('-f','--defgrad', dest='defgrad', action='extend', metavar = '<string LIST>',
help='heading(s) of columns containing deformation tensor values [%default]')
parser.set_defaults(right = False)
parser.set_defaults(left = False)
parser.set_defaults(logarithmic = False)
parser.set_defaults(biot = False)
parser.set_defaults(green = False)
parser.set_defaults(defgrad = ['f'])
parser.add_option('-u','--right',
dest = 'right',
action = 'store_true',
help = 'material strains based on right Cauchy--Green deformation, i.e., C and U')
parser.add_option('-v','--left',
dest = 'left',
action = 'store_true',
help = 'spatial strains based on left Cauchy--Green deformation, i.e., B and V')
parser.add_option('-0','--logarithmic',
dest = 'logarithmic',
action = 'store_true',
help = 'calculate logarithmic strain tensor')
parser.add_option('-1','--biot',
dest = 'biot',
action = 'store_true',
help = 'calculate biot strain tensor')
parser.add_option('-2','--green',
dest = 'green',
action = 'store_true',
help = 'calculate green strain tensor')
parser.add_option('-f','--defgrad',
dest = 'defgrad',
action = 'extend',
metavar = '<string LIST>',
help = 'heading(s) of columns containing deformation tensor values [%default]')

parser.set_defaults(right = False,
left = False,
logarithmic = False,
biot = False,
green = False,
defgrad = ['f'],
)

(options,filenames) = parser.parse_args()

stretches = []
stretch = {}
strains = []

if options.right: stretches.append('U')

@@ -61,81 +76,89 @@ if options.logarithmic: strains.append('ln')
if options.biot: strains.append('Biot')
if options.green: strains.append('Green')

# ------------------------------------------ setup file handles ------------------------------------
files = []
if filenames == []:
files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
else:
for name in filenames:
if os.path.exists(name):
files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
if options.defgrad == None:
parser.error('no data column specified.')

# ------------------------------------------ loop over input files ---------------------------------
for file in files:
if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
# --- loop over input files -------------------------------------------------------------------------

table = damask.ASCIItable(file['input'],file['output'],False) # make unbuffered ASCII_table
table.head_read() # read ASCII header info
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
if filenames == []: filenames = ['STDIN']

# --------------- figure out columns to process ---------------------------------------------------
for name in filenames:
if not (name == 'STDIN' or os.path.exists(name)): continue
table = damask.ASCIItable(name = name, outname = name+'_tmp',
buffered = False)
table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

errors = []
active = []
for i,length in enumerate(table.label_dimension(options.defgrad)):
if length == 9:
active.append(options.defgrad[i])
else:
errors.append('no deformation gradient tensor (1..9_%s) found...'%options.defgrad[i])
# ------------------------------------------ read header ------------------------------------------

if errors != []:
file['croak'].write('\n'.join(errors)+'\n')
table.head_read()

# ------------------------------------------ sanity checks ----------------------------------------

items = {
'tensor': {'dim': 9, 'shape': [3,3], 'labels':options.defgrad, 'column': []},
}
errors = []
remarks = []

for type, data in items.iteritems():
for what in data['labels']:
dim = table.label_dimension(what)
if dim != data['dim']: remarks.append('column {} is not a {}...'.format(what,type))
else:
items[type]['column'].append(table.label_index(what))
for theStretch in stretches:
for theStrain in strains:
table.labels_append(['{}_{}({}){}'.format(i+1, # extend ASCII header with new labels
theStrain,
theStretch,
label if label != 'f' else '') for i in xrange(9)])

if remarks != []: table.croak(remarks)
if errors != []:
table.croak(errors)
table.close(dismiss = True)
continue

# ------------------------------------------ assemble header ---------------------------------------
# ------------------------------------------ assemble header --------------------------------------

for label in active:
for theStretch in stretches:
for theStrain in strains:
table.labels_append(['%i_%s(%s)%s'%(i+1,
theStrain,
theStretch,
label if label != 'f' else '') for i in xrange(9)]) # extend ASCII header with new labels
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.head_write()

# ------------------------------------------ process data ------------------------------------------

stretch = {}
outputAlive = True

while outputAlive and table.data_read(): # read next data line of ASCII table
for column in table.label_index(active): # loop over all requested norms
F = np.array(map(float,table.data[column:column+9]),'d').reshape(3,3)
(U,S,Vh) = np.linalg.svd(F)
R = np.dot(U,Vh)
stretch['U'] = np.dot(np.linalg.inv(R),F)
stretch['V'] = np.dot(F,np.linalg.inv(R))
for column in items['tensor']['column']: # loop over all requested defgrads
F = np.array(map(float,table.data[column:column+items['tensor']['dim']]),'d').reshape(items['tensor']['shape'])
(U,S,Vh) = np.linalg.svd(F) # singular value decomposition
R = np.dot(U,Vh) # rotation of polar decomposition
stretch['U'] = np.dot(np.linalg.inv(R),F) # F = RU
stretch['V'] = np.dot(F,np.linalg.inv(R)) # F = VR

for theStretch in stretches:
for i in xrange(9):
if abs(stretch[theStretch][i%3,i//3]) < 1e-12: # kill nasty noisy data
stretch[theStretch][i%3,i//3] = 0.0
stretch[theStretch] = np.where(abs(stretch[theStretch]) < 1e-12, 0, stretch[theStretch]) # kill nasty noisy data
(D,V) = np.linalg.eig(stretch[theStretch]) # eigen decomposition (of symmetric matrix)
neg = np.where(D < 0.0) # find negative eigenvalues ...
D[neg] *= -1. # ... flip value ...
V[:,neg] *= -1. # ... and vector
for i,eigval in enumerate(D):
if eigval < 0.0: # flip negative eigenvalues
D[i] = -D[i]
V[:,i] = -V[:,i]
if np.dot(V[:,i],V[:,(i+1)%3]) != 0.0: # check each vector for orthogonality
V[:,(i+1)%3] = np.cross(V[:,(i+2)%3],V[:,i]) # correct next vector
V[:,(i+1)%3] /= np.sqrt(np.dot(V[:,(i+1)%3],V[:,(i+1)%3].conj())) # and renormalize (hyperphobic?)
if np.dot(V[:,i],V[:,(i+1)%3]) != 0.0: # check each vector for orthogonality
V[:,(i+1)%3] = np.cross(V[:,(i+2)%3],V[:,i]) # correct next vector
V[:,(i+1)%3] /= np.sqrt(np.dot(V[:,(i+1)%3],V[:,(i+1)%3].conj())) # and renormalize (hyperphobic?)
for theStrain in strains:
d = operator(theStretch,theStrain,D) # operate on eigenvalues of U or V
eps = (np.dot(V,np.dot(np.diag(d),V.T)).real).reshape(9) # build tensor back from eigenvalue/vector basis

table.data_append(list(eps))
outputAlive = table.data_write() # output processed line

# ------------------------------------------ output result -----------------------------------------
outputAlive and table.output_flush() # just in case of buffered ASCII table

table.close() # close ASCII table
if file['name'] != 'STDIN':
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new
outputAlive = table.data_write() # output processed line

# ------------------------------------------ output finalization -----------------------------------

table.close() # close ASCII tables
if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new
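
Condensed, the per-row pipeline is: polar decomposition of F via SVD, eigendecomposition of the chosen stretch, the strain operator applied to the eigenvalues, and reassembly in the eigenvector basis. A minimal sketch for the logarithmic material strain ln(U), using R.T in place of np.linalg.inv(R), which is equivalent for a proper rotation:

import numpy as np

F = np.eye(3) + 1e-2*np.random.rand(3,3)        # hypothetical deformation gradient
(W,S,Vh) = np.linalg.svd(F)
R = np.dot(W, Vh)                               # rotation of polar decomposition
U = np.dot(R.T, F)                              # right stretch, F = R.U
(D,V) = np.linalg.eig(U)
eps = np.dot(V, np.dot(np.diag(np.log(D)), V.T)).real.reshape(9)  # ln(U) as 9 columns
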
@@ -21,41 +21,39 @@ Examples:
For grain averaged values, replace all rows of particular 'texture' with a single row containing their average.
""", version = scriptID)

parser.add_option('-l','--label', dest='label', type="string", metavar='string',
help='column label for grouping rows')
parser.add_option('-l','--label',
dest = 'label',
type = 'string', metavar = 'string',
help = 'column label for grouping rows')

(options,filenames) = parser.parse_args()

if options.label == None:
parser.error('No sorting column specified.')
parser.error('no grouping column specified.')

# --- loop over input files -------------------------------------------------------------------------

if filenames == []:
filenames = ['STDIN']
if filenames == []: filenames = ['STDIN']

for name in filenames:
if name == 'STDIN':
file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr}
file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
else:
if not os.path.exists(name): continue
file = {'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr}
file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
if not (name == 'STDIN' or os.path.exists(name)): continue
table = damask.ASCIItable(name = name,
outname = options.label+'_averaged_'+name,
buffered = False)
table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

table = damask.ASCIItable(file['input'],file['output'],buffered=False) # make unbuffered ASCII_table
table.head_read() # read ASCII header info
# ------------------------------------------ sanity checks ---------------------------------------

table.head_read()
if table.label_dimension(options.label) != 1:
file['croak'].write('column {0} is not of scalar dimension...\n'.format(options.label))
table.croak('column {} is not of scalar dimension.'.format(options.label))
table.close(dismiss = True) # close ASCIItable and remove empty file
continue

# ------------------------------------------ assemble info ---------------------------------------

# ------------------------------------------ assemble header -----------------------------

table.info_append(string.replace(scriptID,'\n','\\n') + \
'\t' + ' '.join(sys.argv[1:]))
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.head_write()

# ------------------------------------------ process data --------------------------------

@@ -65,7 +63,7 @@ for name in filenames:

table.data = table.data[np.lexsort([table.data[:,table.label_index(options.label)]])]

values,index = np.unique(table.data[:,table.label_index(options.label)], return_index=True)
values,index = np.unique(table.data[:,table.label_index(options.label)], return_index = True)
index = np.append(index,rows)
avgTable = np.empty((len(values), cols))

@@ -78,8 +76,4 @@ for name in filenames:
# ------------------------------------------ output result -------------------------------

table.data_writeArray()
table.output_flush() # just in case of buffered ASCII table

table.close() # close ASCII table
if file['name'] != 'STDIN':
os.rename(file['name']+'_tmp',options.label+'_averaged_'+file['name']) # overwrite old one with tmp new
table.close() # close ASCII table
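
The grouping trick above is worth spelling out: after sorting rows by the key column, np.unique with return_index=True yields the first row of each group, and appending the total row count closes the last group. A minimal sketch with hypothetical rows:

import numpy as np

data = np.array([[2.,10.],[1.,4.],[1.,6.]])        # hypothetical rows, key in column 0
data = data[np.lexsort([data[:,0]])]               # sort rows by key column
values,index = np.unique(data[:,0], return_index = True)
index = np.append(index, data.shape[0])            # group boundaries including the end
avg = np.array([data[index[i]:index[i+1]].mean(axis = 0) for i in range(len(values))])
# avg == [[1.,5.],[2.,10.]]
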
@@ -1,7 +1,7 @@
#!/usr/bin/env python
# -*- coding: UTF-8 no BOM -*-

import os,re,sys,string,fnmatch,numpy as np
import os,re,sys,string,fnmatch,math,random,numpy as np
from optparse import OptionParser
import damask

@@ -21,32 +21,39 @@ All rows where label 'foo' equals 'bar' -- " #foo# == \"bar\" "

""", version = scriptID)

parser.add_option('-w','--white', dest='whitelist', action='extend', metavar='<string LIST>',
help='white list of column labels (a,b,c,...)')
parser.add_option('-b','--black', dest='blacklist', action='extend', metavar='<string LIST>',
help='black list of column labels (a,b,c,...)')
parser.add_option('-c','--condition', dest='condition', metavar='string',
help='condition to filter rows')
parser.set_defaults(condition = '')
parser.add_option('-w','--white',
dest = 'whitelist',
action = 'extend', metavar = '<string LIST>',
help = 'whitelist of column labels (a,b,c,...)')
parser.add_option('-b','--black',
dest = 'blacklist',
action = 'extend', metavar='<string LIST>',
help = 'blacklist of column labels (a,b,c,...)')
parser.add_option('-c','--condition',
dest = 'condition', metavar='string',
help = 'condition to filter rows')

parser.set_defaults(condition = '',
)

(options,filenames) = parser.parse_args()

if filenames == []:
filenames = ['STDIN']
# --- loop over input files -------------------------------------------------------------------------

if filenames == []: filenames = ['STDIN']

#--- loop over input files -------------------------------------------------------------------------
for name in filenames:
if name == 'STDIN':
file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr}
file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
else:
if not os.path.exists(name): continue
file = {'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr}
file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
if not (name == 'STDIN' or os.path.exists(name)): continue
table = damask.ASCIItable(name = name, outname = name+'_tmp',
buffered = False)
table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

table = damask.ASCIItable(file['input'],file['output'],False) # make unbuffered ASCII_table
table.head_read() # read ASCII header info
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
# ------------------------------------------ assemble info ---------------------------------------

table.head_read()
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) # read ASCII header info

# ------------------------------------------ process data ---------------------------------------

specials = { \
'_row_': 0,

@@ -55,18 +62,21 @@ for name in filenames:
positions = []

for position,label in enumerate(table.labels):
if (options.whitelist == None or any([fnmatch.fnmatch(label,needle) for needle in options.whitelist])) \
and (options.blacklist == None or not any([fnmatch.fnmatch(label,needle) for needle in options.blacklist])): # a label to keep?
if (options.whitelist == None or any([ position in table.label_indexrange(needle) \
or fnmatch.fnmatch(label,needle) for needle in options.whitelist])) \
and (options.blacklist == None or not any([ position in table.label_indexrange(needle) \
or fnmatch.fnmatch(label,needle) for needle in options.blacklist])): # a label to keep?
labels.append(label) # remember name...
positions.append(position) # ...and position

if options.whitelist != None and options.blacklist == None: # check whether reordering is possible
if len(labels) > 0 and options.whitelist != None and options.blacklist == None: # check whether reordering is possible
position = np.zeros(len(labels))
for i,label in enumerate(labels): # check each selected label
match = [fnmatch.fnmatch(label,needle) for needle in options.whitelist] # which whitelist items do match it
match = [ positions[i] in table.label_indexrange(needle) \
or fnmatch.fnmatch(label,needle) for needle in options.whitelist] # which whitelist items do match it
position[i] = match.index(True) if np.sum(match) == 1 else -1 # unique match --> store which

sorted = np.argsort(position)
sorted = np.lexsort((labels,position))
order = range(len(labels)) if sorted[0] < 0 else sorted # skip reordering if non-unique, i.e. first sorted is "-1"
else:
order = range(len(labels)) # maintain original order of labels

@@ -90,10 +100,13 @@ for name in filenames:
evaluator = "'" + condition + "'.format(" + ','.join(interpolator) + ")"

# ------------------------------------------ assemble header ---------------------------------------
table.labels = np.array(labels)[order] # update with new label set

table.labels_clear()
table.labels_append(np.array(labels)[order]) # update with new label set
table.head_write()

# ------------------------------------------ process data ------------------------------------------
# ------------------------------------------ process and output data ------------------------------------------

positions = np.array(positions)[order]
outputAlive = True
while outputAlive and table.data_read(): # read next data line of ASCII table

@@ -102,10 +115,8 @@ for name in filenames:
table.data = [table.data[position] for position in positions] # retain filtered columns
outputAlive = table.data_write() # output processed line

# ------------------------------------------ output result -----------------------------------------
outputAlive and table.output_flush() # just in case of buffered ASCII table
# ------------------------------------------ finalize output -----------------------------------------

table.input_close() # close input ASCII table (works for stdin)
table.output_close() # close output ASCII table (works for stdout)
if file['name'] != 'STDIN':
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new
table.close() # close input ASCII table (works for stdin)

if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new
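
The keep/drop decision combines shell-style patterns with the new label_indexrange position test; the pattern half alone looks like this minimal sketch (hypothetical labels and patterns):

import fnmatch

labels    = ['1_f','2_f','texture','1_eulers']
whitelist = ['*_f']                                # hypothetical patterns
blacklist = ['2_*']
keep = [l for l in labels
        if     any(fnmatch.fnmatch(l,n) for n in whitelist)
       and not any(fnmatch.fnmatch(l,n) for n in blacklist)]
# keep == ['1_f']
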
@@ -19,36 +19,66 @@ Generate PNG image from data in given column (or 2D data of overall table).

""", version = scriptID)

parser.add_option('-l','--label', dest='label', type='string',
help='column containing data [all])')
parser.add_option('-r','--range', dest='range', type='float', nargs=2,
help='data range (min max) [auto]')
parser.add_option('--gap', '--transparent', dest='gap', type='float',
help='value to treat as transparent [%default]')
parser.add_option('-d','--dimension', dest='dimension', type='int', nargs=2,
help='data dimension (width height) [native]')
parser.add_option('--abs', dest='abs', action='store_true',
help='magnitude of values')
parser.add_option('--log', dest='log', action='store_true',
help='log10 of values')
parser.add_option('--fliplr', dest='flipLR', action='store_true',
help='flip around vertical axis')
parser.add_option('--flipud', dest='flipUD', action='store_true',
help='flip around horizontal axis')
parser.add_option('--color', dest='color', type='string',
help='color scheme')
parser.add_option('--invert', dest='invert', action='store_true',
help='invert color scheme')
parser.add_option('--crop', dest='crop', type='int', nargs=4, metavar='LEFT RIGHT TOP BOTTOM',
help='pixels cropped on left, right, top, bottom')
parser.add_option('--show', dest='show', action='store_true',
help='show resulting image')
parser.add_option('-N','--pixelsize', dest='pixelsize', type='int',
help='pixel per data point')
parser.add_option('-x','--pixelsizex', dest='pixelsizex', type='int',
help='pixel per data point along x')
parser.add_option('-y','--pixelsizey', dest='pixelsizey', type='int',
help='pixel per data point along y')
parser.add_option('-l','--label',
dest = 'label',
type = 'string', metavar = 'string',
help = 'column containing data [all]')
parser.add_option('-r','--range',
dest = 'range',
type = 'float', nargs = 2, metavar = 'float float',
help = 'data range (min max) [auto]')
parser.add_option('--gap', '--transparent',
dest = 'gap',
type = 'float', metavar = 'float',
help = 'value to treat as transparent [%default]')
parser.add_option('-d','--dimension',
dest = 'dimension',
type = 'int', nargs = 2, metavar = 'int int',
help = 'data dimension (width height) [native]')
parser.add_option('--color',
dest = 'color',
type = 'string', metavar = 'string',
help = 'color scheme [%default]')
parser.add_option('--invert',
dest = 'invert',
action = 'store_true',
help = 'invert color scheme')
parser.add_option('--abs',
dest = 'abs',
action = 'store_true',
help = 'magnitude of values')
parser.add_option('--log',
dest = 'log',
action = 'store_true',
help = 'log10 of values')
parser.add_option('--fliplr',
dest = 'flipLR',
action = 'store_true',
help = 'flip around vertical axis')
parser.add_option('--flipud',
dest = 'flipUD',
action = 'store_true',
help = 'flip around horizontal axis')
parser.add_option('--crop',
dest = 'crop',
type = 'int', nargs = 4, metavar = 'int int int int',
help = 'pixels cropped on left, right, top, bottom')
parser.add_option('-N','--pixelsize',
dest = 'pixelsize',
type = 'int', metavar = 'int',
help = 'pixel per data point')
parser.add_option('-x','--pixelsizex',
dest = 'pixelsizex',
type = 'int', metavar = 'int',
help = 'pixel per data point along x')
parser.add_option('-y','--pixelsizey',
dest = 'pixelsizey',
type = 'int', metavar = 'int',
help = 'pixel per data point along y')
parser.add_option('--show',
dest = 'show',
action = 'store_true',
help = 'show resulting image')

parser.set_defaults(label = None,
range = [0.0,0.0],

@@ -73,38 +103,32 @@ if options.pixelsize > 1: (options.pixelsizex,options.pixelsizey) = [options.pix

# --- color palette ---------------------------------------------------------------------------------

theMap = damask.Colormap(predefined=options.color)
theMap = damask.Colormap(predefined = options.color)
if options.invert: theMap = theMap.invert()
theColors = np.uint8(np.array(theMap.export(format='list',steps=256))*255)
theColors = np.uint8(np.array(theMap.export(format = 'list',steps = 256))*255)

# --- loop over input files -------------------------------------------------------------------------
if filenames == []:
filenames = ['STDIN']

if filenames == []: filenames = ['STDIN']

for name in filenames:
if name == 'STDIN':
file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr}
file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
else:
if not os.path.exists(name): continue
file = {'name':name,
'input':open(name),
'output':open(os.path.splitext(name)[0]+\
('_%s'%(options.label) if options.label != None else '')+\
'.png','w'),
'croak':sys.stderr}
file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
if not (name == 'STDIN' or os.path.exists(name)): continue
table = damask.ASCIItable(name = name,
outname = None,
buffered = False,
labeled = options.label != None,
readonly = True)
table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

table = damask.ASCIItable(file['input'],file['output'],
buffered = False, # make unbuffered ASCII_table
labels = options.label != None) # no labels when taking 2D dataset
table.head_read() # read ASCII header info
# ------------------------------------------ read header ------------------------------------------

table.head_read()

# ------------------------------------------ process data ------------------------------------------

missing_labels = table.data_readArray(options.label)
if len(missing_labels) > 0:
file['croak'].write('column %s not found...\n'%options.label)
table.croak('column {} not found.'.format(options.label))
table.close(dismiss = True) # close ASCIItable and remove empty file
continue

@@ -115,11 +139,11 @@ for name in filenames:
if options.flipLR: table.data = np.fliplr(table.data)
if options.flipUD: table.data = np.flipud(table.data)

mask = np.where(table.data != options.gap,True,False) if options.gap != None else np.ones_like(table.data,dtype='bool')
mask = np.where(table.data != options.gap,True,False) if options.gap != None else np.ones_like(table.data,dtype = 'bool')
if np.all(np.array(options.range) == 0.0):
options.range = [table.data[mask].min(),
table.data[mask].max()]
file['croak'].write('data range: {0} – {1}\n'.format(*options.range))
table.croak('data range: {0} – {1}'.format(*options.range))

delta = max(options.range) - min(options.range)
avg = 0.5*(max(options.range) + min(options.range))

@@ -139,9 +163,9 @@ for name in filenames:
repeat(options.pixelsizey,axis = 0)

(height,width) = table.data.shape
file['croak'].write('image dimension: {0} x {1}\n'.format(width,height))
table.croak('image dimension: {0} x {1}'.format(width,height))

im = Image.fromarray(np.dstack((theColors[np.array(255*table.data,dtype=np.uint8)],
im = Image.fromarray(np.dstack((theColors[np.array(255*table.data,dtype = np.uint8)],
255*mask.astype(np.uint8))), 'RGBA').\
crop(( options.crop[0],
options.crop[2],

@@ -149,8 +173,12 @@ for name in filenames:
height-options.crop[3]))

# ------------------------------------------ output result -----------------------------------------
im.save(file['output'],format = "PNG")
if options.show: im.show()

table.input_close() # close input ASCII table
table.output_close() # close output
im.save(sys.stdout if name == 'STDIN' else
os.path.splitext(name)[0]+ \
('' if options.label == None else '_'+options.label)+ \
'.png',
format = "PNG")

table.close() # close ASCII table
if options.show: im.show()
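
The image assembly maps normalized data through a 256-entry palette and attaches an alpha channel built from the gap mask; a minimal numpy sketch with a hypothetical gray palette:

import numpy as np

data    = np.random.rand(4,5)                      # hypothetical field, already scaled to [0,1]
palette = np.uint8(np.linspace(0,255,256)[:,None].repeat(3,axis=1))  # 256 gray RGB entries
mask    = np.ones_like(data, dtype = 'bool')       # True where data is opaque
rgba    = np.dstack((palette[np.array(255*data, dtype = np.uint8)],
                     255*mask.astype(np.uint8)))   # (4,5,4) array ready for Image.fromarray
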
@@ -19,38 +19,61 @@ Generate PNG image from scalar data on grid deformed by (periodic) deformation g

""", version = scriptID)

parser.add_option('-l','--label', dest='label', type='string', metavar='string',
help='column containing data)')
parser.add_option('-r','--range', dest='range', type='float', nargs=2, metavar='float float',
help='data range (min max) [auto]')
parser.add_option('--color', dest='color', type='string', metavar='string',
help='color scheme')
parser.add_option('--invert', dest='invert', action='store_true',
help='invert color scheme')
parser.add_option('--abs', dest='abs', action='store_true',
help='magnitude of values')
parser.add_option('--log', dest='log', action='store_true',
help='log10 of values')
parser.add_option('-d','--dimension', dest='dimension', type='int', nargs=3, metavar=' '.join(['int']*3),
help='data dimension (x/y/z)')
parser.add_option('-s','--size', dest='size', type='float', nargs=3, metavar=' '.join(['float']*3),
help='box size (x/y/z)')
parser.add_option('-f','--defgrad', dest='defgrad', metavar='string',
help='column label of deformation gradient [%default]')
parser.add_option('--scaling', dest='scaling', type='float', nargs=3, metavar = ' '.join(['float']*3),
help='x/y/z scaling of displacment fluctuation [%default]')
parser.add_option('-z','--layer', dest='z', type='int', metavar='int',
help='index of z plane to plot [%default]')
parser.add_option('--fliplr', dest='flipLR', action='store_true',
help='flip around vertical axis')
parser.add_option('--flipud', dest='flipUD', action='store_true',
help='flip around horizontal axis')
parser.add_option('--crop', dest='crop', type='int', nargs=4, metavar=' '.join(['int']*3),
help='pixels cropped on left, right, top, bottom')
parser.add_option('--show', dest='show', action='store_true',
help='show resulting image')
parser.add_option('-N','--pixelsize', dest='pixelsize', type='int', metavar='int',
help='pixels per cell edge')
parser.add_option('-l','--label',
dest = 'label',
type = 'string', metavar = 'string',
help = 'column containing data [all]')
parser.add_option('-r','--range',
dest = 'range',
type = 'float', nargs = 2, metavar = 'float float',
help = 'data range (min max) [auto]')
parser.add_option('--gap', '--transparent',
dest = 'gap',
type = 'float', metavar = 'float',
help = 'value to treat as transparent [%default]')
parser.add_option('-d','--dimension',
dest = 'dimension',
type = 'int', nargs = 3, metavar = ' '.join(['int']*3),
help = 'data dimension (x/y/z)')
parser.add_option('-s','--size',
dest = 'size',
type = 'float', nargs = 3, metavar = ' '.join(['float']*3),
help = 'box size (x/y/z)')
parser.add_option('-f','--defgrad',
dest = 'defgrad', metavar = 'string',
help = 'column label of deformation gradient [%default]')
parser.add_option('--scaling',
dest = 'scaling',
type = 'float', nargs = 3, metavar = ' '.join(['float']*3),
help = 'x/y/z scaling of displacement fluctuation [%default]')
parser.add_option('-z','--layer',
dest = 'z',
type = 'int', metavar = 'int',
help = 'index of z plane to plot [%default]')
parser.add_option('--color',
dest = 'color',
type = 'string', metavar = 'string',
help = 'color scheme')
parser.add_option('--invert',
dest = 'invert',
action = 'store_true',
help = 'invert color scheme')
parser.add_option('--abs',
dest = 'abs',
action = 'store_true',
help = 'magnitude of values')
parser.add_option('--log',
dest = 'log',
action = 'store_true',
help = 'log10 of values')
parser.add_option('-N','--pixelsize',
dest = 'pixelsize',
type = 'int', metavar = 'int',
help = 'pixels per cell edge')
parser.add_option('--show',
dest = 'show',
action = 'store_true',
help = 'show resulting image')

parser.set_defaults(label = None,
range = [0.0,0.0],

@@ -61,11 +84,8 @@ parser.set_defaults(label = None,
log = False,
defgrad = 'f',
scaling = [1.,1.,1.],
flipLR = False,
flipUD = False,
color = "gray",
invert = False,
crop = [0,0,0,0],
pixelsize = 1,
show = False,
)

@@ -86,35 +106,28 @@ theColors = np.uint8(np.array(theMap.export(format='list',steps=256))*255)

# --- loop over input files -------------------------------------------------------------------------

if filenames == []:
filenames = ['STDIN']
if filenames == []: filenames = ['STDIN']

for name in filenames:
if name == 'STDIN':
file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr}
file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
else:
if not os.path.exists(name): continue
file = {'name':name,
'input':open(name),
'output':open(os.path.splitext(name)[0]+ \
('' if options.label == None else '_'+options.label)+ \
'.png','w'),
'croak':sys.stderr}
file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')

table = damask.ASCIItable(file['input'],file['output'],
buffered = False, # make unbuffered ASCII_table
labels = options.label != None) # no labels when taking 2D dataset
table.head_read() # read ASCII header info

if not (name == 'STDIN' or os.path.exists(name)): continue
table = damask.ASCIItable(name = name,
outname = None,
buffered = False,
labeled = options.label != None,
readonly = True)
table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

# ------------------------------------------ read header ------------------------------------------

table.head_read()

# --------------- figure out columns to process ---------------------------------------------------

errors = []
if table.label_dimension(options.label) != 1:
errors.append('no scalar data (%s) found...'%options.label)
errors.append('no scalar data ({}) found.'.format(options.label))
if table.label_dimension(options.defgrad) != 9:
errors.append('no deformation gradient tensor (1..9_%s) found...'%options.defgrad)
errors.append('no deformation gradient tensor (1..9_{}) found.'.format(options.defgrad))

if errors != []:
file['croak'].write('\n'.join(errors)+'\n')

@@ -123,8 +136,8 @@ for name in filenames:

table.data_readArray([options.label,options.defgrad])

F = table.data[:,1:10].transpose().reshape([3,3]+list(options.dimension),order='F')
data = table.data[:,0 ].transpose().reshape( list(options.dimension),order='F')
F = table.data[:,1:10].transpose().reshape([3,3]+list(options.dimension),order='F')

if options.abs: data = np.abs(data)
if options.log: data = np.log10(data)

@@ -165,24 +178,17 @@ for name in filenames:
nodes[0,x ,y+1,options.z],
nodes[1,x ,y+1,options.z],
],
fill = tuple(theColors[int(255*data[x,y,options.z])]),
fill = tuple(theColors[int(255*data[x,y,options.z])],
|
||||
0 if data[x,y,options.z] == options.gap else 255),
|
||||
outline = None)
|
||||
|
||||
# if options.flipLR: table.data = np.fliplr(table.data)
|
||||
# if options.flipUD: table.data = np.flipud(table.data)
|
||||
|
||||
|
||||
# (height,width,bands) = table.data.shape
|
||||
|
||||
# im = Image.fromarray(table.data.astype('uint8'), 'RGB').\
|
||||
# crop(( options.crop[0],
|
||||
# options.crop[2],
|
||||
# width -options.crop[1],
|
||||
# height-options.crop[3]))
|
||||
|
||||
# ------------------------------------------ output result -----------------------------------------
|
||||
|
||||
im.save(file['output'],format = "PNG")
|
||||
|
||||
im.save(sys.stdout if name == 'STDIN' else
|
||||
os.path.splitext(name)[0]+ \
|
||||
('' if options.label == None else '_'+options.label)+ \
|
||||
'.png',
|
||||
format = "PNG")
|
||||
|
||||
table.close() # close ASCII table
|
||||
if options.show: im.show()
|
||||
|
||||
table.close() # close ASCII table file handles
|
||||
|
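The new fill logic maps each scalar value to a palette color and appends an alpha byte that is 0 whenever the value equals the gap value, so those cells come out fully transparent. A minimal sketch of the same idea, with a stand-in gray palette, random data, and an illustrative file name (none of these names are part of the script):

import numpy as np
from PIL import Image, ImageDraw

palette = np.uint8(np.linspace(0,255,256)[:,None].repeat(3,axis=1))             # stand-in 256-entry RGB palette
data    = np.random.rand(16,16)                                                 # stand-in scalar field in [0,1]
gap     = 0.0                                                                   # value to render transparent

im   = Image.new('RGBA',(16,16))
draw = ImageDraw.Draw(im)
for x in range(16):
  for y in range(16):
    rgba = tuple(int(c) for c in palette[int(255*data[x,y])]) \
         + (0 if data[x,y] == gap else 255,)                                    # color triplet + alpha byte
    draw.point((x,y),fill = rgba)                                               # the script draws polygons instead
im.save('field.png',format = 'PNG')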

@@ -19,24 +19,42 @@ Generate PNG image from data in given column vector containing RGB tuples.

""", version = scriptID)

parser.add_option('-l','--label', dest='label', type='string',
                  help='column containing RGB triplet')
parser.add_option('-d','--dimension', dest='dimension', type='int', nargs=2,
                  help='data dimension (width height)')
parser.add_option('--fliplr', dest='flipLR', action='store_true',
                  help='flip around vertical axis')
parser.add_option('--flipud', dest='flipUD', action='store_true',
                  help='flip around horizontal axis')
parser.add_option('--crop', dest='crop', type='int', nargs=4, metavar=' '.join(['int']*4),
                  help='pixels cropped on left, right, top, bottom')
parser.add_option('--show', dest='show', action='store_true',
                  help='show resulting image')
parser.add_option('-N','--pixelsize', dest='pixelsize', type='int',
                  help='pixels per data point')
parser.add_option('-x','--pixelsizex', dest='pixelsizex', type='int',
                  help='pixels per data point along x')
parser.add_option('-y','--pixelsizey', dest='pixelsizey', type='int',
                  help='pixels per data point along y')
parser.add_option('-l','--label',
                  dest = 'label',
                  type = 'string', metavar = 'string',
                  help = 'column containing RGB triplet')
parser.add_option('-d','--dimension',
                  dest = 'dimension',
                  type = 'int', nargs = 2, metavar = 'int int',
                  help = 'data dimension (width height)')
parser.add_option('--fliplr',
                  dest = 'flipLR',
                  action = 'store_true',
                  help = 'flip around vertical axis')
parser.add_option('--flipud',
                  dest = 'flipUD',
                  action = 'store_true',
                  help = 'flip around horizontal axis')
parser.add_option('--crop',
                  dest = 'crop',
                  type = 'int', nargs = 4, metavar = ' '.join(['int']*4),
                  help = 'pixels cropped on left, right, top, bottom')
parser.add_option('-N','--pixelsize',
                  dest = 'pixelsize',
                  type = 'int', metavar = 'int',
                  help = 'pixels per data point')
parser.add_option('-x','--pixelsizex',
                  dest = 'pixelsizex',
                  type = 'int', metavar = 'int',
                  help = 'pixels per data point along x')
parser.add_option('-y','--pixelsizey',
                  dest = 'pixelsizey',
                  type = 'int', metavar = 'int',
                  help = 'pixels per data point along y')
parser.add_option('--show',
                  dest = 'show',
                  action = 'store_true',
                  help = 'show resulting image')

parser.set_defaults(label = None,
                    dimension = [],

@@ -55,27 +73,21 @@ if options.dimension == []: parser.error('dimension of data array missing')
if options.pixelsize > 1: (options.pixelsizex,options.pixelsizey) = [options.pixelsize]*2

# --- loop over input files -------------------------------------------------------------------------
if filenames == []:
  filenames = ['STDIN']

if filenames == []: filenames = ['STDIN']

for name in filenames:
  if name == 'STDIN':
    file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr}
    file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
  else:
    if not os.path.exists(name): continue
    file = {'name':name,
            'input':open(name),
            'output':open(os.path.splitext(name)[0]+ \
                          ('' if options.label == None else '_'+options.label)+ \
                          '.png','w'),
            'croak':sys.stderr}
    file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
  if not (name == 'STDIN' or os.path.exists(name)): continue
  table = damask.ASCIItable(name = name,
                            outname = None,
                            buffered = False,
                            labeled = options.label != None,
                            readonly = True)
  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

  table = damask.ASCIItable(file['input'],file['output'],
                            buffered = False,                                   # make unbuffered ASCII_table
                            labels = True)
  table.head_read()                                                             # read ASCII header info
# ------------------------------------------ read header ------------------------------------------

  table.head_read()

# ------------------------------------------ process data ------------------------------------------

@@ -83,13 +95,13 @@ for name in filenames:

  missing_labels = table.data_readArray(options.label)
  if len(missing_labels) > 0:
    errors.append('column%s %s not found'%('s' if len(missing_labels) > 1 else '',
                                           ', '.join(missing_labels)))
    errors.append('column{} {} not found'.format('s' if len(missing_labels) > 1 else '',
                                                 ', '.join(missing_labels)))
  if table.label_dimension(options.label) != 3:
    errors.append('column %s has wrong dimension'%options.label)
    errors.append('column {} has wrong dimension'.format(options.label))

  if errors != []:
    file['croak'].write('\n'.join(errors))
    table.croak(errors)
    table.close(dismiss = True)                                                 # close ASCII table file handles and delete output file
    continue

@@ -98,13 +110,13 @@ for name in filenames:
  if options.flipLR: table.data = np.fliplr(table.data)
  if options.flipUD: table.data = np.flipud(table.data)

  table.data = table.data.\
               repeat(options.pixelsizex,axis=1).\
  table.data = table.data.repeat(options.pixelsizex,axis=1).\
                          repeat(options.pixelsizey,axis=0)

  table.data *= 1. if np.any(table.data > 1.0) else 255.0                       # ensure 8 bit data range

  (height,width,bands) = table.data.shape
  table.croak('image dimension: {0} x {1}'.format(width,height))

  im = Image.fromarray(table.data.astype('uint8'), 'RGB').\
       crop((       options.crop[0],

@@ -114,7 +126,11 @@ for name in filenames:

# ------------------------------------------ output result -----------------------------------------

  im.save(file['output'],format = "PNG")
  if options.show: im.show()
  im.save(sys.stdout if name == 'STDIN' else
          os.path.splitext(name)[0]+ \
          ('' if options.label == None else '_'+options.label)+ \
          '.png',
          format = "PNG")

  table.close()                                                                 # close ASCII table file handles
  table.close()                                                                 # close ASCII table
  if options.show: im.show()
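Chaining numpy's repeat() along both axes, as done above, blows each data point up to a block of pixelsizex x pixelsizey pixels before handing the array to PIL. A self-contained sketch with stand-in data and file name:

import numpy as np
from PIL import Image

rgb = np.random.rand(32,48,3)                                                   # stand-in height x width x RGB data in [0,1]
rgb *= 1. if np.any(rgb > 1.0) else 255.0                                       # same 8-bit range heuristic as the script
px,py = 4,4                                                                     # pixels per data point along x and y

im = Image.fromarray(rgb.repeat(px,axis=1)
                        .repeat(py,axis=0)
                        .astype('uint8'),'RGB')                                 # 32x48 data becomes a 128x192 image
im.save('block.png',format = 'PNG')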

@@ -18,64 +18,80 @@ Permute all values in given column(s).

""", version = scriptID)

parser.add_option('-l','--label', dest='label', action='extend', metavar='<string LIST>',
                  help='heading(s) of column to permute')
parser.add_option('-r', '--rnd', dest='randomSeed', type='int', metavar='int',
                  help='seed of random number generator [%default]')
parser.set_defaults(randomSeed = None)
parser.add_option('-l','--label',
                  dest = 'label',
                  action = 'extend', metavar = '<string LIST>',
                  help ='column(s) to permute')
parser.add_option('-r', '--rnd',
                  dest = 'randomSeed',
                  type = 'int', metavar = 'int',
                  help = 'seed of random number generator [%default]')

parser.set_defaults(label = [],
                    randomSeed = None,
                    )

(options,filenames) = parser.parse_args()

if options.label == None:
  parser.error('no data column specified...')
if len(options.label) == 0:
  parser.error('no labels specified.')

# --- loop over input files -------------------------------------------------------------------------

if filenames == []: filenames = ['STDIN']

for name in filenames:
  if not os.path.exists(name): continue
  file = {'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr}
  file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
  if not (name == 'STDIN' or os.path.exists(name)): continue
  table = damask.ASCIItable(name = name, outname = name+'_tmp',
                            buffered = False)
  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

  randomSeed = int(os.urandom(4).encode('hex'), 16) if options.randomSeed == None else options.randomSeed  # radom seed per file for second phase
  np.random.seed(randomSeed)
  table = damask.ASCIItable(file['input'],file['output'],buffered=False)        # make unbuffered ASCII_table
  table.head_read()                                                             # read ASCII header info
  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
  table.info_append('random seed %i'%randomSeed)
# ------------------------------------------ read header ------------------------------------------

# --------------- figure out columns to process ---------------------------------------------------
  active = []
  column = {}
  table.head_read()

  for label in options.label:
    if label in table.labels:
      active.append(label)
      column[label] = table.labels.index(label)                                 # remember columns of requested data
# ------------------------------------------ process labels ---------------------------------------

  errors  = []
  remarks = []
  columns = []
  dims    = []

  indices    = table.label_index    (options.label)
  dimensions = table.label_dimension(options.label)
  for i,index in enumerate(indices):
    if index == -1: remarks.append('label {} not present...'.format(options.label[i]))
    else:
      file['croak'].write('column %s not found...\n'%label)
      columns.append(index)
      dims.append(dimensions[i])

  if remarks != []: table.croak(remarks)
  if errors  != []:
    table.croak(errors)
    table.close(dismiss = True)
    continue

# ------------------------------------------ assemble header ---------------------------------------

  randomSeed = int(os.urandom(4).encode('hex'), 16) if options.randomSeed == None else options.randomSeed  # random seed per file
  np.random.seed(randomSeed)

  table.info_append([scriptID + '\t' + ' '.join(sys.argv[1:]),
                     'random seed {}'.format(randomSeed),
                     ])
  table.head_write()

# ------------------------------------------ process data ------------------------------------------
  permutation = {}
  table.data_readArray(active)
  for i,label in enumerate(active):
    unique = list(set(table.data[:,i]))
    permutated = np.random.permutation(unique)
    permutation[label] = dict(zip(unique,permutated))

  table.data_rewind()
  table.head_read()                                                             # read ASCII header info again to get the completed data
  outputAlive = True
  while outputAlive and table.data_read():                                      # read next data line of ASCII table
    for label in active:                                                        # loop over all requested stiffnesses
      table.data[column[label]] = permutation[label][float(table.data[column[label]])]  # apply permutation

    outputAlive = table.data_write()                                            # output processed line
  table.data_readArray()                                                        # read all data at once
  for col,dim in zip(columns,dims):
    table.data[:,col:col+dim] = np.random.permutation(table.data[:,col:col+dim])

# ------------------------------------------ output result -----------------------------------------
  outputAlive and table.output_flush()                                          # just in case of buffered ASCII table

  table.input_close()                                                           # close input ASCII table
  table.output_close()                                                          # close output ASCII table
  os.rename(file['name']+'_tmp',file['name'])                                   # overwrite old one with tmp new
  table.data_writeArray()

# ------------------------------------------ output finalization -----------------------------------

  table.close()                                                                 # close ASCII tables
  if name != 'STDIN': os.rename(name+'_tmp',name)                               # overwrite old one with tmp new
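Note that np.random.permutation applied to a 2-D slice, as in the rewritten loop, shuffles whole rows of the column block, so the components of a vector or tensor entry stay together. A quick illustration with stand-in data:

import numpy as np

data    = np.arange(12.).reshape(6,2)                                           # stand-in table: 6 rows of one 2-component column
col,dim = 0,2                                                                   # as collected in columns/dims above
data[:,col:col+dim] = np.random.permutation(data[:,col:col+dim])                # permutes rows, keeps component pairs intact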

@@ -17,71 +17,72 @@ Rename scalar, vectorial, and/or tensorial data header labels.

""", version = scriptID)

parser.add_option('-l','--label', dest='label', action='extend', metavar='<string LIST>',
                  help='column(s) to rename')
parser.add_option('-s','--substitute', dest='substitute', action='extend', metavar='<string LIST>',
                  help='new column label')
parser.add_option('-l','--label',
                  dest = 'label',
                  action = 'extend', metavar='<string LIST>',
                  help = 'column(s) to rename')
parser.add_option('-s','--substitute',
                  dest = 'substitute',
                  action = 'extend', metavar='<string LIST>',
                  help = 'new column label(s)')

parser.set_defaults(label = [])
parser.set_defaults(substitute = [])
parser.set_defaults(label = [],
                    substitute = [],
                    )

(options,filenames) = parser.parse_args()

# --- loop over input files -------------------------------------------------------------------------
if filenames == []:
  filenames = ['STDIN']

if filenames == []: filenames = ['STDIN']

for name in filenames:
  if name == 'STDIN':
    file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr}
    file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
  else:
    if not os.path.exists(name): continue
    file = {'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr}
    file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
  if not (name == 'STDIN' or os.path.exists(name)): continue
  table = damask.ASCIItable(name = name, outname = name+'_tmp',
                            buffered = False)
  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

# ------------------------------------------ read header ------------------------------------------

  table = damask.ASCIItable(file['input'],file['output'],buffered=False)        # make unbuffered ASCII_table
  table.head_read()
  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))                   # read ASCII header info

# ------------------------------------------ process labels ---------------------------------------

  errors  = []
  errors  = []
  remarks = []

  if options.label == []:
    errors.append('no labels specified...')
  if len(options.label) == 0:
    errors.append('no labels specified.')
  elif len(options.label) != len(options.substitute):
    errors.append('mismatch between number of labels ({0}) and substitutes ({1})...'.format(len(options.label),
                                                                                            len(options.substitute)))
  else:                                                                         # tag individual candidates
    errors.append('mismatch between number of labels ({}) and substitutes ({}).'.format(len(options.label),
                                                                                        len(options.substitute)))
  else:
    indices    = table.label_index    (options.label)
    dimensions = table.label_dimension(options.label)
    for i,index in enumerate(indices):
      if index == -1:
        errors.append('label %s not present...\n'%options.label[i])
      if index == -1: remarks.append('label {} not present...'.format(options.label[i]))
      else:
        for j in xrange(dimensions[i]):
          table.labels[index+j] = table.labels[index+j].replace(options.label[i],options.substitute[i])

  if errors != []:
    file['croak'].write('\n'.join(errors)+'\n')
  if remarks != []: table.croak(remarks)
  if errors != []:
    table.croak(errors)
    table.close(dismiss = True)
    continue

# ------------------------------------------ assemble header ---------------------------------------

  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
  table.head_write()

# ------------------------------------------ process data ---------------------------------------
# ------------------------------------------ process data ------------------------------------------

  outputAlive = True
  while outputAlive and table.data_read():                                      # read next data line of ASCII table
    outputAlive = table.data_write()                                            # output processed line

# ------------------------------------------ output result ---------------------------------------

  outputAlive and table.output_flush()                                          # just in case of buffered ASCII table
# ------------------------------------------ output finalization -----------------------------------

  table.close()                                                                 # close ASCII tables
  if file['name'] != 'STDIN':
    os.rename(file['name']+'_tmp',file['name'])                                 # overwrite old one with tmp new
  if name != 'STDIN': os.rename(name+'_tmp',name)                               # overwrite old one with tmp new
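The renaming relies on vector and tensor labels being stored component-wise as '1_<label>' ... '<dim>_<label>', so every component header is rewritten. A sketch of that convention (the index and dimension values stand in for what label_index and label_dimension are assumed to report):

labels    = ['1_f','2_f','3_f','4_f','5_f','6_f','7_f','8_f','9_f','x']
index,dim = 0,9                                                                 # assumed result for label 'f'
for j in range(dim):
  labels[index+j] = labels[index+j].replace('f','defgrad')
# labels is now ['1_defgrad', ..., '9_defgrad', 'x']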

@@ -19,64 +19,77 @@ Rotate vector and/or tensor column data by given angle around given axis.

""", version = scriptID)

parser.add_option('-v','--vector', dest = 'vector', action = 'extend', metavar = '<string LIST>',
                  help = 'column heading of vector to rotate')
parser.add_option('-t','--tensor', dest = 'tensor', action = 'extend', metavar = '<string LIST>',
                  help = 'column heading of tensor to rotate')
parser.add_option('-r', '--rotation',dest = 'rotation', type = 'float', nargs = 4, metavar = ' '.join(['float']*4),
parser.add_option('-v','--vector',
                  dest = 'vector',
                  action = 'extend', metavar = '<string LIST>',
                  help = 'column heading of vector(s) to rotate')
parser.add_option('-t','--tensor',
                  dest = 'tensor',
                  action = 'extend', metavar = '<string LIST>',
                  help = 'column heading of tensor(s) to rotate')
parser.add_option('-r', '--rotation',
                  dest = 'rotation',
                  type = 'float', nargs = 4, metavar = ' '.join(['float']*4),
                  help = 'angle and axis to rotate data [%default]')
parser.add_option('-d', '--degrees', dest = 'degrees', action = 'store_true',
parser.add_option('-d', '--degrees',
                  dest = 'degrees',
                  action = 'store_true',
                  help = 'angles are given in degrees [%default]')
parser.set_defaults(rotation = (0.,1.,1.,1.))                                   # no rotation about 1,1,1
parser.set_defaults(degrees = False)

parser.set_defaults(rotation = (0.,1.,1.,1.),                                   # no rotation about 1,1,1
                    degrees = False,
                    )

(options,filenames) = parser.parse_args()

datainfo = {                                                                    # list of requested labels per datatype
             'vector':     {'len':3,
                            'label':[]},
             'tensor':     {'len':9,
                            'label':[]},
           }

if options.vector != None: datainfo['vector']['label'] += options.vector
if options.tensor != None: datainfo['tensor']['label'] += options.tensor
if options.vector == None and options.tensor == None:
  parser.error('no data column specified.')

toRadians = math.pi/180.0 if options.degrees else 1.0                           # rescale degrees to radians
r = damask.Quaternion().fromAngleAxis(toRadians*options.rotation[0],options.rotation[1:])
R = r.asMatrix()
q = damask.Quaternion().fromAngleAxis(toRadians*options.rotation[0],options.rotation[1:])
R = q.asMatrix()

# --- loop over input files -------------------------------------------------------------------------
if filenames == []:
  filenames = ['STDIN']

if filenames == []: filenames = ['STDIN']

for name in filenames:
  if name == 'STDIN':
    file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr}
    file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
  else:
    if not os.path.exists(name): continue
    file = {'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr}
    file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
  if not (name == 'STDIN' or os.path.exists(name)): continue
  table = damask.ASCIItable(name = name, outname = name+'_tmp',
                            buffered = False)
  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

  table = damask.ASCIItable(file['input'],file['output'],buffered=False)        # make unbuffered ASCII_table
  table.head_read()                                                             # read ASCII header info
  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
# ------------------------------------------ read header ------------------------------------------

# --------------- figure out columns to process ---------------------------------------------------
  active = defaultdict(list)
  column = defaultdict(dict)
  table.head_read()

  for datatype,info in datainfo.items():
    for label in info['label']:
      key = '1_'+label
      if key in table.labels:
        active[datatype].append(label)
        column[datatype][label] = table.labels.index(key)                       # remember columns of requested data
# ------------------------------------------ sanity checks ----------------------------------------

  items = {
           'tensor': {'dim': 9, 'shape': [3,3], 'labels':options.tensor, 'active':[], 'column': []},
           'vector': {'dim': 3, 'shape': [3],   'labels':options.vector, 'active':[], 'column': []},
          }
  errors  = []
  remarks = []
  column = {}

  for type, data in items.iteritems():
    for what in data['labels']:
      dim = table.label_dimension(what)
      if dim != data['dim']: remarks.append('column {} is not a {}.'.format(what,type))
      else:
        file['croak'].write('column %s not found...\n'%label)

# ------------------------------------------ assemble header ---------------------------------------
        items[type]['active'].append(what)
        items[type]['column'].append(table.label_index(what))

  if remarks != []: table.croak(remarks)
  if errors  != []:
    table.croak(errors)
    table.close(dismiss = True)
    continue

# ------------------------------------------ assemble header --------------------------------------

  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
  table.head_write()

# ------------------------------------------ process data ------------------------------------------

@@ -85,28 +98,21 @@ for name in filenames:

    datatype = 'vector'

    for label in active[datatype] if datatype in active else []:                # loop over all requested labels
      table.data[column[datatype][label]:column[datatype][label]+datainfo[datatype]['len']] = \
        r * np.array(map(float,
                         table.data[column[datatype][label]:\
                                    column[datatype][label]+datainfo[datatype]['len']]))
    for column in items[datatype]['column']:                                    # loop over all requested labels
      table.data[column:column+items[datatype]['dim']] = \
        q * np.array(map(float,table.data[column:column+items[datatype]['dim']]))

    datatype = 'tensor'

    for label in active[datatype] if datatype in active else []:                # loop over all requested labels
      A = np.array(map(float,table.data[column[datatype][label]:\
                                        column[datatype][label]+datainfo[datatype]['len']])).\
            reshape(np.sqrt(datainfo[datatype]['len']),
                    np.sqrt(datainfo[datatype]['len']))
      table.data[column[datatype][label]:\
                 column[datatype][label]+datainfo[datatype]['len']] = \
        np.dot(R,np.dot(A,R.transpose())).reshape(datainfo[datatype]['len'])
    for column in items[datatype]['column']:                                    # loop over all requested labels
      table.data[column:column+items[datatype]['dim']] = \
        np.dot(R,np.dot(np.array(map(float,table.data[column:column+items[datatype]['dim']])).\
                        reshape(items[datatype]['shape']),R.transpose()
                       )).reshape(items[datatype]['dim'])

    outputAlive = table.data_write()                                            # output processed line

# ------------------------------------------ output result -----------------------------------------
  outputAlive and table.output_flush()
# ------------------------------------------ output finalization -----------------------------------

  table.input_close()                                                           # close input ASCII table (works for stdin)
  table.output_close()                                                          # close output ASCII table (works for stdout)
  if file['name'] != 'STDIN':
    os.rename(file['name']+'_tmp',file['name'])                                 # overwrite old one with tmp new
  table.close()                                                                 # close ASCII tables
  if name != 'STDIN': os.rename(name+'_tmp',name)                               # overwrite old one with tmp new
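Vectors transform as R v and tensors as R A R^T, which is what the rewritten loops compute from the quaternion's rotation matrix. A standalone sketch building R from angle and axis via the Rodrigues formula (stand-in values, no damask.Quaternion):

import numpy as np

theta = np.radians(30.)                                                         # rotation angle
x,y,z = np.array([1.,1.,1.])/np.sqrt(3.)                                        # unit rotation axis
c,s,C = np.cos(theta),np.sin(theta),1.-np.cos(theta)
R = np.array([[x*x*C+c,   x*y*C-z*s, x*z*C+y*s],
              [y*x*C+z*s, y*y*C+c,   y*z*C-x*s],
              [z*x*C-y*s, z*y*C+x*s, z*z*C+c  ]])

v     = np.array([1.,0.,0.])
A     = np.diag([1.,2.,3.])
v_rot = R.dot(v)                                                                # vector: R v
A_rot = R.dot(A).dot(R.T)                                                       # tensor: R A R^T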

@@ -15,94 +15,75 @@ scriptName = os.path.splitext(scriptID.split()[1])[0]
# --------------------------------------------------------------------

parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
Uniformly scale values in scalar/special, vector, or tensor columns by given factor.
Uniformly scale column values by given factor.

""", version = scriptID)

parser.add_option('-s','--special', dest='special', action='extend', metavar='<string LIST>',
                  help='heading of columns containing field values of special dimension')
parser.add_option('-d','--dimension',dest='N', type='int', metavar='int',
                  help='dimension of special field values [%default]')
parser.add_option('-v','--vector', dest='vector', action='extend', metavar='<string LIST>',
                  help='column heading of vector to scale')
parser.add_option('-t','--tensor', dest='tensor', action='extend', metavar='<string LIST>',
                  help='column heading of tensor to scale')
parser.add_option('-f','--factor', dest='factor', action='extend', metavar='<float LIST>',
                  help='list of scalar/special, vector, and tensor scaling factors (in this order!)')
parser.add_option('-l','--label',
                  dest = 'label',
                  action = 'extend', metavar = '<string LIST>',
                  help ='column(s) to scale')
parser.add_option('-f','--factor',
                  dest = 'factor',
                  action = 'extend', metavar='<float LIST>',
                  help = 'factor(s) per column')

parser.set_defaults(special = [])
parser.set_defaults(vector = [])
parser.set_defaults(tensor = [])
parser.set_defaults(factor = [])
parser.set_defaults(N = 1)
parser.set_defaults(label = [],
                    )

(options,filenames) = parser.parse_args()

options.factor = np.array(options.factor,'d')
datainfo = {                                                                    # list of requested labels per datatype
             'special':    {'len':options.N,
                            'label':[]},
             'vector':     {'len':3,
                            'label':[]},
             'tensor':     {'len':9,
                            'label':[]},
           }

length = 0
if options.special != []: datainfo['special']['label'] += options.special; length += len(options.special)
if options.vector  != []: datainfo['vector']['label']  += options.vector;  length += len(options.vector)
if options.tensor  != []: datainfo['tensor']['label']  += options.tensor;  length += len(options.tensor)
if len(options.factor) != length:
  parser.error('length of scaling vector does not match column count...')
if len(options.label) != len(options.factor):
  parser.error('number of column labels and factors do not match.')

# --- loop over input files -------------------------------------------------------------------------
if filenames == []:
  filenames = ['STDIN']

if filenames == []: filenames = ['STDIN']

for name in filenames:
  if name == 'STDIN':
    file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr}
    file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
  else:
    if not os.path.exists(name): continue
    file = {'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr}
    file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
  if not (name == 'STDIN' or os.path.exists(name)): continue
  table = damask.ASCIItable(name = name, outname = name+'_tmp',
                            buffered = False)
  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

  table = damask.ASCIItable(file['input'],file['output'],buffered=False)        # make unbuffered ASCII_table
  table.head_read()                                                             # read ASCII header info
  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
# ------------------------------------------ read header ------------------------------------------

# --------------- figure out columns to process ---------------------------------------------------
  active = defaultdict(list)
  column = defaultdict(dict)
  table.head_read()

  for datatype,info in datainfo.items():
    for label in info['label']:
      key = '1_'+label if info['len'] > 1 else label
      if key in table.labels:
        active[datatype].append(label)
        column[datatype][label] = table.labels.index(key)                       # remember columns of requested data
      else:
        file['croak'].write('column %s not found...\n'%label)
  errors  = []
  remarks = []
  columns = []
  dims    = []
  factors = []

  for what,factor in zip(options.label,options.factor):
    col = table.label_index(what)
    if col < 0: remarks.append('column {} not found...'.format(what,type))
    else:
      columns.append(col)
      factors.append(float(factor))
      dims.append(table.label_dimension(what))

  if remarks != []: table.croak(remarks)
  if errors  != []:
    table.croak(errors)
    table.close(dismiss = True)
    continue

# ------------------------------------------ assemble header ---------------------------------------
# ------------------------------------------ assemble header ---------------------------------------

  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
  table.head_write()

# ------------------------------------------ process data ------------------------------------------

  outputAlive = True
  while outputAlive and table.data_read():                                      # read next data line of ASCII table
    i = 0
    for datatype,labels in sorted(active.items(),key=lambda x:datainfo[x[0]]['len']):  # loop over special,vector,tensor
      for label in labels:                                                      # loop over all requested labels
        for j in xrange(datainfo[datatype]['len']):                             # loop over entity elements
          table.data[column[datatype][label]+j] = float(table.data[column[datatype][label]+j]) * options.factor[i]
        i += 1
    for col,dim,factor in zip(columns,dims,factors):                            # loop over items
      table.data[col:col+dim] = factor * np.array(table.data[col:col+dim],'d')
    outputAlive = table.data_write()                                            # output processed line

# ------------------------------------------ output result -----------------------------------------
  outputAlive and table.output_flush()                                          # just in case of buffered ASCII table
# ------------------------------------------ output finalization -----------------------------------

  table.input_close()                                                           # close input ASCII table (works for stdin)
  table.output_close()                                                          # close output ASCII table (works for stdout)
  if file['name'] != 'STDIN':
    os.rename(file['name']+'_tmp',file['name'])                                 # overwrite old one with tmp new
  table.close()                                                                 # close ASCII tables
  if name != 'STDIN': os.rename(name+'_tmp',name)                               # overwrite old one with tmp new
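Scaling a whole column block in one vectorized statement replaces the old element-by-element loop; shifting (next script) works identically with `offset +` instead of `factor *`. A sketch on one stand-in data line:

import numpy as np

row            = ['1.0','2.0','3.0','10.0']                                     # one ASCII table data line (strings)
col,dim,factor = 0,3,2.0                                                        # as collected in columns/dims/factors
row[col:col+dim] = factor * np.array(row[col:col+dim],'d')
# row is now [2.0, 4.0, 6.0, '10.0']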

@@ -7,7 +7,7 @@ from collections import defaultdict
from optparse import OptionParser
import damask

scriptID   = string.replace('$Id$','\n','\\n')
scriptID   = '$Id$'
scriptName = os.path.splitext(scriptID.split()[1])[0]

# --------------------------------------------------------------------

@@ -15,94 +15,75 @@ scriptName = os.path.splitext(scriptID.split()[1])[0]
# --------------------------------------------------------------------

parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
Shift values of scalar/special, vector, or tensor columns by given offset.
Uniformly shift column values by given offset.

""", version = scriptID)

parser.add_option('-s','--special', dest='special', action='extend', metavar='<string LIST>',
                  help='heading of columns containing field values of special dimension')
parser.add_option('-d','--dimension',dest='N', type='int', metavar='int',
                  help='dimension of special field values [%default]')
parser.add_option('-v','--vector', dest='vector', action='extend', metavar='<string LIST>',
                  help='column heading to shift by vector')
parser.add_option('-t','--tensor', dest='tensor', action='extend', metavar='<string LIST>',
                  help='column heading to shift by tensor')
parser.add_option('-o','--offset', dest='delta', action='extend', metavar='<float LIST>',
                  help='list of scalar/special, vector, and tensor shifts (in this order!)')
parser.add_option('-l','--label',
                  dest = 'label',
                  action = 'extend', metavar = '<string LIST>',
                  help ='column(s) to shift')
parser.add_option('-o','--offset',
                  dest = 'offset',
                  action = 'extend', metavar='<float LIST>',
                  help = 'offset(s) per column')

parser.set_defaults(special = [])
parser.set_defaults(vector = [])
parser.set_defaults(tensor = [])
parser.set_defaults(delta = [])
parser.set_defaults(N = 1)
parser.set_defaults(label = [],
                    )

(options,filenames) = parser.parse_args()

options.delta = np.array(options.delta,'d')
datainfo = {                                                                    # list of requested labels per datatype
             'special':    {'len':options.N,
                            'label':[]},
             'vector':     {'len':3,
                            'label':[]},
             'tensor':     {'len':9,
                            'label':[]},
           }

length = 0
if options.special != []: datainfo['special']['label'] += options.special; length += len(options.special)
if options.vector  != []: datainfo['vector']['label']  += options.vector;  length += len(options.vector)
if options.tensor  != []: datainfo['tensor']['label']  += options.tensor;  length += len(options.tensor)
if len(options.delta) != length:
  parser.error('length of offset vector does not match column types...')
if len(options.label) != len(options.offset):
  parser.error('number of column labels and offsets do not match.')

# --- loop over input files -------------------------------------------------------------------------
if filenames == []:
  filenames = ['STDIN']

if filenames == []: filenames = ['STDIN']

for name in filenames:
  if name == 'STDIN':
    file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr}
    file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
  else:
    if not os.path.exists(name): continue
    file = {'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr}
    file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
  if not (name == 'STDIN' or os.path.exists(name)): continue
  table = damask.ASCIItable(name = name, outname = name+'_tmp',
                            buffered = False)
  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

  table = damask.ASCIItable(file['input'],file['output'],buffered=False)        # make unbuffered ASCII_table
  table.head_read()                                                             # read ASCII header info
  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
# ------------------------------------------ read header ------------------------------------------

# --------------- figure out columns to process ---------------------------------------------------
  active = defaultdict(list)
  column = defaultdict(dict)
  table.head_read()

  for datatype,info in datainfo.items():
    for label in info['label']:
      key = '1_'+label if info['len'] > 1 else label                            # non-special labels have to start with '1_'
      if key in table.labels:
        active[datatype].append(label)
        column[datatype][label] = table.labels.index(key)                       # remember columns of requested data
      else:
        file['croak'].write('column %s not found...\n'%label)
  errors  = []
  remarks = []
  columns = []
  dims    = []
  offsets = []

  for what,offset in zip(options.label,options.offset):
    col = table.label_index(what)
    if col < 0: remarks.append('column {} not found...'.format(what,type))
    else:
      columns.append(col)
      offsets.append(float(offset))
      dims.append(table.label_dimension(what))

  if remarks != []: table.croak(remarks)
  if errors  != []:
    table.croak(errors)
    table.close(dismiss = True)
    continue

# ------------------------------------------ assemble header ---------------------------------------
# ------------------------------------------ assemble header ---------------------------------------

  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
  table.head_write()

# ------------------------------------------ process data ------------------------------------------

  outputAlive = True
  while outputAlive and table.data_read():                                      # read next data line of ASCII table
    i = 0
    for datatype,labels in sorted(active.items(),key=lambda x:datainfo[x[0]]['len']):  # loop over scalar,vector,tensor
      for label in labels:                                                      # loop over all requested labels
        for j in xrange(datainfo[datatype]['len']):                             # loop over entity elements
          table.data[column[datatype][label]+j] = float(table.data[column[datatype][label]+j]) + options.delta[i]
        i += 1
    for col,dim,offset in zip(columns,dims,offsets):                            # loop over items
      table.data[col:col+dim] = offset + np.array(table.data[col:col+dim],'d')
    outputAlive = table.data_write()                                            # output processed line

# ------------------------------------------ output result -----------------------------------------
  outputAlive and table.output_flush()                                          # just in case of buffered ASCII table
# ------------------------------------------ output finalization -----------------------------------

  table.input_close()                                                           # close input ASCII table (works for stdin)
  table.output_close()                                                          # close output ASCII table (works for stdout)
  if file['name'] != 'STDIN':
    os.rename(file['name']+'_tmp',file['name'])                                 # overwrite old one with tmp new
  table.close()                                                                 # close ASCII tables
  if name != 'STDIN': os.rename(name+'_tmp',name)                               # overwrite old one with tmp new

@@ -18,43 +18,62 @@ Show components of given ASCIItable(s).
""", version = scriptID)


parser.add_option('-a','--head', dest='head', action='store_true',
                  help='output all heading (info + labels)')
parser.add_option('-i','--info', dest='info', action='store_true',
                  help='output info lines')
parser.add_option('-l','--labels', dest='labels', action='store_true',
                  help='output labels')
parser.add_option('-d','--data', dest='data', action='store_true',
                  help='output data')
parser.add_option('-c','--column', dest='col', action='store_true',
                  help='switch to label column format')
parser.add_option('--nolabels', dest='nolabels', action='store_true',
                  help='table has no labels')
parser.set_defaults(col = False)
parser.set_defaults(nolabels = False)
parser.add_option('-d','--data',
                  dest = 'data',
                  action = 'store_true',
                  help = 'output data')
parser.add_option('-a','--head',
                  dest = 'head',
                  action = 'store_true',
                  help = 'output all heading (info + labels)')
parser.add_option('-i','--info',
                  dest = 'info',
                  action = 'store_true',
                  help = 'output info lines')
parser.add_option('-l','--labels',
                  dest = 'labels',
                  action = 'store_true',
                  help = 'output labels')
parser.add_option('-c','--column',
                  dest = 'col',
                  action = 'store_true',
                  help = 'print labels as one column')
parser.add_option('--nolabels',
                  dest = 'labeled',
                  action = 'store_false',
                  help = 'table has no labels')
parser.add_option('-t','--table',
                  dest = 'table',
                  action = 'store_true',
                  help = 'output heading line for proper ASCIItable format')
parser.set_defaults(head = False,
                    info = False,
                    labels = False,
                    data = False,
                    col = False,
                    labeled = True,
                    table = False,
                    )

(options,filenames) = parser.parse_args()

# ------------------------------------------ setup file handles ---------------------------------------
# --- loop over input files -------------------------------------------------------------------------

files = []
if filenames == []:
  files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
else:
  for name in filenames:
    if os.path.exists(name):
      files.append({'name':name, 'input':open(name), 'output':sys.stdout, 'croak':sys.stderr})
if filenames == []: filenames = ['STDIN']

# ------------------------------------------ extract labels ---------------------------------------
for name in filenames:
  if not (name == 'STDIN' or os.path.exists(name)): continue
  table = damask.ASCIItable(name = name, outname = None,
                            buffered = False, labeled = options.labeled, readonly = True)
  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

for file in files:
  if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
  else:                       file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
# ------------------------------------------ output head ---------------------------------------

  table = damask.ASCIItable(file['input'],file['output'],buffered=False,labels=not options.nolabels)  # make unbuffered ASCII_table
  table.head_read()                                                             # read ASCII header info
  if options.head or options.info:   file['output'].write('\n'.join(table.info)+'\n')
  if options.head or options.labels: file['output'].write({True:'\n',False:'\t'}[options.col].join(table.labels)+'\n')
  table.head_read()
  if not (options.head or options.info):                         table.info_clear()
  if not (options.head or (options.labels and options.labeled)): table.labels_clear()

  table.head_write(header = options.table)

# ------------------------------------------ output data ---------------------------------------


@@ -62,7 +81,4 @@ for file in files:
  while outputAlive and table.data_read():                                      # read next data line of ASCII table
    outputAlive = table.data_write()                                            # output line

  outputAlive and table.output_flush()

  if file['name'] != 'STDIN':
    table.input_close()
  table.close()

@@ -21,65 +21,61 @@ With coordinates in columns "x", "y", and "z"; sorting with x slowest and z fastest.
""", version = scriptID)


parser.add_option('-l','--label', dest='keys', action='extend', metavar='<string LIST>',
                  help='list of column labels (a,b,c,...)')
parser.add_option('-r','--reverse', dest='reverse', action='store_true',
                  help='reverse sorting')
parser.add_option('-l','--label',
                  dest = 'keys',
                  action = 'extend', metavar = '<string LIST>',
                  help = 'list of column labels (a,b,c,...)')
parser.add_option('-r','--reverse',
                  dest = 'reverse',
                  action = 'store_true',
                  help = 'sort in reverse')

parser.set_defaults(key = [])
parser.set_defaults(reverse = False)
parser.set_defaults(key = [],
                    reverse = False,
                    )

(options,filenames) = parser.parse_args()

if options.keys == None:
  parser.error('No sorting column(s) specified.')

options.keys.reverse()                                                          # numpy sorts with most significant column as last
options.keys.reverse()                                                          # numpy sorts with most significant column as last

# ------------------------------------------ setup file handles ---------------------------------------
# --- loop over input files -------------------------------------------------------------------------

files = []
if filenames == []:
  files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
else:
  for name in filenames:
    if os.path.exists(name):
      files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
if filenames == []: filenames = ['STDIN']

# ------------------------------------------ loop over input files ---------------------------------------

for file in files:
  if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
  else:                       file['croak'].write('\033[1m'+scriptName+'\033[0m\n')

  table = damask.ASCIItable(file['input'],file['output'],False)                 # make unbuffered ASCII_table
  table.head_read()                                                             # read ASCII header info
  table.info_append(string.replace(scriptID,'\n','\\n') + \
                    '\t' + ' '.join(sys.argv[1:]))
for name in filenames:
  if not (name == 'STDIN' or os.path.exists(name)): continue
  table = damask.ASCIItable(name = name, outname = name+'_tmp',
                            buffered = False)
  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

# ------------------------------------------ assemble header ---------------------------------------

  table.head_read()
  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
  table.head_write()

# ------------------------------------------ process data ---------------------------------------

  table.data_readArray()
  cols = []
  for column in table.label_index(options.keys):
    cols += [table.data[:,column]]

  ind = np.lexsort(cols)
  if options.reverse:
    ind = ind[::-1]

  table.data = table.data[ind]
  table.data_writeArray()
  cols    = []
  remarks = []
  for i,column in enumerate(table.label_index(options.keys)):
    if column < 0:
      remarks.append("label {0} not present.".format(options.keys[i]))
    else:
      cols += [table.data[:,column]]
  if remarks != []: table.croak(remarks)

  ind = np.lexsort(cols) if cols != [] else np.arange(table.data.shape[0])
  if options.reverse: ind = ind[::-1]

# ------------------------------------------ output result ---------------------------------------

  table.output_flush()                                                          # just in case of buffered ASCII table
  table.data = table.data[ind]
  table.data_writeArray()
  table.close()                                                                 # close ASCII table

  table.input_close()                                                           # close input ASCII table
  if file['name'] != 'STDIN':
    table.output_close()                                                        # close output ASCII table
    os.rename(file['name']+'_tmp',file['name'])                                 # overwrite old one with tmp new
  if name != 'STDIN': os.rename(name+'_tmp',name)                               # overwrite old one with tmp new
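np.lexsort treats its last key as the most significant, which is why options.keys is reversed before the columns are gathered. A small worked example:

import numpy as np

data = np.array([[1., 2.],
                 [0., 5.],
                 [1., 1.]])
keys = [data[:,1],data[:,0]]                                                    # most significant key last
ind  = np.lexsort(keys)                                                         # sort by column 0, ties broken by column 1
data = data[ind]                                                                # rows now ordered (0,5), (1,1), (1,2)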
|
|
@ -12,125 +12,80 @@ scriptName = os.path.splitext(scriptID.split()[1])[0]
|
|||
# --------------------------------------------------------------------
|
||||
# MAIN
|
||||
# --------------------------------------------------------------------
|
||||
identifiers = {
|
||||
'grid': ['a','b','c'],
|
||||
'size': ['x','y','z'],
|
||||
'origin': ['x','y','z'],
|
||||
}
|
||||
mappings = {
|
||||
'grid': lambda x: int(x),
|
||||
'size': lambda x: float(x),
|
||||
'origin': lambda x: float(x),
|
||||
'homogenization': lambda x: int(x),
|
||||
'microstructures': lambda x: int(x),
|
||||
}
|
||||
|
||||
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
|
||||
Changes the (three-dimensional) canvas of a spectral geometry description.
|
||||
|
||||
""", version = scriptID)
|
||||
|
||||
parser.add_option('-g', '--grid', dest='grid', nargs = 3, metavar=' '.join(['string']*3),
|
||||
help='a,b,c grid of hexahedral box [unchanged]')
|
||||
parser.add_option('-o', '--offset', dest='offset', type='int', nargs = 3, metavar=' '.join(['int']*3),
|
||||
help='a,b,c offset from old to new origin of grid %default')
|
||||
parser.add_option('-f', '--fill', dest='fill', type='int', metavar = 'int',
|
||||
help='(background) canvas grain index. "0" selects maximum microstructure index + 1 [%default]')
|
||||
parser.add_option('-g', '--grid',
|
||||
dest = 'grid',
|
||||
type = 'string', nargs = 3, metavar = ' '.join(['string']*3),
|
||||
help = 'a,b,c grid of hexahedral box [unchanged]')
|
||||
parser.add_option('-o', '--offset',
|
||||
dest = 'offset',
|
||||
type = 'int', nargs = 3, metavar = ' '.join(['int']*3),
|
||||
help = 'a,b,c offset from old to new origin of grid %default')
|
||||
parser.add_option('-f', '--fill',
|
||||
dest = 'fill',
|
||||
type = 'int', metavar = 'int',
|
||||
help = '(background) canvas grain index. "0" selects maximum microstructure index + 1 [%default]')
|
||||
|
||||
parser.set_defaults(grid = ['0','0','0'])
|
||||
parser.set_defaults(offset = (0,0,0))
|
||||
parser.set_defaults(fill = 0)
|
||||
parser.set_defaults(grid = ['0','0','0'],
|
||||
offset = (0,0,0),
|
||||
fill = 0,
|
||||
)
|
||||
|
||||
(options, filenames) = parser.parse_args()
|
||||
|
||||
#--- setup file handles --------------------------------------------------------------------------
|
||||
files = []
|
||||
if filenames == []:
|
||||
files.append({'name':'STDIN',
|
||||
'input':sys.stdin,
|
||||
'output':sys.stdout,
|
||||
'croak':sys.stderr,
|
||||
})
|
||||
else:
|
||||
for name in filenames:
|
||||
if os.path.exists(name):
|
||||
files.append({'name':name,
|
||||
'input':open(name),
|
||||
'output':open(name+'_tmp','w'),
|
||||
'croak':sys.stdout,
|
||||
})
|
||||
# --- loop over input files -------------------------------------------------------------------------
|
||||
|
||||
#--- loop over input files ------------------------------------------------------------------------
|
||||
for file in files:
|
||||
file['croak'].write('\033[1m' + scriptName + '\033[0m: ' + (file['name'] if file['name'] != 'STDIN' else '') + '\n')
|
||||
if filenames == []: filenames = ['STDIN']
|
||||
|
||||
for name in filenames:
|
||||
if not (name == 'STDIN' or os.path.exists(name)): continue
|
||||
table = damask.ASCIItable(name = name, outname = name+'_tmp',
|
||||
buffered = False, labeled = False)
|
||||
table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
|
||||
|
||||
# --- interpret header ----------------------------------------------------------------------------
|
||||
|
||||
table = damask.ASCIItable(file['input'],file['output'],labels = False)
|
||||
table.head_read()
|
||||
info,extra_header = table.head_getGeom()
|
||||
|
||||
table.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))),
|
||||
'size x y z: %s'%(' x '.join(map(str,info['size']))),
|
||||
'origin x y z: %s'%(' : '.join(map(str,info['origin']))),
|
||||
'homogenization: %i'%info['homogenization'],
|
||||
'microstructures: %i'%info['microstructures'],
|
||||
])
|
||||
|
||||
#--- interpret header ----------------------------------------------------------------------------
|
||||
info = {
|
||||
'grid': np.zeros(3,'i'),
|
||||
'size': np.zeros(3,'d'),
|
||||
'origin': np.zeros(3,'d'),
|
||||
'homogenization': 0,
|
||||
'microstructures': 0,
|
||||
}
|
||||
errors = []
|
||||
if np.any(info['grid'] < 1): errors.append('invalid grid a b c.')
|
||||
if np.any(info['size'] <= 0.0): errors.append('invalid size x y z.')
|
||||
if errors != []:
|
||||
table.croak(errors)
|
||||
table.close(dismiss = True)
|
||||
continue
|
||||
|
||||
# --- read data ------------------------------------------------------------------------------------
|
||||
|
||||
microstructure = table.microstructure_read(info['grid']).reshape(info['grid'],order='F') # read microstructure

# --- do work ------------------------------------------------------------------------------------

  newInfo = {
             'grid':   np.zeros(3,'i'),
             'origin': np.zeros(3,'d'),
             'microstructures': 0,
            }
  extra_header = []
             'grid':   np.zeros(3,'i'),
             'origin': np.zeros(3,'d'),
             'microstructures': 0,
            }

  for header in table.info:
    headitems = map(str.lower,header.split())
    if len(headitems) == 0: continue                                                                # skip blank lines
    if headitems[0] in mappings.keys():
      if headitems[0] in identifiers.keys():
        for i in xrange(len(identifiers[headitems[0]])):
          info[headitems[0]][i] = \
            mappings[headitems[0]](headitems[headitems.index(identifiers[headitems[0]][i])+1])
      else:
        info[headitems[0]] = mappings[headitems[0]](headitems[1])
    else:
      extra_header.append(header)

  file['croak'].write('grid a b c: %s\n'%(' x '.join(map(str,info['grid']))) + \
                      'size x y z: %s\n'%(' x '.join(map(str,info['size']))) + \
                      'origin x y z: %s\n'%(' : '.join(map(str,info['origin']))) + \
                      'homogenization: %i\n'%info['homogenization'] + \
                      'microstructures: %i\n'%info['microstructures'])

  if np.any(info['grid'] < 1):
    file['croak'].write('invalid grid a b c.\n')
    continue
  if np.any(info['size'] <= 0.0):
    file['croak'].write('invalid size x y z.\n')
    continue

#--- read data ------------------------------------------------------------------------------------
  microstructure = np.zeros(info['grid'].prod(),'i')                                                # initialize as flat array
  i = 0
  while table.data_read():
    items = table.data
    if len(items) > 2:
      if   items[1].lower() == 'of': items = [int(items[2])]*int(items[0])
      elif items[1].lower() == 'to': items = xrange(int(items[0]),1+int(items[2]))
      else:                          items = map(int,items)
    else:                            items = map(int,items)

    s = len(items)
    microstructure[i:i+s] = items
    i += s
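# Sketch of the compact geom notation expanded above (hypothetical tokens):
#   '3 of 2' -> [2, 2, 2]        repeat value 2 three times
#   '1 to 4' -> [1, 2, 3, 4]     inclusive index range
#   '5 6 7'  -> [5, 6, 7]        plain list of indices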

#--- do work ------------------------------------------------------------------------------------
  newInfo['grid'] = np.array([{True:  int(o*float(n.translate(None,'xX'))),
                               False: int(n.translate(None,'xX'))}[n[-1].lower() == 'x'] for o,n in zip(info['grid'],options.grid)],'i')
  newInfo['grid'] = np.where(newInfo['grid'] <= 0 , info['grid'],newInfo['grid'])
  newInfo['grid'] = np.where(newInfo['grid'] <= 0, info['grid'],newInfo['grid'])
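# Sketch of the grid-string semantics above (hypothetical values): a trailing
# 'x' scales the old grid, a bare number replaces it, and non-positive results
# fall back to the old value via np.where:
#   old grid 16, option '2x' -> int(2.0*16) = 32
#   old grid 16, option '20' -> 20
#   old grid 16, option '0'  -> 0, kept as 16 by the fallback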

  microstructure = microstructure.reshape(info['grid'],order='F')
  microstructure_cropped = np.zeros(newInfo['grid'],'i')
  microstructure_cropped.fill({True:options.fill,False:microstructure.max()+1}[options.fill>0])
  microstructure_cropped.fill(options.fill if options.fill > 0 else microstructure.max()+1)
  xindex = list(set(xrange(options.offset[0],options.offset[0]+newInfo['grid'][0])) & \
                set(xrange(info['grid'][0])))
  yindex = list(set(xrange(options.offset[1],options.offset[1]+newInfo['grid'][1])) & \

@@ -151,44 +106,47 @@ for file in files:
  newInfo['origin'] = info['origin']+info['size']/info['grid']*options.offset
  newInfo['microstructures'] = microstructure_cropped.max()

#--- report ---------------------------------------------------------------------------------------
  if (any(newInfo['grid'] != info['grid'])):
    file['croak'].write('--> grid a b c: %s\n'%(' x '.join(map(str,newInfo['grid']))))
  if (any(newInfo['size'] != info['size'])):
    file['croak'].write('--> size x y z: %s\n'%(' x '.join(map(str,newInfo['size']))))
  if (any(newInfo['origin'] != info['origin'])):
    file['croak'].write('--> origin x y z: %s\n'%(' : '.join(map(str,newInfo['origin']))))
  if (newInfo['microstructures'] != info['microstructures']):
    file['croak'].write('--> microstructures: %i\n'%newInfo['microstructures'])
# --- report ---------------------------------------------------------------------------------------

  if np.any(newInfo['grid'] < 1):
    file['croak'].write('invalid new grid a b c.\n')
    continue
  if np.any(newInfo['size'] <= 0.0):
    file['croak'].write('invalid new size x y z.\n')
  remarks = []
  errors  = []

  if (any(newInfo['grid'] != info['grid'])):     remarks.append('--> grid a b c: %s'%(' x '.join(map(str,newInfo['grid']))))
  if (any(newInfo['size'] != info['size'])):     remarks.append('--> size x y z: %s'%(' x '.join(map(str,newInfo['size']))))
  if (any(newInfo['origin'] != info['origin'])): remarks.append('--> origin x y z: %s'%(' : '.join(map(str,newInfo['origin']))))
  if (    newInfo['microstructures'] != info['microstructures']): remarks.append('--> microstructures: %i'%newInfo['microstructures'])

  if np.any(newInfo['grid'] < 1):    errors.append('invalid new grid a b c.')
  if np.any(newInfo['size'] <= 0.0): errors.append('invalid new size x y z.')

  if remarks != []: table.croak(remarks)
  if errors  != []:
    table.croak(errors)
    table.close(dismiss = True)
    continue

#--- write header ---------------------------------------------------------------------------------
  table.labels_clear()
# --- write header ---------------------------------------------------------------------------------

  table.info_clear()
  table.info_append(extra_header+[
    scriptID + ' ' + ' '.join(sys.argv[1:]),
    "grid\ta %i\tb %i\tc %i"%(newInfo['grid'][0],newInfo['grid'][1],newInfo['grid'][2],),
    "size\tx %f\ty %f\tz %f"%(newInfo['size'][0],newInfo['size'][1],newInfo['size'][2],),
    "origin\tx %f\ty %f\tz %f"%(newInfo['origin'][0],newInfo['origin'][1],newInfo['origin'][2],),
    "homogenization\t%i"%info['homogenization'],
    "microstructures\t%i"%(newInfo['microstructures']),
    "grid\ta {grid[0]}\tb {grid[1]}\tc {grid[2]}".format(grid=newInfo['grid']),
    "size\tx {size[0]}\ty {size[1]}\tz {size[2]}".format(size=newInfo['size']),
    "origin\tx {origin[0]}\ty {origin[1]}\tz {origin[2]}".format(origin=newInfo['origin']),
    "homogenization\t{homog}".format(homog=info['homogenization']),
    "microstructures\t{microstructures}".format(microstructures=newInfo['microstructures']),
    ])
  table.labels_clear()
  table.head_write()
  table.output_flush()
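# The new header templates exploit str.format item access, which works on numpy
# arrays as well. Sketch (hypothetical grid):
#   "grid\ta {grid[0]}\tb {grid[1]}\tc {grid[2]}".format(grid=np.array([16,32,48]))
#   -> 'grid\ta 16\tb 32\tc 48'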


# --- write microstructure information ------------------------------------------------------------

  formatwidth = int(math.floor(math.log10(microstructure_cropped.max())+1))
  table.data = microstructure_cropped.reshape((newInfo['grid'][0],newInfo['grid'][1]*newInfo['grid'][2]),order='F').transpose()
  table.data_writeArray('%%%ii'%(formatwidth),delimiter=' ')
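# Sketch of the nested %-formatting above (hypothetical maximum index 314):
#   formatwidth = floor(log10(314))+1 = 3
#   '%%%ii'%(3) -> '%3i'
# so every microstructure index is right-aligned in a fixed-width column.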

#--- output finalization --------------------------------------------------------------------------
  if file['name'] != 'STDIN':
    table.input_close()
    table.output_close()
    os.rename(file['name']+'_tmp',file['name'])
# --- output finalization --------------------------------------------------------------------------

  table.close()                                                                                     # close ASCII table
  if name != 'STDIN': os.rename(name+'_tmp',name)                                                   # overwrite old one with tmp new


@@ -12,82 +12,54 @@ scriptName = os.path.splitext(scriptID.split()[1])[0]
#--------------------------------------------------------------------------------------------------
# MAIN
#--------------------------------------------------------------------------------------------------
identifiers = {
        'grid':   ['a','b','c'],
        'size':   ['x','y','z'],
        'origin': ['x','y','z'],
          }
mappings = {
        'grid':            lambda x: int(x),
        'size':            lambda x: float(x),
        'origin':          lambda x: float(x),
        'homogenization':  lambda x: int(x),
        'microstructures': lambda x: int(x),
          }

parser = OptionParser(option_class=damask.extendableOption, usage='%prog [file[s]]', description = """
Produce VTK rectilinear mesh of structure data from geom description

""", version = scriptID)

parser.add_option('-n','--nodata', dest='data', action='store_false',
                  help='omit microstructure data, just generate mesh')
parser.add_option('-m','--nodata',
                  dest = 'data',
                  action = 'store_false',
                  help = 'generate mesh without microstructure index data')

parser.set_defaults(data = True)
parser.set_defaults(data = True,
                   )

(options, filenames) = parser.parse_args()

#--- setup file handles --------------------------------------------------------------------------
files = []
if filenames == []:
  files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr, })
else:
  for name in filenames:
    if os.path.exists(name):
      files.append({'name':name, 'input':open(name), 'output':sys.stdout, 'croak':sys.stdout, })
# --- loop over input files -------------------------------------------------------------------------

#--- loop over input files ------------------------------------------------------------------------
for file in files:
  file['croak'].write('\033[1m' + scriptName + '\033[0m: ' + (file['name'] if file['name'] != 'STDIN' else '') + '\n')
if filenames == []: filenames = ['STDIN']

  theTable = damask.ASCIItable(file['input'],file['output'],labels=False)
  theTable.head_read()
for name in filenames:
  if not (name == 'STDIN' or os.path.exists(name)): continue
  table = damask.ASCIItable(name = name, outname = None,
                            buffered = False, labeled = False, readonly = True)
  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

#--- interpret header ----------------------------------------------------------------------------
  info = {
          'grid':   np.zeros(3,'i'),
          'size':   np.zeros(3,'d'),
          'origin': np.zeros(3,'d'),
          'homogenization': 0,
          'microstructures': 0,
         }
# --- interpret header ----------------------------------------------------------------------------

  for header in theTable.info:
    headitems = map(str.lower,header.split())
    if len(headitems) == 0: continue
    if headitems[0] in mappings.keys():
      if headitems[0] in identifiers.keys():
        for i in xrange(len(identifiers[headitems[0]])):
          info[headitems[0]][i] = \
            mappings[headitems[0]](headitems[headitems.index(identifiers[headitems[0]][i])+1])
      else:
        info[headitems[0]] = mappings[headitems[0]](headitems[1])
  table.head_read()
  info,extra_header = table.head_getGeom()

  table.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))),
               'size x y z: %s'%(' x '.join(map(str,info['size']))),
               'origin x y z: %s'%(' : '.join(map(str,info['origin']))),
               'homogenization: %i'%info['homogenization'],
               'microstructures: %i'%info['microstructures'],
              ])

  file['croak'].write('grid a b c: %s\n'%(' x '.join(map(str,info['grid']))) + \
                      'size x y z: %s\n'%(' x '.join(map(str,info['size']))) + \
                      'origin x y z: %s\n'%(' : '.join(map(str,info['origin']))) + \
                      'homogenization: %i\n'%info['homogenization'] + \
                      'microstructures: %i\n'%info['microstructures'])

  if np.any(info['grid'] < 1):
    file['croak'].write('invalid grid a b c.\n')
    continue
  if np.any(info['size'] <= 0.0):
    file['croak'].write('invalid size x y z.\n')
  errors = []
  if np.any(info['grid'] < 1):    errors.append('invalid grid a b c.')
  if np.any(info['size'] <= 0.0): errors.append('invalid size x y z.')
  if errors != []:
    table.croak(errors)
    table.close(dismiss = True)
    continue

# --- generate VTK rectilinear grid --------------------------------------------------------------------------------

#--- generate grid --------------------------------------------------------------------------------
  grid = vtk.vtkRectilinearGrid()
  grid.SetDimensions([x+1 for x in info['grid']])
  for i in xrange(3):

@@ -102,43 +74,36 @@ for file in files:
#--- read microstructure information --------------------------------------------------------------

  if options.data:
    microstructure = table.microstructure_read(info['grid'])                                        # read microstructure

    structure = vtk.vtkIntArray()
    structure.SetName('Microstructures')

    while theTable.data_read():
      items = theTable.data
      if len(items) > 2:
        if   items[1].lower() == 'of': items = [int(items[2])]*int(items[0])
        elif items[1].lower() == 'to': items = xrange(int(items[0]),1+int(items[2]))
        else:                          items = map(int,items)
      else:                            items = map(int,items)

      for item in items:
        structure.InsertNextValue(item)
    for idx in microstructure:
      structure.InsertNextValue(idx)

    grid.GetCellData().AddArray(structure)

#--- write data -----------------------------------------------------------------------------------
  if file['name'] == 'STDIN':
# --- write data -----------------------------------------------------------------------------------

  if name == 'STDIN':
    writer = vtk.vtkRectilinearGridWriter()
    writer.WriteToOutputStringOn()
    writer.SetFileTypeToASCII()
    writer.SetHeader('# powered by '+scriptID)
    if vtk.VTK_MAJOR_VERSION <= 5:
      writer.SetInput(grid)
    else:
      writer.SetInputData(grid)
    if vtk.VTK_MAJOR_VERSION <= 5: writer.SetInput(grid)
    else:                          writer.SetInputData(grid)
    writer.Write()
    file['output'].write(writer.GetOutputString()[0:writer.GetOutputStringLength()])
    sys.stdout.write(writer.GetOutputString()[0:writer.GetOutputStringLength()])
  else:
    (dir,file) = os.path.split(file['name'])
    (dir,filename) = os.path.split(name)
    writer = vtk.vtkXMLRectilinearGridWriter()
    writer.SetDataModeToBinary()
    writer.SetCompressorTypeToZLib()
    writer.SetFileName(os.path.join(dir,'mesh_'+os.path.splitext(file)[0]
    writer.SetFileName(os.path.join(dir,'mesh_'+os.path.splitext(filename)[0]
                                        +'.'+writer.GetDefaultFileExtension()))
    if vtk.VTK_MAJOR_VERSION <= 5:
      writer.SetInput(grid)
    else:
      writer.SetInputData(grid)
    if vtk.VTK_MAJOR_VERSION <= 5: writer.SetInput(grid)
    else:                          writer.SetInputData(grid)
    writer.Write()

  table.close()


@@ -34,23 +34,11 @@ def periodic_3Dpad(array, rimdim=(1,1,1)):
#--------------------------------------------------------------------------------------------------
# MAIN
#--------------------------------------------------------------------------------------------------
identifiers = {
        'grid':   ['a','b','c'],
        'size':   ['x','y','z'],
        'origin': ['x','y','z'],
          }
mappings = {
        'grid':            lambda x: int(x),
        'size':            lambda x: float(x),
        'origin':          lambda x: float(x),
        'homogenization':  lambda x: int(x),
        'microstructures': lambda x: int(x),
          }

features = [
            {'aliens': 1, 'names': ['boundary','biplane'],},
            {'aliens': 2, 'names': ['tripleline',],},
            {'aliens': 3, 'names': ['quadruplepoint',],}
            {'aliens': 1, 'alias': ['boundary','biplane'],},
            {'aliens': 2, 'alias': ['tripleline',],},
            {'aliens': 3, 'alias': ['quadruplepoint',],}
           ]

neighborhoods = {

@@ -101,118 +89,79 @@ boundaries, triple lines, and quadruple points.

""", version = scriptID)

parser.add_option('-t','--type', dest = 'type', action = 'extend', type = 'string', metavar = '<string LIST>',
                  help = 'feature type (%s) '%(', '.join(map(lambda x:'|'.join(x['names']),features))) )
parser.add_option('-n','--neighborhood', dest='neighborhood', choices = neighborhoods.keys(), metavar = 'string',
parser.add_option('-t','--type',
                  dest = 'type',
                  action = 'extend', metavar = '<string LIST>',
                  help = 'feature type (%s) '%(', '.join(map(lambda x:'|'.join(x['alias']),features))) )
parser.add_option('-n','--neighborhood',
                  dest = 'neighborhood',
                  choices = neighborhoods.keys(), metavar = 'string',
                  help = 'type of neighborhood (%s) [neumann]'%(', '.join(neighborhoods.keys())))
parser.add_option('-s', '--scale', dest = 'scale', type = 'float', metavar='float',
parser.add_option('-s', '--scale',
                  dest = 'scale',
                  type = 'float', metavar = 'float',
                  help = 'voxel size [%default]')

parser.set_defaults(type = [])
parser.set_defaults(neighborhood = 'neumann')
parser.set_defaults(scale = 1.0)
parser.set_defaults(type = [],
                    neighborhood = 'neumann',
                    scale = 1.0,
                   )

(options,filenames) = parser.parse_args()

if len(options.type) == 0: parser.error('please select a feature type')
if not set(options.type).issubset(set(list(itertools.chain(*map(lambda x: x['names'],features))))):
  parser.error('type must be chosen from (%s)...'%(', '.join(map(lambda x:'|'.join(x['names']),features))) )
if len(options.type) == 0 or \
   not set(options.type).issubset(set(list(itertools.chain(*map(lambda x: x['alias'],features))))):
  parser.error('select feature type from (%s).'%(', '.join(map(lambda x:'|'.join(x['alias']),features))) )
if 'biplane' in options.type and 'boundary' in options.type:
  parser.error("please select only one alias for 'biplane' and 'boundary'")
  parser.error("select only one alias out of 'biplane' and 'boundary'")

feature_list = []
for i,feature in enumerate(features):
  for name in feature['names']:
  for name in feature['alias']:
    for myType in options.type:
      if name.startswith(myType):
        feature_list.append(i)                                                                      # remember valid features
        feature_list.append(i)                                                                      # remember selected features
        break

#--- setup file handles ---------------------------------------------------------------------------
files = []
if filenames == []:
  files.append({'name':'STDIN',
                'input':sys.stdin,
                'output':sys.stdout,
                'croak':sys.stderr,
               })
else:
  for name in filenames:
    if os.path.exists(name):
      files.append({'name':name,
                    'input':open(name),
                    'output':[open(features[feature]['names'][0]+'_'+name,'w') for feature in feature_list],
                    'croak':sys.stdout,
                   })
# --- loop over input files -------------------------------------------------------------------------

#--- loop over input files ------------------------------------------------------------------------
for file in files:
  file['croak'].write('\033[1m' + scriptName + '\033[0m: ' + (file['name'] if file['name'] != 'STDIN' else '') + '\n')
if filenames == []: filenames = ['STDIN']

for name in filenames:
  if not (name == 'STDIN' or os.path.exists(name)): continue
  table = damask.ASCIItable(name = name, outname = None,
                            buffered = False, labeled = False, readonly = True)
  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

# --- interpret header ----------------------------------------------------------------------------

  table = damask.ASCIItable(file['input'],file['output'][0],labels = False)
  table.head_read()
  info,extra_header = table.head_getGeom()

  table.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))),
               'size x y z: %s'%(' x '.join(map(str,info['size']))),
               'origin x y z: %s'%(' : '.join(map(str,info['origin']))),
               'homogenization: %i'%info['homogenization'],
               'microstructures: %i'%info['microstructures'],
              ])

#--- interpret header ----------------------------------------------------------------------------
  info = {
          'grid':   np.zeros(3,'i'),
          'size':   np.zeros(3,'d'),
          'origin': np.zeros(3,'d'),
          'homogenization': 0,
          'microstructures': 0,
         }
  newInfo = {
          'grid':   np.zeros(3,'i'),
          'origin': np.zeros(3,'d'),
          'microstructures': 0,
         }
  extra_header = []

  for header in table.info:
    headitems = map(str.lower,header.split())
    if len(headitems) == 0: continue                                                                # skip blank lines
    if headitems[0] in mappings.keys():
      if headitems[0] in identifiers.keys():
        for i in xrange(len(identifiers[headitems[0]])):
          info[headitems[0]][i] = \
            mappings[headitems[0]](headitems[headitems.index(identifiers[headitems[0]][i])+1])
      else:
        info[headitems[0]] = mappings[headitems[0]](headitems[1])
    else:
      extra_header.append(header)

  file['croak'].write('grid a b c: %s\n'%(' x '.join(map(str,info['grid']))) + \
                      'size x y z: %s\n'%(' x '.join(map(str,info['size']))) + \
                      'origin x y z: %s\n'%(' : '.join(map(str,info['origin']))) + \
                      'homogenization: %i\n'%info['homogenization'] + \
                      'microstructures: %i\n'%info['microstructures'])

  if np.any(info['grid'] < 1):
    file['croak'].write('invalid grid a b c.\n')
    continue
  if np.any(info['size'] <= 0.0):
    file['croak'].write('invalid size x y z.\n')
  errors = []
  if np.any(info['grid'] < 1):    errors.append('invalid grid a b c.')
  if np.any(info['size'] <= 0.0): errors.append('invalid size x y z.')
  if errors != []:
    table.croak(errors)
    table.close(dismiss = True)
    continue

#--- read data ------------------------------------------------------------------------------------
  microstructure = np.zeros(info['grid'].prod(),'i')                                                # initialize as flat array
  i = 0
# --- read data ------------------------------------------------------------------------------------

  while table.data_read():
    items = table.data
    if len(items) > 2:
      if   items[1].lower() == 'of': items = [int(items[2])]*int(items[0])
      elif items[1].lower() == 'to': items = xrange(int(items[0]),1+int(items[2]))
      else:                          items = map(int,items)
    else:                            items = map(int,items)

    s = len(items)
    microstructure[i:i+s] = items
    i += s
  microstructure = table.microstructure_read(info['grid']).reshape(info['grid'],order='F')          # read microstructure

  table.close()

  neighborhood = neighborhoods[options.neighborhood]
  convoluted = np.empty([len(neighborhood)]+list(info['grid']+2),'i')
  structure = periodic_3Dpad(microstructure.reshape(info['grid'],order='F'))
  structure = periodic_3Dpad(microstructure)

  for i,p in enumerate(neighborhood):
    stencil = np.zeros((3,3,3),'i')

@@ -222,47 +171,56 @@ for file in files:
                p[2]+1] = 1
    convoluted[i,:,:,:] = ndimage.convolve(structure,stencil)

  distance = np.ones((len(feature_list),info['grid'][0],info['grid'][1],info['grid'][2]),'d')
#  distance = np.ones(info['grid'],'d')

  convoluted = np.sort(convoluted,axis = 0)
  uniques = np.where(convoluted[0,1:-1,1:-1,1:-1] != 0, 1,0)                                        # initialize unique value counter (exclude myself [= 0])
  uniques = np.where(convoluted[0,1:-1,1:-1,1:-1] != 0, 1,0)                                        # initialize unique value counter (exclude myself [= 0])

  for i in xrange(1,len(neighborhood)):                                                             # check remaining points in neighborhood
  for i in xrange(1,len(neighborhood)):                                                             # check remaining points in neighborhood
    uniques += np.where(np.logical_and(
                         convoluted[i,1:-1,1:-1,1:-1] != convoluted[i-1,1:-1,1:-1,1:-1],            # flip of ID difference detected?
                         convoluted[i,1:-1,1:-1,1:-1] != 0),                                        # not myself?
                        1,0)                                                                        # count flip
                         convoluted[i,1:-1,1:-1,1:-1] != convoluted[i-1,1:-1,1:-1,1:-1],            # flip of ID difference detected?
                         convoluted[i,1:-1,1:-1,1:-1] != 0),                                        # not myself?
                        1,0)                                                                        # count flip
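# Sketch of the flip counting above (hypothetical sorted neighbor IDs for one
# voxel, 0 standing for "same ID as me"): [0, 2, 2, 3, 7] starts the counter
# at 0 (first entry is 0) and adds three flips (0->2, 2->3, 3->7), giving
# uniques = 3 distinct alien IDs -- enough for any feature up to a
# quadruplepoint ('aliens': 3).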

  for i,feature_id in enumerate(feature_list):
    distance[i,:,:,:] = np.where(uniques >= features[feature_id]['aliens'],0.0,1.0)                 # seed with 0.0 when enough unique neighbor IDs are present
  for feature in feature_list:

  for i in xrange(len(feature_list)):
    distance[i,:,:,:] = ndimage.morphology.distance_transform_edt(distance[i,:,:,:])*[options.scale]*3
    table = damask.ASCIItable(name = name, outname = features[feature]['alias'][0]+'_'+name,
                              buffered = False, labeled = False, writeonly = True)

  for i,feature in enumerate(feature_list):
    newInfo['microstructures'] = int(math.ceil(distance[i,:,:,:].max()))
    distance = np.where(uniques >= features[feature]['aliens'],0.0,1.0)                             # seed with 0.0 when enough unique neighbor IDs are present
    distance = ndimage.morphology.distance_transform_edt(distance)*[options.scale]*3

#  for i in xrange(len(feature_list)):
#    distance[i,:,:,:] = ndimage.morphology.distance_transform_edt(distance[i,:,:,:])*[options.scale]*3

#  for i,feature in enumerate(feature_list):
    info['microstructures'] = int(math.ceil(distance.max()))
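# Sketch of the distance transform above (hypothetical 1-D input):
#   ndimage.morphology.distance_transform_edt([1,1,0,1,1]) -> [2., 1., 0., 1., 2.]
# every nonzero voxel receives its Euclidean distance to the closest seeded (0)
# voxel; the trailing *[options.scale]*3 then scales the three axes to voxel size.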

#--- write header ---------------------------------------------------------------------------------
    table = damask.ASCIItable(file['input'],file['output'][i],labels = False)
    table.labels_clear()

    table.info_clear()
    table.info_append(extra_header+[
      scriptID + ' ' + ' '.join(sys.argv[1:]),
      "grid\ta %i\tb %i\tc %i"%(info['grid'][0],info['grid'][1],info['grid'][2],),
      "size\tx %f\ty %f\tz %f"%(info['size'][0],info['size'][1],info['size'][2],),
      "origin\tx %f\ty %f\tz %f"%(info['origin'][0],info['origin'][1],info['origin'][2],),
      "homogenization\t%i"%info['homogenization'],
      "microstructures\t%i"%(newInfo['microstructures']),
      "grid\ta {grid[0]}\tb {grid[1]}\tc {grid[2]}".format(grid=info['grid']),
      "size\tx {size[0]}\ty {size[1]}\tz {size[2]}".format(size=info['size']),
      "origin\tx {origin[0]}\ty {origin[1]}\tz {origin[2]}".format(origin=info['origin']),
      "homogenization\t{homog}".format(homog=info['homogenization']),
      "microstructures\t{microstructures}".format(microstructures=info['microstructures']),
      ])
    table.labels_clear()
    table.head_write()
    table.output_flush()

# --- write microstructure information ------------------------------------------------------------
    formatwidth = int(math.floor(math.log10(distance[i,:,:,:].max())+1))
    table.data = distance[i,:,:,:].reshape((info['grid'][0],info['grid'][1]*info['grid'][2]),order='F').transpose()

    formatwidth = int(math.floor(math.log10(distance.max())+1))
    table.data = distance.reshape((info['grid'][0],info['grid'][1]*info['grid'][2]),order='F').transpose()
    table.data_writeArray('%%%ii'%(formatwidth),delimiter=' ')
    file['output'][i].close()

#--- output finalization --------------------------------------------------------------------------
  if file['name'] != 'STDIN':
    table.input_close()

    table.close()



### 'output':[open(features[feature]['names'][0]+'_'+name,'w') for feature in feature_list],


@@ -0,0 +1,108 @@
#!/usr/bin/env python
# -*- coding: UTF-8 no BOM -*-

import os,sys,math,string
import numpy as np
from optparse import OptionParser
from PIL import Image,ImageOps
import damask

scriptID   = string.replace('$Id$','\n','\\n')
scriptName = os.path.splitext(scriptID.split()[1])[0]

#--------------------------------------------------------------------------------------------------
# MAIN
#--------------------------------------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """

Generate geometry description from (multilayer) images.
Microstructure index is based on gray scale value (1..256).

""", version = scriptID)

parser.add_option('--homogenization',
                  dest = 'homogenization',
                  type = 'int', metavar = 'int',
                  help = 'homogenization index [%default]')

parser.set_defaults(homogenization = 1,
                   )

(options,filenames) = parser.parse_args()

# --- loop over input files -------------------------------------------------------------------------

if filenames == []: filenames = ['STDIN']

for name in filenames:
  if not (name == 'STDIN' or os.path.exists(name)): continue
  table = damask.ASCIItable(name = name, outname = os.path.splitext(name)[0] +'.geom',
                            buffered = False, labeled = False)
  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

# --- read image ------------------------------------------------------------------------------------

  img = Image.open(name).convert(mode = 'L')                                                        # open and convert to grayscale 8bit

  slice = 0
  while True:
    try:
      img.seek(slice)                                                                               # advance to slice
      layer = np.expand_dims(1+np.array(img,dtype='uint16'),axis = 0)                               # read image layer
      microstructure = layer if slice == 0 else np.vstack((microstructure,layer))                   # add to microstructure data
      slice += 1                                                                                    # advance to next slice
    except EOFError:
      break
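# Sketch of the multi-frame protocol used above (hypothetical file name):
#   img = Image.open('stack.tif').convert(mode = 'L')
#   img.seek(2)                              # jump to the third slice
# PIL exposes multilayer images (e.g. TIFF stacks) one frame at a time and
# raises EOFError past the last frame, which is why the loop advances until
# EOFError rather than asking for a frame count.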

# http://docs.scipy.org/doc/scipy/reference/ndimage.html
# http://scipy-lectures.github.io/advanced/image_processing/

  info = {
          'grid':            np.array(microstructure.shape,'i')[::-1],
          'size':            np.array(microstructure.shape,'d')[::-1],
          'origin':          np.zeros(3,'d'),
          'microstructures': len(np.unique(microstructure)),
          'homogenization':  options.homogenization,
         }

# --- report ---------------------------------------------------------------------------------------

  table.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))),
               'size x y z: %s'%(' x '.join(map(str,info['size']))),
               'origin x y z: %s'%(' : '.join(map(str,info['origin']))),
               'homogenization: %i'%info['homogenization'],
               'microstructures: %i'%info['microstructures'],
              ])

  errors = []
  if np.any(info['grid'] < 1):    errors.append('invalid grid a b c.')
  if np.any(info['size'] <= 0.0): errors.append('invalid size x y z.')
  if errors != []:
    table.croak(errors)
    table.close(dismiss = True)
    continue

# --- write header ---------------------------------------------------------------------------------

  table.info_clear()
  table.info_append([
    scriptID + ' ' + ' '.join(sys.argv[1:]),
    "grid\ta {grid[0]}\tb {grid[1]}\tc {grid[2]}".format(grid=info['grid']),
    "size\tx {size[0]}\ty {size[1]}\tz {size[2]}".format(size=info['size']),
    "origin\tx {origin[0]}\ty {origin[1]}\tz {origin[2]}".format(origin=info['origin']),
    "homogenization\t{homog}".format(homog=info['homogenization']),
    "microstructures\t{microstructures}".format(microstructures=info['microstructures']),
    ])
  table.labels_clear()
  table.head_write()
  table.output_flush()

# --- write microstructure information ------------------------------------------------------------

  formatwidth = int(math.floor(math.log10(microstructure.max())+1))
  table.data = microstructure.reshape((info['grid'][1]*info['grid'][2],info['grid'][0]),order='C')
  table.data_writeArray('%%%ii'%(formatwidth),delimiter = ' ')

# --- output finalization --------------------------------------------------------------------------

  table.close()                                                                                     # close ASCII table

@@ -27,79 +27,115 @@ Generate a geometry file of a bicontinuous structure of given type.
""", version = scriptID)
|
||||
|
||||
|
||||
parser.add_option('-t','--type', dest='type', choices=minimal_surfaces, metavar='string', \
|
||||
help='type of minimal surface [primitive] {%s}' %(','.join(minimal_surfaces)))
|
||||
parser.add_option('-f','--threshold', dest='threshold', type='float', metavar='float', \
|
||||
help='threshold value defining minimal surface [%default]')
|
||||
parser.add_option('-g', '--grid', dest='grid', type='int', nargs=3, metavar='int int int', \
|
||||
help='a,b,c grid of hexahedral box [%default]')
|
||||
parser.add_option('-s', '--size', dest='size', type='float', nargs=3, metavar='float float float', \
|
||||
help='x,y,z size of hexahedral box [%default]')
|
||||
parser.add_option('-p', '--periods', dest='periods', type='int', metavar= 'int', \
|
||||
help='number of repetitions of unit cell [%default]')
|
||||
parser.add_option('--homogenization', dest='homogenization', type='int', metavar= 'int', \
|
||||
help='homogenization index to be used [%default]')
|
||||
parser.add_option('--m', dest='microstructure', type='int', nargs = 2, metavar= 'int int', \
|
||||
help='two microstructure indices to be used [%default]')
|
||||
parser.add_option('-2', '--twodimensional', dest='twoD', action='store_true', \
|
||||
help='output geom file with two-dimensional data arrangement [%default]')
|
||||
parser.set_defaults(type = minimal_surfaces[0])
|
||||
parser.set_defaults(threshold = 0.0)
|
||||
parser.set_defaults(periods = 1)
|
||||
parser.set_defaults(grid = (16,16,16))
|
||||
parser.set_defaults(size = (1.0,1.0,1.0))
|
||||
parser.set_defaults(homogenization = 1)
|
||||
parser.set_defaults(microstructure = (1,2))
|
||||
parser.set_defaults(twoD = False)
|
||||
parser.add_option('-t','--type',
|
||||
dest = 'type',
|
||||
choices = minimal_surfaces, metavar = 'string',
|
||||
help = 'type of minimal surface [primitive] {%s}' %(','.join(minimal_surfaces)))
|
||||
parser.add_option('-f','--threshold',
|
||||
dest = 'threshold',
|
||||
type = 'float', metavar = 'float',
|
||||
help = 'threshold value defining minimal surface [%default]')
|
||||
parser.add_option('-g', '--grid',
|
||||
dest = 'grid',
|
||||
type = 'int', nargs = 3, metavar = 'int int int',
|
||||
help = 'a,b,c grid of hexahedral box [%default]')
|
||||
parser.add_option('-s', '--size',
|
||||
dest = 'size',
|
||||
type = 'float', nargs = 3, metavar = 'float float float',
|
||||
help = 'x,y,z size of hexahedral box [%default]')
|
||||
parser.add_option('-p', '--periods',
|
||||
dest = 'periods',
|
||||
type = 'int', metavar = 'int',
|
||||
help = 'number of repetitions of unit cell [%default]')
|
||||
parser.add_option('--homogenization',
|
||||
dest = 'homogenization',
|
||||
type = 'int', metavar = 'int',
|
||||
help = 'homogenization index to be used [%default]')
|
||||
parser.add_option('--m',
|
||||
dest = 'microstructure',
|
||||
type = 'int', nargs = 2, metavar = 'int int',
|
||||
help = 'two microstructure indices to be used [%default]')
|
||||
parser.add_option('-1', '--onedimensional',
|
||||
dest = 'oneD',
|
||||
action = 'store_true',
|
||||
help = 'output geom file with two-dimensional data arrangement [%default]')
parser.set_defaults(type = minimal_surfaces[0],
                    threshold = 0.0,
                    periods = 1,
                    grid = (16,16,16),
                    size = (1.0,1.0,1.0),
                    homogenization = 1,
                    microstructure = (1,2),
                    oneD = False,
                   )

(options,filename) = parser.parse_args()
(options,filenames) = parser.parse_args()

# ------------------------------------------ setup file handle -------------------------------------
if filename == []:
  file = {'output':sys.stdout, 'croak':sys.stderr}
else:
  file = {'output':open(filename[0],'w'), 'croak':sys.stderr}
# --- loop over input files -------------------------------------------------------------------------

info = {
        'grid':   np.array(options.grid),
        'size':   np.array(options.size),
        'origin': np.zeros(3,'d'),
        'microstructures': max(options.microstructure),
        'homogenization':  options.homogenization
       }
if filenames == []: filenames = ['STDIN']

for name in filenames:
  if not (name == 'STDIN' or os.path.exists(name)): continue
  table = damask.ASCIItable(name = name, outname = name,
                            buffered = False, labeled = False, writeonly = True)
  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))


# ------------------------------------------ make grid -------------------------------------

  info = {
          'grid':   np.array(options.grid),
          'size':   np.array(options.size),
          'origin': np.zeros(3,'d'),
          'microstructures': max(options.microstructure),
          'homogenization':  options.homogenization
         }

#--- report ---------------------------------------------------------------------------------------
  file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
  file['croak'].write('grid a b c: %s\n'%(' x '.join(map(str,info['grid']))) + \
                      'size x y z: %s\n'%(' x '.join(map(str,info['size']))) + \
                      'origin x y z: %s\n'%(' : '.join(map(str,info['origin']))) + \
                      'homogenization: %i\n'%info['homogenization'] + \
                      'microstructures: %i\n\n'%info['microstructures'])

  if np.any(info['grid'] < 1):
    file['croak'].write('invalid grid a b c.\n')
    sys.exit()
  if np.any(info['size'] <= 0.0):
    file['croak'].write('invalid size x y z.\n')
    sys.exit()
  table.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))),
               'size x y z: %s'%(' x '.join(map(str,info['size']))),
               'origin x y z: %s'%(' : '.join(map(str,info['origin']))),
               'homogenization: %i'%info['homogenization'],
               'microstructures: %i'%info['microstructures'],
              ])

  errors = []
  if np.any(info['grid'] < 1):    errors.append('invalid grid a b c.')
  if np.any(info['size'] <= 0.0): errors.append('invalid size x y z.')
  if errors != []:
    table.croak(errors)
    table.close(dismiss = True)
    continue

#--- write header ---------------------------------------------------------------------------------
  header = [scriptID + ' ' + ' '.join(sys.argv[1:])+'\n']
  header.append("grid\ta %i\tb %i\tc %i\n"%(info['grid'][0],info['grid'][1],info['grid'][2],))
  header.append("size\tx %f\ty %f\tz %f\n"%(info['size'][0],info['size'][1],info['size'][2],))
  header.append("origin\tx %f\ty %f\tz %f\n"%(info['origin'][0],info['origin'][1],info['origin'][2],))
  header.append("microstructures\t%i\n"%info['microstructures'])
  header.append("homogenization\t%i\n"%info['homogenization'])
  file['output'].write('%i\theader\n'%(len(header))+''.join(header))

  table.labels_clear()
  table.info_clear()
  table.info_append([
    scriptID + ' ' + ' '.join(sys.argv[1:]),
    "grid\ta {grid[0]}\tb {grid[1]}\tc {grid[2]}".format(grid=info['grid']),
    "size\tx {size[0]}\ty {size[1]}\tz {size[2]}".format(size=info['size']),
    "origin\tx {origin[0]}\ty {origin[1]}\tz {origin[2]}".format(origin=info['origin']),
    "homogenization\t{homog}".format(homog=info['homogenization']),
    "microstructures\t{microstructures}".format(microstructures=info['microstructures']),
    ])
  table.head_write()

#--- write data -----------------------------------------------------------------------------------
  for z in xrange(options.grid[2]):
    Z = options.periods*2.0*math.pi*(z+0.5)/options.grid[2]
    for y in xrange(options.grid[1]):
      Y = options.periods*2.0*math.pi*(y+0.5)/options.grid[1]
      for x in xrange(options.grid[0]):
        X = options.periods*2.0*math.pi*(x+0.5)/options.grid[0]
        file['output'].write(str(options.microstructure[0]) if options.threshold > surface[options.type](X,Y,Z)
                        else str(options.microstructure[1]))
        file['output'].write(' ' if options.twoD else '\n')
      file['output'].write('\n' if options.twoD else '')
  X = options.periods*2.0*math.pi*(np.arange(options.grid[0])+0.5)/options.grid[0]
  Y = options.periods*2.0*math.pi*(np.arange(options.grid[1])+0.5)/options.grid[1]
  Z = options.periods*2.0*math.pi*(np.arange(options.grid[2])+0.5)/options.grid[2]

  for z in xrange(options.grid[2]):
    for y in xrange(options.grid[1]):
      table.data_clear()
      for x in xrange(options.grid[0]):
        table.data_append(options.microstructure[options.threshold < surface[options.type](X[x],Y[y],Z[z])])
        if options.oneD:
          table.data_write()
          table.data_clear()
      table.data_write()
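# Sketch of the boolean indexing above (hypothetical numbers): with
# microstructure = (1,2), the comparison result acts as a tuple index,
# False -> 0 and True -> 1, so
#   (1,2)[0.0 < 1.5] -> 2
# voxels above the threshold receive the second microstructure index.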

  table.close()


@@ -32,36 +32,62 @@ Generate geometry description and material configuration from position, phase, a

""", version = scriptID)

parser.add_option('--coordinates', dest='coordinates', type='string', metavar='string',
                  help='coordinates label')
parser.add_option('--phase', dest='phase', type='string', metavar='string',
                  help='phase label')
parser.add_option('-t', '--tolerance', dest='tolerance', type='float', metavar='float',
parser.add_option('--coordinates',
                  dest = 'coordinates',
                  type = 'string', metavar = 'string',
                  help = 'coordinates label')
parser.add_option('--phase',
                  dest = 'phase',
                  type = 'string', metavar = 'string',
                  help = 'phase label')
parser.add_option('-t', '--tolerance',
                  dest = 'tolerance',
                  type = 'float', metavar = 'float',
                  help = 'angular tolerance for orientation squashing [%default]')
parser.add_option('-e', '--eulers', dest='eulers', metavar='string',
parser.add_option('-e', '--eulers',
                  dest = 'eulers',
                  type = 'string', metavar = 'string',
                  help = 'Euler angles label')
parser.add_option('-d', '--degrees', dest='degrees', action='store_true',
parser.add_option('-d', '--degrees',
                  dest = 'degrees',
                  action = 'store_true',
                  help = 'angles are given in degrees [%default]')
parser.add_option('-m', '--matrix', dest='matrix', metavar='string',
parser.add_option('-m', '--matrix',
                  dest = 'matrix',
                  type = 'string', metavar = 'string',
                  help = 'orientation matrix label')
parser.add_option('-a', dest='a', metavar='string',
parser.add_option('-a',
                  dest='a',
                  type = 'string', metavar = 'string',
                  help = 'crystal frame a vector label')
parser.add_option('-b', dest='b', metavar='string',
parser.add_option('-b',
                  dest='b',
                  type = 'string', metavar = 'string',
                  help = 'crystal frame b vector label')
parser.add_option('-c', dest='c', metavar='string',
parser.add_option('-c',
                  dest = 'c',
                  type = 'string', metavar='string',
                  help = 'crystal frame c vector label')
parser.add_option('-q', '--quaternion', dest='quaternion', metavar='string',
parser.add_option('-q', '--quaternion',
                  dest = 'quaternion',
                  type = 'string', metavar='string',
                  help = 'quaternion label')
parser.add_option( '--axes', dest='axes', nargs=3, metavar=' '.join(['string']*3),
parser.add_option('--axes',
                  dest = 'axes',
                  type = 'string', nargs = 3, metavar = ' '.join(['string']*3),
                  help = 'orientation coordinate frame in terms of position coordinate frame [same]')
parser.add_option('-s', '--symmetry', dest='symmetry', action='extend',
                  metavar='<string LIST>',
                  help = 'crystal symmetry [%s] {%s} '%(damask.Symmetry.lattices[-1],
                                                        ', '.join(damask.Symmetry.lattices[1:])))
parser.add_option('--homogenization', dest='homogenization', type='int', metavar='int',
                  help='homogenization index to be used [%default]')
parser.add_option('--crystallite', dest='crystallite', type='int', metavar='int',
                  help='crystallite index to be used [%default]')
parser.add_option('-s', '--symmetry',
                  dest = 'symmetry',
                  action = 'extend', metavar = '<string LIST>',
                  help = 'crystal symmetry %default {{{}}} '.format(', '.join(damask.Symmetry.lattices[1:])))
parser.add_option('--homogenization',
                  dest = 'homogenization',
                  type = 'int', metavar = 'int',
                  help = 'homogenization index to be used [%default]')
parser.add_option('--crystallite',
                  dest = 'crystallite',
                  type = 'int', metavar = 'int',
                  help = 'crystallite index to be used [%default]')

parser.set_defaults(symmetry = [damask.Symmetry.lattices[-1]],
                    tolerance = 0.0,

@@ -82,7 +108,7 @@ input = [options.eulers != None,

if np.sum(input) != 1: parser.error('needs exactly one orientation input format...')
if options.axes != None and not set(options.axes).issubset(set(['x','+x','-x','y','+y','-y','z','+z','-z'])):
  parser.error('invalid axes %s %s %s'%tuple(options.axes))
  parser.error('invalid axes {axes[0]} {axes[1]} {axes[2]}'.format(axes=options.axes))

(label,dim,inputtype) = [(options.eulers,3,'eulers'),
                         ([options.a,options.b,options.c],[3,3,3],'frame'),

@@ -90,36 +116,33 @@ if options.axes != None and not set(options.axes).issubset(set(['x','+x','-x','y
                         (options.quaternion,4,'quaternion'),
                        ][np.where(input)[0][0]]                                                    # select input label that was requested
toRadians = math.pi/180.0 if options.degrees else 1.0                                               # rescale degrees to radians
options.tolerance *= toRadians                                                                      # angular tolerance in radians
options.tolerance *= toRadians                                                                      # ensure angular tolerance in radians

# --- loop over input files -------------------------------------------------------------------------
if filenames == []:
  filenames = ['STDIN']

if filenames == []: filenames = ['STDIN']

for name in filenames:
  if name == 'STDIN':
    file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr}
    file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
  else:
    if not os.path.exists(name): continue
    file = {'name':name,
            'input':open(name),
            'output':open(name + '_tmp','w'),
            'croak':sys.stderr}
    file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
  if not (name == 'STDIN' or os.path.exists(name)): continue
  table = damask.ASCIItable(name = name, outname = os.path.splitext(name)[0] + '.geom',
                            buffered = False)
  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

# ------------------------------------------ read head ---------------------------------------

  table = damask.ASCIItable(file['input'],file['output'],buffered=False)                            # make unbuffered ASCII_table
  table.head_read()                                                                                 # read ASCII header info

# ------------------------------------------ sanity checks ---------------------------------------

  errors = []
  if table.label_dimension(options.coordinates) != 2:
    errors.append('coordinates %s need to have two dimensions...'%options.coordinates)
    errors.append('coordinates {} need to have two dimensions.'.format(options.coordinates))
  if not np.all(table.label_dimension(label) == dim):
    errors.append('orientation %s needs to have dimension %i...\n'%(label,dim))
    errors.append('orientation {} needs to have dimension {}.'.format(label,dim))
  if options.phase != None and table.label_dimension(options.phase) != 1:
    errors.append('phase column %s is not scalar...'%options.phase)
    errors.append('phase column {} is not scalar.'.format(options.phase))

  if errors == []:
  if errors == []:                                                                                  # so far no errors?
    table.data_readArray([options.coordinates,label]+([] if options.phase == None else [options.phase]))

    if options.phase == None:

@@ -135,10 +158,10 @@ for name in filenames:
    if nX*nY != len(table.data) \
      or np.any(np.abs(np.log10((coordsX[1:]-coordsX[:-1])/dX)) > 0.01) \
      or np.any(np.abs(np.log10((coordsY[1:]-coordsY[:-1])/dY)) > 0.01):
      errors.append('data is not on square grid...')
      errors.append('data is not on square grid.')

  if errors != []:
    file['croak'].write('\n'.join(errors)+'\n')
    table.croak(errors)
    table.close(dismiss = True)
    continue


@@ -149,7 +172,7 @@ for name in filenames:
  index = np.lexsort((table.data[:,0],table.data[:,1]))                                             # index of rank when sorting x fast, y slow
  rank = np.argsort(index)                                                                          # rank of index
  KDTree = scipy.spatial.KDTree((table.data[:,:2]-np.array([coordsX[0],coordsY[0]])) \
                                / np.array([dX,dY]))                                                # build KDTree with dX = dY = 1
                                / np.array([dX,dY]))                                                # build KDTree with dX = dY = 1

  microstructure = np.zeros(nX*nY,dtype='uint32')                                                   # initialize empty microstructure
  symQuats = []                                                                                     # empty list of sym equiv orientations
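# Sketch of a lookup on the normalized tree above (hypothetical grid point):
# with coordinates shifted to the grid origin and divided by (dX,dY), direct
# grid neighbors sit at unit spacing, so e.g.
#   dist,idx = KDTree.query([7.,3.])          # nearest stored point, grid units
# returns distances that can be compared against 1 directly.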

@@ -158,8 +181,7 @@ for name in filenames:
  myRank = 0                                                                                        # rank of current grid point
  for y in xrange(nY):
    for x in xrange(nX):
      if (myRank+1)%max(1,nX*nY/100) == 0:
        file['croak'].write('.')
      if (myRank+1)%(nX*nY/100.) < 1: table.croak('.',False)
      myData = table.data[index[myRank]]
      mySym = options.symmetry[min(int(myData[colPhase]),len(options.symmetry))-1]                  # select symmetry from option (take last specified option for all with higher index)
      if inputtype == 'eulers':

@@ -189,21 +211,22 @@ for name in filenames:
        for n in neighbors:                                                                         # check each neighbor
          if myRank <= rank[n] or table.data[n,colPhase] != myData[colPhase]: continue              # skip myself, anyone further ahead (cannot yet have a grain ID), and other phases
          for q in symQuats[microstructure[rank[n]]-1]:
            if abs((q*oInv).asAngleAxis()[0]) <= options.tolerance:                                 # found existing orientation resembling me
            if abs((q*oInv).asAngleAxis()[0]) <= options.tolerance:                                 # found existing orientation resembling me
              microstructure[myRank] = microstructure[rank[n]]
              breaker = True; break
          if breaker: break

      if microstructure[myRank] == 0:                                                               # no other orientation resembled me
        nGrains += 1
        microstructure[myRank] = nGrains
        nGrains += 1                                                                                # make new grain ...
        microstructure[myRank] = nGrains                                                            # ... and assign to me
        symQuats.append(o.equivalentQuaternions())                                                  # store all symmetrically equivalent orientations for future comparison
        phases.append(myData[colPhase])                                                            # store phase info for future reporting

      myRank += 1
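# Sketch of the matching rule applied above (hypothetical quaternions): a grid
# point joins the grain of neighbor n when any symmetrically equivalent
# orientation q of that grain satisfies
#   abs((q*oInv).asAngleAxis()[0]) <= options.tolerance
# i.e. when the misorientation angle to the neighbor's grain falls within the
# --tolerance option (converted to radians earlier).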

  file['croak'].write('\n')
#--- generate header ----------------------------------------------------------------------------
  table.croak('')

# --- generate header ----------------------------------------------------------------------------

  info = {
          'grid': np.array([nX,nY,1]),

@@ -217,14 +240,15 @@ for name in filenames:
          'microstructures': nGrains,
          'homogenization':  options.homogenization,
         }

  file['croak'].write('grid a b c: %s\n'%(' x '.join(map(str,info['grid']))) + \
                      'size x y z: %s\n'%(' x '.join(map(str,info['size']))) + \
                      'origin x y z: %s\n'%(' : '.join(map(str,info['origin']))) + \
                      'homogenization: %i\n'%info['homogenization'] + \
                      'microstructures: %i\n'%info['microstructures'])

#--- write header ---------------------------------------------------------------------------------

  table.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))),
               'size x y z: %s'%(' x '.join(map(str,info['size']))),
               'origin x y z: %s'%(' : '.join(map(str,info['origin']))),
               'homogenization: %i'%info['homogenization'],
               'microstructures: %i'%info['microstructures'],
              ])

# --- write header ---------------------------------------------------------------------------------

  formatwidth = 1+int(math.log10(info['microstructures']))


@@ -246,11 +270,11 @@ for name in filenames:
  table.info_clear()
  table.info_append([
    scriptID + ' ' + ' '.join(sys.argv[1:]),
    "grid\ta %i\tb %i\tc %i"%(info['grid'][0],info['grid'][1],info['grid'][2],),
    "size\tx %f\ty %f\tz %f"%(info['size'][0],info['size'][1],info['size'][2],),
    "origin\tx %f\ty %f\tz %f"%(info['origin'][0],info['origin'][1],info['origin'][2],),
    "homogenization\t%i"%info['homogenization'],
    "microstructures\t%i"%(info['microstructures']),
    "grid\ta {grid[0]}\tb {grid[1]}\tc {grid[2]}".format(grid=info['grid']),
    "size\tx {size[0]}\ty {size[1]}\tz {size[2]}".format(size=info['size']),
    "origin\tx {origin[0]}\ty {origin[1]}\tz {origin[2]}".format(origin=info['origin']),
    "homogenization\t{homog}".format(homog=info['homogenization']),
    "microstructures\t{microstructures}".format(microstructures=info['microstructures']),
    config_header,
    ])
  table.head_write()

@@ -263,6 +287,3 @@ for name in filenames:
#--- output finalization --------------------------------------------------------------------------

  table.close()
  if file['name'] != 'STDIN':
    os.rename(file['name']+'_tmp',
              os.path.splitext(file['name'])[0] + '.geom')

@@ -3,6 +3,7 @@

import os,re,sys,math,string
import numpy as np
import multiprocessing
from optparse import OptionParser
import damask


@@ -30,286 +31,323 @@ def meshgrid2(*arrs):
    ans.insert(0,arr2)
  return tuple(ans)

def laguerreTessellation(undeformed, coords, weights, grain):
def findClosestSeed(fargs):
  point, seeds, weightssquared = fargs
  tmp = np.repeat(point.reshape(3,1), len(seeds), axis=1).T
  dist = np.sum((tmp - seeds)*(tmp - seeds),axis=1) - weightssquared
  return np.argmin(dist)                                                                            # seed point closest to point
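# Sketch of the weighted metric above (hypothetical numbers): for squared
# distances [4.0, 9.0] and squared weights [0.0, 6.0],
#   np.argmin(np.array([4.0, 9.0]) - np.array([0.0, 6.0])) -> 1
# the farther but more heavily weighted seed wins, which is what distinguishes
# a Laguerre from a plain Voronoi tessellation (all weights zero).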
|
||||
|
||||
weight = np.power(np.tile(weights, 27),2) # Laguerre weights (squared)
|
||||
micro = np.zeros(undeformed.shape[0])
|
||||
N = coords.shape[0] # Number of seeds points
|
||||
periodic = np.array([
|
||||
[ -1,-1,-1 ],
|
||||
[ 0,-1,-1 ],
|
||||
[ 1,-1,-1 ],
|
||||
[ -1, 0,-1 ],
|
||||
[ 0, 0,-1 ],
|
||||
[ 1, 0,-1 ],
|
||||
[ -1, 1,-1 ],
|
||||
[ 0, 1,-1 ],
|
||||
[ 1, 1,-1 ],
|
||||
[ -1,-1, 0 ],
|
||||
[ 0,-1, 0 ],
|
||||
[ 1,-1, 0 ],
|
||||
[ -1, 0, 0 ],
|
||||
[ 0, 0, 0 ],
|
||||
[ 1, 0, 0 ],
|
||||
[ -1, 1, 0 ],
|
||||
[ 0, 1, 0 ],
|
||||
[ 1, 1, 0 ],
|
||||
[ -1,-1, 1 ],
|
||||
[ 0,-1, 1 ],
|
||||
[ 1,-1, 1 ],
|
||||
[ -1, 0, 1 ],
|
||||
[ 0, 0, 1 ],
|
||||
[ 1, 0, 1 ],
|
||||
[ -1, 1, 1 ],
|
||||
[ 0, 1, 1 ],
|
||||
[ 1, 1, 1 ],
|
||||
]).astype(float)
|
||||
|
||||
def laguerreTessellation(undeformed, coords, weights, grains, nonperiodic = False, cpus = 2):
|
||||
|
||||
copies = \
|
||||
np.array([
|
||||
[ 0, 0, 0 ],
|
||||
]).astype(float) if nonperiodic else \
|
||||
np.array([
|
||||
[ -1,-1,-1 ],
|
||||
[ 0,-1,-1 ],
|
||||
[ 1,-1,-1 ],
|
||||
[ -1, 0,-1 ],
|
||||
[ 0, 0,-1 ],
|
||||
[ 1, 0,-1 ],
|
||||
[ -1, 1,-1 ],
|
||||
[ 0, 1,-1 ],
|
||||
[ 1, 1,-1 ],
|
||||
[ -1,-1, 0 ],
|
||||
[ 0,-1, 0 ],
|
||||
[ 1,-1, 0 ],
|
||||
[ -1, 0, 0 ],
|
||||
[ 0, 0, 0 ],
|
||||
[ 1, 0, 0 ],
|
||||
[ -1, 1, 0 ],
|
||||
[ 0, 1, 0 ],
|
||||
[ 1, 1, 0 ],
|
||||
[ -1,-1, 1 ],
|
||||
[ 0,-1, 1 ],
|
||||
[ 1,-1, 1 ],
|
||||
[ -1, 0, 1 ],
|
||||
[ 0, 0, 1 ],
|
||||
[ 1, 0, 1 ],
|
||||
[ -1, 1, 1 ],
|
||||
[ 0, 1, 1 ],
|
||||
[ 1, 1, 1 ],
|
||||
]).astype(float)
|
||||
|
||||
squaredweights = np.power(np.tile(weights,len(copies)),2) # Laguerre weights (squared, size N*n)
|
||||
# micro = np.zeros(undeformed.shape[0],'i')
|
||||
N = coords.shape[0] # Number of seeds points
|
||||
|
||||
for i,vec in enumerate(periodic):
|
||||
for i,vec in enumerate(copies): # periodic copies of seed points (size N*n)
|
||||
seeds = np.append(seeds, coords+vec, axis=0) if i > 0 else coords+vec
|
||||
|
||||
arguments = [[arg] + [seeds,squaredweights] for arg in list(undeformed)]
|
||||
|
||||
# Initialize workers
|
||||
pool = multiprocessing.Pool(processes = cpus)
|
||||
|
||||
# Evaluate function
|
||||
result = pool.map_async(findClosestSeed, arguments)
|
||||
# closestSeeds = np.array(pool.map_async(findClosestSeed, arguments),'i')
|
||||
pool.close()
|
||||
pool.join()
|
||||
|
||||
for i,point in enumerate(undeformed):
|
||||
|
||||
tmp = np.repeat(point.reshape(3,1), N*27, axis=1).T
|
||||
dist = np.sum((tmp - seeds)*(tmp - seeds),axis=1) - weight
|
||||
micro[i] = grain[np.argmin(dist)%N]
|
||||
|
||||
return micro
|
||||
closestSeeds = np.array(result.get()).flatten()
|
||||
print 'shape of result',closestSeeds.shape # debug output: one closest-seed index per undeformed point
|
||||
|
||||
return grains[closestSeeds%N]
|
||||
|
||||
# for i,point in enumerate(undeformed):
|
||||
# tmp = np.repeat(point.reshape(3,1), N*len(copies), axis=1).T
|
||||
# dist = np.sum((tmp - seeds)*(tmp - seeds),axis=1) - squaredweights
|
||||
# micro[i] = grains[np.argmin(dist)%N]
|
||||
#
|
||||
# return micro
|
||||
|
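A minimal worked example of the power distance that findClosestSeed minimizes, |p - s|^2 - w^2, showing how a nonzero Laguerre weight lets a more distant seed win a point (values are illustrative only):

    import numpy as np

    seeds   = np.array([[0.00, 0., 0.],
                        [1.00, 0., 0.]])
    weights = np.array([0.0, 0.4])
    p       = np.array([0.45, 0., 0.])

    dist = np.sum((p - seeds)**2, axis=1) - weights**2
    print dist.argmin()  # -> 1: seed 1 wins although seed 0 is geometrically closer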
||||
# --------------------------------------------------------------------
|
||||
# MAIN
|
||||
# --------------------------------------------------------------------
|
||||
identifiers = {
|
||||
'grid': ['a','b','c'],
|
||||
'size': ['x','y','z'],
|
||||
'origin': ['x','y','z'],
|
||||
}
|
||||
mappings = {
|
||||
'grid': lambda x: int(x),
|
||||
'size': lambda x: float(x),
|
||||
'origin': lambda x: float(x),
|
||||
'homogenization': lambda x: int(x),
|
||||
'microstructures': lambda x: int(x),
|
||||
}
|
||||
|
||||
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
|
||||
Generate geometry description and material configuration by standard Voronoi tessellation of given seeds file.
|
||||
|
||||
""", version = scriptID)
|
||||
|
||||
parser.add_option('-g', '--grid', dest='grid', type='int', nargs = 3, metavar=' '.join(['int']*3),
|
||||
help='a,b,c grid of hexahedral box [from seeds file]')
|
||||
parser.add_option('-s', '--size', dest='size', type='float', nargs = 3, metavar=' '.join(['float']*3),
|
||||
help='x,y,z size of hexahedral box [1.0 along largest grid point number]')
|
||||
parser.add_option('-o', '--origin', dest='origin', type='float', nargs = 3, metavar=' '.join(['float']*3),
|
||||
help='offset from old to new origin of grid')
|
||||
parser.add_option('--homogenization', dest='homogenization', type='int', metavar = 'int',
|
||||
help='homogenization index to be used [%default]')
|
||||
parser.add_option('--phase', dest='phase', type='int', metavar = 'int',
|
||||
help='phase index to be used [%default]')
|
||||
parser.add_option('--crystallite', dest='crystallite', type='int', metavar = 'int',
|
||||
help='crystallite index to be used [%default]')
|
||||
parser.add_option('-c', '--configuration', dest='config', action='store_true',
|
||||
help='output material configuration [%default]')
|
||||
parser.add_option('-r', '--rnd', dest='randomSeed', type='int', metavar='int',
|
||||
help='seed of random number generator for second phase distribution [%default]')
|
||||
parser.add_option('--secondphase', type='float', dest='secondphase', metavar= 'float',
|
||||
help='volume fraction of randomly distributed second phase [%default]')
|
||||
parser.add_option('-l', '--laguerre', dest='laguerre', action='store_true',
|
||||
help='use Laguerre (weighted Voronoi) tessellation [%default]')
|
||||
parser.set_defaults(grid = (0,0,0),
|
||||
size = (0.0,0.0,0.0),
|
||||
origin = (0.0,0.0,0.0),
|
||||
parser.add_option('-g', '--grid',
|
||||
dest = 'grid',
|
||||
type = 'int', nargs = 3, metavar = ' '.join(['int']*3),
|
||||
help = 'a,b,c grid of hexahedral box [from seeds file]')
|
||||
parser.add_option('-s', '--size',
|
||||
dest = 'size',
|
||||
type = 'float', nargs = 3, metavar=' '.join(['float']*3),
|
||||
help = 'x,y,z size of hexahedral box [from seeds file or 1.0 along largest grid point number]')
|
||||
parser.add_option('-o', '--origin',
|
||||
dest = 'origin',
|
||||
type = 'float', nargs = 3, metavar=' '.join(['float']*3),
|
||||
help = 'offset from old to new origin of grid')
|
||||
parser.add_option('-p', '--position',
|
||||
dest = 'position',
|
||||
type = 'string', metavar = 'string',
|
||||
help = 'column label for seed positions [%default]')
|
||||
parser.add_option('-w', '--weight',
|
||||
dest = 'weight',
|
||||
type = 'string', metavar = 'string',
|
||||
help = 'column label for seed weights [%default]')
|
||||
parser.add_option('-m', '--microstructure',
|
||||
dest = 'microstructure',
|
||||
type = 'string', metavar = 'string',
|
||||
help = 'column label for seed microstructures [%default]')
|
||||
parser.add_option('-e', '--eulers',
|
||||
dest = 'eulers',
|
||||
type = 'string', metavar = 'string',
|
||||
help = 'column label for seed Euler angles [%default]')
|
||||
parser.add_option('--axes',
|
||||
dest = 'axes',
|
||||
type = 'string', nargs = 3, metavar = ' '.join(['string']*3),
|
||||
help = 'orientation coordinate frame in terms of position coordinate frame [same]')
|
||||
parser.add_option('--homogenization',
|
||||
dest = 'homogenization',
|
||||
type = 'int', metavar = 'int',
|
||||
help = 'homogenization index to be used [%default]')
|
||||
parser.add_option('--crystallite',
|
||||
dest = 'crystallite',
|
||||
type = 'int', metavar = 'int',
|
||||
help = 'crystallite index to be used [%default]')
|
||||
parser.add_option('--phase',
|
||||
dest = 'phase',
|
||||
type = 'int', metavar = 'int',
|
||||
help = 'phase index to be used [%default]')
|
||||
parser.add_option('-r', '--rnd',
|
||||
dest = 'randomSeed',
|
||||
type = 'int', metavar='int',
|
||||
help = 'seed of random number generator for second phase distribution [%default]')
|
||||
parser.add_option('--secondphase',
|
||||
dest = 'secondphase',
|
||||
type = 'float', metavar= 'float',
|
||||
help = 'volume fraction of randomly distributed second phase [%default]')
|
||||
parser.add_option('-l', '--laguerre',
|
||||
dest = 'laguerre',
|
||||
action = 'store_true',
|
||||
help = 'use Laguerre (weighted Voronoi) tessellation [%default]')
|
||||
parser.add_option('--cpus',
|
||||
dest = 'cpus',
|
||||
type = 'int', metavar = 'int',
|
||||
help = 'number of parallel processes to use for Laguerre tessellation [%default]')
|
||||
parser.add_option('--nonperiodic',
|
||||
dest = 'nonperiodic',
|
||||
action = 'store_true',
|
||||
help = 'use nonperiodic tessellation [%default]')
|
||||
|
||||
parser.set_defaults(grid = None,
|
||||
size = None,
|
||||
origin = None,
|
||||
position = 'pos',
|
||||
weight = 'weight',
|
||||
microstructure = 'microstructure',
|
||||
eulers = 'Euler',
|
||||
homogenization = 1,
|
||||
phase = 1,
|
||||
crystallite = 1,
|
||||
phase = 1,
|
||||
secondphase = 0.0,
|
||||
config = False,
|
||||
laguerre = False,
|
||||
randomSeed = None,
|
||||
cpus = 2,
|
||||
laguerre = False,
|
||||
nonperiodic = False,
|
||||
randomSeed = None,
|
||||
)
|
||||
(options,filenames) = parser.parse_args()
|
||||
|
||||
if options.secondphase > 1.0 or options.secondphase < 0.0:
|
||||
parser.error('volume fraction of second phase (%f) out of bounds...'%options.secondphase)
|
||||
parser.error('volume fraction of second phase ({}) out of bounds.'.format(options.secondphase))
|
||||
|
||||
# --- loop over input files -------------------------------------------------------------------------
|
||||
if filenames == []:
|
||||
filenames = ['STDIN']
|
||||
|
||||
if filenames == []: filenames = ['STDIN']
|
||||
|
||||
for name in filenames:
|
||||
if name == 'STDIN':
|
||||
file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr}
|
||||
file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
|
||||
else:
|
||||
if not os.path.exists(name): continue
|
||||
file = {'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr}
|
||||
file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
|
||||
if not (name == 'STDIN' or os.path.exists(name)): continue
|
||||
table = damask.ASCIItable(name = name,
|
||||
outname = os.path.splitext(name)[0]+'.geom',
|
||||
buffered = False)
|
||||
table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
|
||||
|
||||
table = damask.ASCIItable(file['input'],file['output'],buffered=False) # make unbuffered ASCII_table
|
||||
table.head_read() # read ASCII header info
|
||||
# --- read header ----------------------------------------------------------------------------
|
||||
|
||||
table.head_read()
|
||||
info,extra_header = table.head_getGeom()
|
||||
|
||||
if options.grid != None: info['grid'] = options.grid
|
||||
if options.size != None: info['size'] = options.size
|
||||
if options.origin != None: info['origin'] = options.origin
|
||||
|
||||
# ------------------------------------------ sanity checks ---------------------------------------
|
||||
|
||||
remarks = []
|
||||
errors = []
|
||||
labels = []
|
||||
if np.all(table.label_index(['1_coords','2_coords','3_coords']) != -1):
|
||||
coords = ['1_coords','2_coords','3_coords']
|
||||
elif np.all(table.label_index(['x','y','z']) != -1):
|
||||
coords = ['x','y','z']
|
||||
|
||||
hasGrains = table.label_dimension(options.microstructure) == 1
|
||||
hasEulers = table.label_dimension(options.eulers) == 3
|
||||
hasWeights = table.label_dimension(options.weight) == 1
|
||||
|
||||
if np.any(info['grid'] < 1): errors.append('invalid grid a b c.')
|
||||
if np.any(info['size'] <= 0.0) \
|
||||
and np.all(info['grid'] < 1): errors.append('invalid size x y z.')
|
||||
else:
|
||||
file['croak'].write('no coordinate data (1/2/3_coords | x/y/z) found ...')
|
||||
for i in xrange(3):
|
||||
if info['size'][i] <= 0.0: # any invalid size?
|
||||
info['size'][i] = float(info['grid'][i])/max(info['grid']) # normalize to grid
|
||||
remarks.append('rescaling size {} to {}...'.format({0:'x',1:'y',2:'z'}[i],info['size'][i]))
|
||||
|
||||
if table.label_dimension(options.position) != 3:
|
||||
errors.append('position columns "{}" have dimension {}.'.format(options.position,
|
||||
table.label_dimension(options.position)))
|
||||
else:
|
||||
labels += [options.position]
|
||||
|
||||
if not hasEulers: remarks.append('missing seed orientations...')
|
||||
else: labels += [options.eulers]
|
||||
if not hasGrains: remarks.append('missing seed microstructure indices...')
|
||||
else: labels += [options.microstructure]
|
||||
if options.laguerre and not hasWeights: remarks.append('missing seed weights...')
|
||||
else: labels += [options.weight]
|
||||
|
||||
if remarks != []: table.croak(remarks)
|
||||
if errors != []:
|
||||
table.croak(errors)
|
||||
table.close(dismiss=True)
|
||||
continue
|
||||
|
||||
labels += coords
|
||||
hasEulers = np.all(table.label_index(['phi1','Phi','phi2']) != -1)
|
||||
if hasEulers:
|
||||
labels += ['phi1','Phi','phi2']
|
||||
|
||||
hasGrains = table.label_index('microstructure') != -1
|
||||
if hasGrains:
|
||||
labels += ['microstructure']
|
||||
|
||||
hasWeight = table.label_index('weight') != -1
|
||||
if hasWeight:
|
||||
labels += ['weight']
|
||||
# ------------------------------------------ read seeds ---------------------------------------
|
||||
|
||||
table.data_readArray(labels)
|
||||
coords = table.data[:,table.label_index(coords)]
|
||||
eulers = table.data[:,table.label_index(['phi1','Phi','phi2'])] if hasEulers else np.zeros(3*len(coords))
|
||||
grain = table.data[:,table.label_index('microstructure')] if hasGrains else 1+np.arange(len(coords))
|
||||
weights = table.data[:,table.label_index('weight')] if hasWeight else np.zeros(len(coords))
|
||||
grainIDs = np.unique(grain).astype('i')
|
||||
coords = table.data[:,table.label_index(options.position):table.label_index(options.position)+3]
|
||||
eulers = table.data[:,table.label_index(options.eulers ):table.label_index(options.eulers )+3] if hasEulers else np.zeros(3*len(coords))
|
||||
grains = table.data[:,table.label_index(options.microstructure)].astype('i') if hasGrains else 1+np.arange(len(coords))
|
||||
weights = table.data[:,table.label_index(options.weight)] if hasWeights else np.zeros(len(coords))
|
||||
grainIDs = np.unique(grains).astype('i')
|
||||
NgrainIDs = len(grainIDs)
|
||||
|
||||
# --- tessellate microstructure ------------------------------------------------------------
|
||||
|
||||
#--- interpret header ----------------------------------------------------------------------------
|
||||
info = {
|
||||
'grid': np.zeros(3,'i'),
|
||||
'size': np.array(options.size),
|
||||
'origin': np.zeros(3,'d'),
|
||||
'microstructures': 0,
|
||||
'homogenization': options.homogenization,
|
||||
}
|
||||
newInfo = {
|
||||
'microstructures': 0,
|
||||
}
|
||||
extra_header = []
|
||||
|
||||
for header in table.info:
|
||||
headitems = map(str.lower,header.split())
|
||||
if len(headitems) == 0: continue
|
||||
if headitems[0] in mappings.keys():
|
||||
if headitems[0] in identifiers.keys():
|
||||
for i in xrange(len(identifiers[headitems[0]])):
|
||||
info[headitems[0]][i] = \
|
||||
mappings[headitems[0]](headitems[headitems.index(identifiers[headitems[0]][i])+1])
|
||||
else:
|
||||
info[headitems[0]] = mappings[headitems[0]](headitems[1])
|
||||
else:
|
||||
extra_header.append(header)
|
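For reference, a self-contained condensation of how the identifiers/mappings pair interprets a geom header line (same logic as the loop above, assuming a Python 2 interpreter where map returns a list):

    identifiers = {'grid': ['a','b','c']}
    mappings    = {'grid': int}

    headitems = map(str.lower, 'grid\ta 16\tb 16\tc 32'.split())
    grid = [mappings['grid'](headitems[headitems.index(key)+1])
            for key in identifiers['grid']]
    print grid  # -> [16, 16, 32]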
||||
|
||||
if info['microstructures'] != len(grainIDs):
|
||||
file['croak'].write('grain data not matching grain count (%i)...\n'%(len(grainIDs)))
|
||||
info['microstructures'] = len(grainIDs)
|
||||
x = (np.arange(info['grid'][0])+0.5)*info['size'][0]/info['grid'][0]
|
||||
y = (np.arange(info['grid'][1])+0.5)*info['size'][1]/info['grid'][1]
|
||||
z = (np.arange(info['grid'][2])+0.5)*info['size'][2]/info['grid'][2]
|
||||
|
||||
if 0 not in options.grid: # user-specified grid
|
||||
info['grid'] = np.array(options.grid)
|
||||
table.croak('tessellating...')
|
||||
|
||||
for i in xrange(3):
|
||||
if info['size'][i] <= 0.0: # any invalid size?
|
||||
info['size'][i] = float(info['grid'][i])/max(info['grid'])
|
||||
file['croak'].write('rescaling size %s...\n'%{0:'x',1:'y',2:'z'}[i])
|
||||
if options.laguerre:
|
||||
undeformed = np.vstack(np.meshgrid(x, y, z)).reshape(3,-1).T
|
||||
indices = laguerreTessellation(undeformed, coords, weights, grains, options.nonperiodic, options.cpus)
|
||||
else:
|
||||
coords = (coords*info['size']).T
|
||||
undeformed = np.vstack(map(np.ravel, meshgrid2(x, y, z)))
|
||||
|
||||
file['croak'].write('grains to map: %i\n'%info['microstructures'] + \
|
||||
'grid a b c: %s\n'%(' x '.join(map(str,info['grid']))) + \
|
||||
'size x y z: %s\n'%(' x '.join(map(str,info['size']))) + \
|
||||
'origin x y z: %s\n'%(' : '.join(map(str,info['origin']))) + \
|
||||
'homogenization: %i\n'%info['homogenization'])
|
||||
indices = damask.core.math.periodicNearestNeighbor(\
|
||||
info['size'],\
|
||||
np.eye(3),\
|
||||
undeformed,coords)//3**3 + 1 # floor division to kill periodic images
|
||||
indices = grains[indices-1]
|
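The floor division by 3**3 above assumes that periodicNearestNeighbor enumerates the 27 periodic images of each seed consecutively (seed-major), so an image index maps back to its seed by integer division; a tiny illustration of that assumed layout:

    # with 2 seeds, images 0..26 belong to seed 0 and images 27..53 to seed 1
    print 40 // 3**3 + 1  # -> 2, i.e. the second seed (1-based)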
||||
|
||||
# --- write header ---------------------------------------------------------------------------------
|
||||
|
||||
grainIDs = np.intersect1d(grainIDs,indices)
|
||||
info['microstructures'] = len(grainIDs)
|
||||
|
||||
if info['homogenization'] == 0: info['homogenization'] = options.homogenization
|
||||
|
||||
if np.any(info['grid'] < 1):
|
||||
file['croak'].write('invalid grid a b c.\n')
|
||||
continue
|
||||
if np.any(info['size'] <= 0.0):
|
||||
file['croak'].write('invalid size x y z.\n')
|
||||
continue
|
||||
if info['microstructures'] == 0:
|
||||
file['croak'].write('no grain info found.\n')
|
||||
continue
|
||||
table.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))),
|
||||
'size x y z: %s'%(' x '.join(map(str,info['size']))),
|
||||
'origin x y z: %s'%(' : '.join(map(str,info['origin']))),
|
||||
'homogenization: %i'%info['homogenization'],
|
||||
'microstructures: %i%s'%(info['microstructures'],
|
||||
(' out of %i'%NgrainIDs if NgrainIDs != info['microstructures'] else '')),
|
||||
])
|
||||
|
||||
#--- prepare data ---------------------------------------------------------------------------------
|
||||
eulers = eulers.T
|
||||
config_header = []
|
||||
formatwidth = 1+int(math.log10(info['microstructures']))
|
||||
|
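The format width is just the decimal digit count of the largest microstructure index; e.g.:

    import math
    print 1 + int(math.log10(999)), 1 + int(math.log10(1000))  # -> 3 4
    print '[Grain%s]' % str(7).zfill(3)                        # -> [Grain007]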
||||
#--- switch according to task ---------------------------------------------------------------------
|
||||
if options.config: # write config file
|
||||
phase = np.empty(info['microstructures'],'i')
|
||||
phase.fill(options.phase)
|
||||
phase[0:int(float(info['microstructures'])*options.secondphase)] = options.phase+1
|
||||
randomSeed = int(os.urandom(4).encode('hex'), 16) if options.randomSeed == None else options.randomSeed # random seed per file for second phase
|
||||
phase = options.phase * np.ones(info['microstructures'],'i')
|
||||
if int(options.secondphase*info['microstructures']) > 0:
|
||||
phase[0:int(options.secondphase*info['microstructures'])] += 1
|
||||
randomSeed = int(os.urandom(4).encode('hex'), 16) if options.randomSeed == None \
|
||||
else options.randomSeed # random seed for second phase
|
||||
np.random.seed(randomSeed)
|
||||
np.random.shuffle(phase)
|
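A minimal sketch of the second-phase assignment above: fill the leading fraction of the phase array with phase+1, then shuffle with a fixed seed so the distribution is random but reproducible (illustrative numbers only):

    import numpy as np

    phase = 1 * np.ones(10, 'i')       # ten grains, all phase 1
    phase[0:int(0.5*10)] += 1          # 50 % second phase -> five entries of 2
    np.random.seed(42)                 # per-file seed makes the shuffle repeatable
    np.random.shuffle(phase)
    print phase                        # five 1s and five 2s in random order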
||||
formatwidth = 1+int(math.log10(info['microstructures']))
|
||||
file['output'].write('#' + scriptID + ' ' + ' '.join(sys.argv[1:])+'\n')
|
||||
if options.secondphase > 0.0: file['output'].write('# random seed for second phase %i\n'%randomSeed)
|
||||
file['output'].write('\n<microstructure>\n')
|
||||
for i,ID in enumerate(grainIDs):
|
||||
file['output'].write('\n[Grain%s]\n'%(str(ID).zfill(formatwidth)) + \
|
||||
'crystallite %i\n'%options.crystallite + \
|
||||
'(constituent)\tphase %i\ttexture %s\tfraction 1.0\n'%(phase[i],str(ID).rjust(formatwidth)))
|
||||
|
||||
file['output'].write('\n<texture>\n')
|
||||
config_header += ['# random seed (phase shuffling): {}'.format(randomSeed)]
|
||||
|
||||
config_header += ['<microstructure>']
|
||||
for i,ID in enumerate(grainIDs):
|
||||
config_header += ['[Grain%s]'%(str(ID).zfill(formatwidth)),
|
||||
'crystallite %i'%options.crystallite,
|
||||
'(constituent)\tphase %i\ttexture %s\tfraction 1.0'%(phase[i],str(ID).rjust(formatwidth)),
|
||||
]
|
||||
if hasEulers:
|
||||
config_header += ['<texture>']
|
||||
for ID in grainIDs:
|
||||
eulerID = np.nonzero(grain == ID)[0][0] # find first occurrence of this grain id
|
||||
file['output'].write('\n[Grain%s]\n'%(str(ID).zfill(formatwidth)) + \
|
||||
'(gauss)\tphi1 %g\tPhi %g\tphi2 %g\tscatter 0.0\tfraction 1.0\n'%(eulers[0,eulerID],
|
||||
eulers[1,eulerID],
|
||||
eulers[2,eulerID]))
|
||||
|
||||
else: # write geometry file
|
||||
x = (np.arange(info['grid'][0])+0.5)*info['size'][0]/info['grid'][0]
|
||||
y = (np.arange(info['grid'][1])+0.5)*info['size'][1]/info['grid'][1]
|
||||
z = (np.arange(info['grid'][2])+0.5)*info['size'][2]/info['grid'][2]
|
||||
|
||||
if not options.laguerre:
|
||||
coords = (coords*info['size']).T
|
||||
undeformed = np.vstack(map(np.ravel, meshgrid2(x, y, z)))
|
||||
eulerID = np.nonzero(grains == ID)[0][0] # find first occurrence of this grain id
|
||||
config_header += ['[Grain%s]'%(str(ID).zfill(formatwidth)),
|
||||
'axes\t%s %s %s'%tuple(options.axes) if options.axes != None else '',
|
||||
'(gauss)\tphi1 %g\tPhi %g\tphi2 %g\tscatter 0.0\tfraction 1.0'%tuple(eulers[eulerID]),
|
||||
]
|
||||
|
||||
file['croak'].write('tessellating...\n')
|
||||
indices = damask.core.math.periodicNearestNeighbor(\
|
||||
info['size'],\
|
||||
np.eye(3),\
|
||||
undeformed,coords)//3**3 + 1 # floor division to kill periodic images
|
||||
indices = grain[indices-1]
|
||||
else :
|
||||
undeformed = np.vstack(np.meshgrid(x, y, z)).reshape(3,-1).T
|
||||
indices = laguerreTessellation(undeformed, coords, weights, grain)
|
||||
table.labels_clear()
|
||||
table.info_clear()
|
||||
table.info_append([
|
||||
scriptID + ' ' + ' '.join(sys.argv[1:]),
|
||||
"grid\ta {grid[0]}\tb {grid[1]}\tc {grid[2]}".format(grid=info['grid']),
|
||||
"size\tx {size[0]}\ty {size[1]}\tz {size[2]}".format(size=info['size']),
|
||||
"origin\tx {origin[0]}\ty {origin[1]}\tz {origin[2]}".format(origin=info['origin']),
|
||||
"homogenization\t{homog}".format(homog=info['homogenization']),
|
||||
"microstructures\t{microstructures}".format(microstructures=info['microstructures']),
|
||||
config_header,
|
||||
])
|
||||
table.head_write()
|
||||
|
||||
newInfo['microstructures'] = info['microstructures']
|
||||
for i in grainIDs:
|
||||
if i not in indices: newInfo['microstructures'] -= 1
|
||||
file['croak'].write(('all' if newInfo['microstructures'] == info['microstructures'] else 'only') +
|
||||
' %i'%newInfo['microstructures'] +
|
||||
('' if newInfo['microstructures'] == info['microstructures'] else \
|
||||
' out of %i'%info['microstructures']) +
|
||||
' grains mapped.\n')
|
||||
|
||||
#--- write header ---------------------------------------------------------------------------------
|
||||
table.labels_clear()
|
||||
table.info_clear()
|
||||
table.info_append(extra_header+[
|
||||
scriptID + ' ' + ' '.join(sys.argv[1:]),
|
||||
"grid\ta %i\tb %i\tc %i"%(info['grid'][0],info['grid'][1],info['grid'][2],),
|
||||
"size\tx %f\ty %f\tz %f"%(info['size'][0],info['size'][1],info['size'][2],),
|
||||
"origin\tx %f\ty %f\tz %f"%(info['origin'][0],info['origin'][1],info['origin'][2],),
|
||||
"homogenization\t%i"%info['homogenization'],
|
||||
"microstructures\t%i"%(newInfo['microstructures']),
|
||||
])
|
||||
table.head_write()
|
||||
|
||||
# --- write microstructure information ------------------------------------------------------------
|
||||
formatwidth = 1+int(math.log10(newInfo['microstructures']))
|
||||
table.data = indices.reshape(info['grid'][1]*info['grid'][2],info['grid'][0])
|
||||
table.data_writeArray('%%%ii'%(formatwidth),delimiter=' ')
|
||||
|
||||
table.data = indices.reshape(info['grid'][1]*info['grid'][2],info['grid'][0])
|
||||
table.data_writeArray('%%%ii'%(formatwidth),delimiter=' ')
|
||||
|
||||
#--- output finalization --------------------------------------------------------------------------
|
||||
|
||||
table.close()
|
||||
if file['name'] != 'STDIN':
|
||||
os.rename(file['name']+'_tmp',
|
||||
os.path.splitext(file['name'])[0] +'%s'%('_material.config' if options.config else '.geom'))
|
||||
|
|
|
@@ -12,18 +12,6 @@ scriptName = os.path.splitext(scriptID.split()[1])[0]
|
|||
#--------------------------------------------------------------------------------------------------
|
||||
# MAIN
|
||||
#--------------------------------------------------------------------------------------------------
|
||||
identifiers = {
|
||||
'grid': ['a','b','c'],
|
||||
'size': ['x','y','z'],
|
||||
'origin': ['x','y','z'],
|
||||
}
|
||||
mappings = {
|
||||
'grid': lambda x: int(x),
|
||||
'size': lambda x: float(x),
|
||||
'origin': lambda x: float(x),
|
||||
'homogenization': lambda x: int(x),
|
||||
'microstructures': lambda x: int(x),
|
||||
}
|
||||
|
||||
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
|
||||
compress geometry files with ranges "a to b" and/or multiples "n of x".
|
||||
|
@@ -32,84 +20,55 @@ compress geometry files with ranges "a to b" and/or multiples "n of x".
|
|||
|
||||
(options, filenames) = parser.parse_args()
|
||||
|
||||
# ------------------------------------------ setup file handles -----------------------------------
|
||||
# --- loop over input files -------------------------------------------------------------------------
|
||||
|
||||
files = []
|
||||
if filenames == []:
|
||||
files.append({'name':'STDIN',
|
||||
'input':sys.stdin,
|
||||
'output':sys.stdout,
|
||||
'croak':sys.stderr})
|
||||
else:
|
||||
for name in filenames:
|
||||
if os.path.exists(name):
|
||||
files.append({'name':name,
|
||||
'croak':sys.stdout})
|
||||
if filenames == []: filenames = ['STDIN']
|
||||
|
||||
# ------------------------------------------ loop over input files --------------------------------
|
||||
for file in files:
|
||||
file['croak'].write('\033[1m' + scriptName + '\033[0m: ' + (file['name'] if file['name'] != 'STDIN' else '') + '\n')
|
||||
for name in filenames:
|
||||
if not (name == 'STDIN' or os.path.exists(name)): continue
|
||||
table = damask.ASCIItable(name = name, outname = name+'_tmp',
|
||||
buffered = False, labeled = False)
|
||||
table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
|
||||
|
||||
if file['name'] != 'STDIN':
|
||||
file['input'] = open(file['name'])
|
||||
file['output'] = open(file['name']+'_tmp','w')
|
||||
# --- interpret header ----------------------------------------------------------------------------
|
||||
|
||||
table = damask.ASCIItable(file['input'],file['output'],labels = False,buffered = False) # make unbuffered ASCII_table
|
||||
table.head_read() # read ASCII header info
|
||||
table.head_read()
|
||||
info,extra_header = table.head_getGeom()
|
||||
|
||||
table.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))),
|
||||
'size x y z: %s'%(' x '.join(map(str,info['size']))),
|
||||
'origin x y z: %s'%(' : '.join(map(str,info['origin']))),
|
||||
'homogenization: %i'%info['homogenization'],
|
||||
'microstructures: %i'%info['microstructures'],
|
||||
])
|
||||
|
||||
#--- interpret header -----------------------------------------------------------------------------
|
||||
info = {
|
||||
'grid': np.zeros(3,'i'),
|
||||
'size': np.zeros(3,'d'),
|
||||
'origin': np.zeros(3,'d'),
|
||||
'homogenization': 0,
|
||||
'microstructures': 0,
|
||||
}
|
||||
extra_header = []
|
||||
|
||||
for header in table.info:
|
||||
headitems = map(str.lower,header.split())
|
||||
if len(headitems) == 0: continue
|
||||
if headitems[0] in mappings.keys():
|
||||
if headitems[0] in identifiers.keys():
|
||||
for i in xrange(len(identifiers[headitems[0]])):
|
||||
info[headitems[0]][i] = \
|
||||
mappings[headitems[0]](headitems[headitems.index(identifiers[headitems[0]][i])+1])
|
||||
else:
|
||||
info[headitems[0]] = mappings[headitems[0]](headitems[1])
|
||||
else:
|
||||
extra_header.append(header)
|
||||
|
||||
file['croak'].write('grid a b c: %s\n'%(' x '.join(map(str,info['grid']))) + \
|
||||
'size x y z: %s\n'%(' x '.join(map(str,info['size']))) + \
|
||||
'origin x y z: %s\n'%(' : '.join(map(str,info['origin']))) + \
|
||||
'homogenization: %i\n'%info['homogenization'] + \
|
||||
'microstructures: %i\n'%info['microstructures'])
|
||||
|
||||
if np.any(info['grid'] < 1):
|
||||
file['croak'].write('invalid grid a b c.\n')
|
||||
continue
|
||||
if np.any(info['size'] <= 0.0):
|
||||
file['croak'].write('invalid size x y z.\n')
|
||||
errors = []
|
||||
if np.any(info['grid'] < 1): errors.append('invalid grid a b c.')
|
||||
if np.any(info['size'] <= 0.0): errors.append('invalid size x y z.')
|
||||
if errors != []:
|
||||
table.croak(errors)
|
||||
table.close(dismiss = True)
|
||||
continue
|
||||
|
||||
#--- write header ---------------------------------------------------------------------------------
|
||||
# --- write header ---------------------------------------------------------------------------------
|
||||
|
||||
table.labels_clear()
|
||||
table.info_clear()
|
||||
table.info_append(extra_header+[
|
||||
scriptID + ' ' + ' '.join(sys.argv[1:]),
|
||||
"grid\ta %i\tb %i\tc %i"%(info['grid'][0],info['grid'][1],info['grid'][2],),
|
||||
"size\tx %e\ty %e\tz %e"%(info['size'][0],info['size'][1],info['size'][2],),
|
||||
"origin\tx %e\ty %e\tz %e"%(info['origin'][0],info['origin'][1],info['origin'][2],),
|
||||
"homogenization\t%i"%info['homogenization'],
|
||||
"microstructures\t%i"%(info['microstructures']),
|
||||
"grid\ta {grid[0]}\tb {grid[1]}\tc {grid[2]}".format(grid=info['grid']),
|
||||
"size\tx {size[0]}\ty {size[1]}\tz {size[2]}".format(size=info['size']),
|
||||
"origin\tx {origin[0]}\ty {origin[1]}\tz {origin[2]}".format(origin=info['origin']),
|
||||
"homogenization\t{homog}".format(homog=info['homogenization']),
|
||||
"microstructures\t{microstructures}".format(microstructures=info['microstructures']),
|
||||
])
|
||||
table.head_write()
|
||||
table.output_flush()
|
||||
|
||||
# --- write packed microstructure information -----------------------------------------------------
|
||||
|
||||
type = ''
|
||||
former = -1
|
||||
start = -1
|
||||
former = start = -1
|
||||
reps = 0
|
||||
|
||||
outputAlive = True
|
||||
|
@@ -134,11 +93,12 @@ for file in files:
|
|||
elif type == '.':
|
||||
table.data = [str(former)]
|
||||
elif type == 'to':
|
||||
table.data = ['%i to %i'%(former-reps+1,former)]
|
||||
table.data = ['{0} to {1}'.format(former-reps+1,former)]
|
||||
elif type == 'of':
|
||||
table.data = ['%i of %i'%(reps,former)]
|
||||
table.data = ['{0} of {1}'.format(reps,former)]
|
||||
|
||||
outputAlive = table.data_write(delimiter = ' ') # output processed line
|
||||
|
||||
type = '.'
|
||||
start = current
|
||||
reps = 1
|
||||
|
@@ -146,18 +106,14 @@ for file in files:
|
|||
former = current
|
||||
|
||||
table.data = {
|
||||
'.' : [str(former)],
|
||||
'to': ['%i to %i'%(former-reps+1,former)],
|
||||
'of': ['%i of %i'%(reps,former)],
|
||||
}[type]
|
||||
'.' : [str(former)],
|
||||
'to': ['%i to %i'%(former-reps+1,former)],
|
||||
'of': ['%i of %i'%(reps,former)],
|
||||
}[type]
|
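For clarity, the packed format written here encodes runs as 'n of x' and unit-step sequences as 'a to b'; a short sketch of both branches (illustrative values):

    # the sequence 1 1 1 1 2 3 4 packs into '4 of 1' followed by '2 to 4'
    former, reps = 1, 4
    print '%i of %i' % (reps, former)             # -> 4 of 1
    former, reps = 4, 3
    print '%i to %i' % (former-reps+1, former)    # -> 2 to 4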
||||
|
||||
outputAlive = table.data_write(delimiter = ' ') # output processed line
|
||||
|
||||
# --- output finalization --------------------------------------------------------------------------
|
||||
|
||||
# ------------------------------------------ output result ---------------------------------------
|
||||
outputAlive and table.output_flush() # just in case of buffered ASCII table
|
||||
|
||||
#--- output finalization --------------------------------------------------------------------------
|
||||
if file['name'] != 'STDIN':
|
||||
table.input_close() # close input ASCII table
|
||||
table.output_close() # close output ASCII table
|
||||
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new
|
||||
table.close() # close ASCII table
|
||||
if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new
|
||||
|
|
|
@@ -12,18 +12,6 @@ scriptName = os.path.splitext(scriptID.split()[1])[0]
|
|||
#--------------------------------------------------------------------------------------------------
|
||||
# MAIN
|
||||
#--------------------------------------------------------------------------------------------------
|
||||
identifiers = {
|
||||
'grid': ['a','b','c'],
|
||||
'size': ['x','y','z'],
|
||||
'origin': ['x','y','z'],
|
||||
}
|
||||
mappings = {
|
||||
'grid': lambda x: int(x),
|
||||
'size': lambda x: float(x),
|
||||
'origin': lambda x: float(x),
|
||||
'homogenization': lambda x: int(x),
|
||||
'microstructures': lambda x: int(x),
|
||||
}
|
||||
|
||||
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
|
||||
Scales a geometry description independently in x, y, and z direction in terms of grid and/or size.
|
||||
|
@@ -31,102 +19,67 @@ Either absolute values or relative factors (like "0.25x") can be used.
|
|||
|
||||
""", version = scriptID)
|
||||
|
||||
parser.add_option('-g', '--grid', dest='grid', nargs = 3, metavar = 'string string string', \
|
||||
help='a,b,c grid of hexahedral box [unchanged]')
|
||||
parser.add_option('-s', '--size', dest='size', nargs = 3, metavar = 'string string string', \
|
||||
help='x,y,z size of hexahedral box [unchanged]')
|
||||
parser.add_option('-r', '--renumber', dest='renumber', action='store_true', \
|
||||
help='renumber microstructure indices from 1...N [%default]')
|
||||
parser.add_option('-g', '--grid',
|
||||
dest = 'grid',
|
||||
type = 'string', nargs = 3, metavar = 'string string string',
|
||||
help = 'a,b,c grid of hexahedral box [unchanged]')
|
||||
parser.add_option('-s', '--size',
|
||||
dest = 'size',
|
||||
type = 'string', nargs = 3, metavar = 'string string string',
|
||||
help = 'x,y,z size of hexahedral box [unchanged]')
|
||||
parser.add_option('-r', '--renumber',
|
||||
dest = 'renumber',
|
||||
action = 'store_true',
|
||||
help = 'renumber microstructure indices from 1..N [%default]')
|
||||
|
||||
parser.set_defaults(renumber = False)
|
||||
parser.set_defaults(grid = ['0','0','0'])
|
||||
parser.set_defaults(size = ['0.0','0.0','0.0'])
|
||||
parser.set_defaults(renumber = False,
|
||||
grid = ['0','0','0'],
|
||||
size = ['0.0','0.0','0.0'],
|
||||
)
|
||||
|
||||
(options, filenames) = parser.parse_args()
|
||||
|
||||
#--- setup file handles ---------------------------------------------------------------------------
|
||||
files = []
|
||||
if filenames == []:
|
||||
files.append({'name':'STDIN',
|
||||
'input':sys.stdin,
|
||||
'output':sys.stdout,
|
||||
'croak':sys.stderr,
|
||||
})
|
||||
else:
|
||||
for name in filenames:
|
||||
if os.path.exists(name):
|
||||
files.append({'name':name,
|
||||
'input':open(name),
|
||||
'output':open(name+'_tmp','w'),
|
||||
'croak':sys.stdout,
|
||||
})
|
||||
# --- loop over input files -------------------------------------------------------------------------
|
||||
|
||||
#--- loop over input files ------------------------------------------------------------------------
|
||||
for file in files:
|
||||
if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
|
||||
else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
|
||||
if filenames == []: filenames = ['STDIN']
|
||||
|
||||
for name in filenames:
|
||||
if not (name == 'STDIN' or os.path.exists(name)): continue
|
||||
table = damask.ASCIItable(name = name, outname = name+'_tmp',
|
||||
buffered = False, labeled = False)
|
||||
table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
|
||||
|
||||
# --- interpret header ----------------------------------------------------------------------------
|
||||
|
||||
table = damask.ASCIItable(file['input'],file['output'],labels=False)
|
||||
table.head_read()
|
||||
info,extra_header = table.head_getGeom()
|
||||
|
||||
table.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))),
|
||||
'size x y z: %s'%(' x '.join(map(str,info['size']))),
|
||||
'origin x y z: %s'%(' : '.join(map(str,info['origin']))),
|
||||
'homogenization: %i'%info['homogenization'],
|
||||
'microstructures: %i'%info['microstructures'],
|
||||
])
|
||||
|
||||
#--- interpret header ----------------------------------------------------------------------------
|
||||
info = {
|
||||
'grid': np.zeros(3,'i'),
|
||||
'size': np.zeros(3,'d'),
|
||||
'origin': np.zeros(3,'d'),
|
||||
'homogenization': 0,
|
||||
'microstructures': 0,
|
||||
}
|
||||
errors = []
|
||||
if np.any(info['grid'] < 1): errors.append('invalid grid a b c.')
|
||||
if np.any(info['size'] <= 0.0): errors.append('invalid size x y z.')
|
||||
if errors != []:
|
||||
table.croak(errors)
|
||||
table.close(dismiss = True)
|
||||
continue
|
||||
|
||||
# --- read data ------------------------------------------------------------------------------------
|
||||
|
||||
microstructure = table.microstructure_read(info['grid']) # read microstructure
|
||||
|
||||
# --- do work ------------------------------------------------------------------------------------
|
||||
|
||||
newInfo = {
|
||||
'grid': np.zeros(3,'i'),
|
||||
'size': np.zeros(3,'d'),
|
||||
'microstructures': 0,
|
||||
}
|
||||
extra_header = []
|
||||
|
||||
for header in table.info:
|
||||
headitems = map(str.lower,header.split())
|
||||
if len(headitems) == 0: continue
|
||||
if headitems[0] in mappings.keys():
|
||||
if headitems[0] in identifiers.keys():
|
||||
for i in xrange(len(identifiers[headitems[0]])):
|
||||
info[headitems[0]][i] = \
|
||||
mappings[headitems[0]](headitems[headitems.index(identifiers[headitems[0]][i])+1])
|
||||
else:
|
||||
info[headitems[0]] = mappings[headitems[0]](headitems[1])
|
||||
else:
|
||||
extra_header.append(header)
|
||||
|
||||
file['croak'].write('grid a b c: %s\n'%(' x '.join(map(str,info['grid']))) + \
|
||||
'size x y z: %s\n'%(' x '.join(map(str,info['size']))) + \
|
||||
'origin x y z: %s\n'%(' : '.join(map(str,info['origin']))) + \
|
||||
'homogenization: %i\n'%info['homogenization'] + \
|
||||
'microstructures: %i\n'%info['microstructures'])
|
||||
|
||||
if np.any(info['grid'] < 1):
|
||||
file['croak'].write('invalid grid a b c.\n')
|
||||
continue
|
||||
if np.any(info['size'] <= 0.0):
|
||||
file['croak'].write('invalid size x y z.\n')
|
||||
continue
|
||||
|
||||
#--- read data ------------------------------------------------------------------------------------
|
||||
microstructure = np.zeros(info['grid'].prod(),'i')
|
||||
i = 0
|
||||
|
||||
while table.data_read():
|
||||
items = table.data
|
||||
if len(items) > 2:
|
||||
if items[1].lower() == 'of': items = [int(items[2])]*int(items[0])
|
||||
elif items[1].lower() == 'to': items = xrange(int(items[0]),1+int(items[2]))
|
||||
else: items = map(int,items)
|
||||
else: items = map(int,items)
|
||||
|
||||
s = len(items)
|
||||
microstructure[i:i+s] = items
|
||||
i += s
|
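The read loop above undoes the packed 'of'/'to' records; a self-contained sketch of the expansion of one packed line (Python 2, as in the script):

    items = '4 of 1'.split()
    if   items[1].lower() == 'of': items = [int(items[2])] * int(items[0])
    elif items[1].lower() == 'to': items = xrange(int(items[0]), 1+int(items[2]))
    print list(items)  # -> [1, 1, 1, 1]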
||||
|
||||
#--- do work ------------------------------------------------------------------------------------
|
||||
'grid': np.zeros(3,'i'),
|
||||
'origin': np.zeros(3,'d'),
|
||||
'microstructures': 0,
|
||||
}
|
||||
|
||||
newInfo['grid'] = np.array([{True:round(o*float(n.translate(None,'xX'))), False: round(float(n.translate(None,'xX')))}[n[-1].lower() == 'x'] for o,n in zip(info['grid'],options.grid)],'i')
|
||||
newInfo['size'] = np.array([{True: o*float(n.translate(None,'xX')) , False: float(n.translate(None,'xX')) }[n[-1].lower() == 'x'] for o,n in zip(info['size'],options.size)],'d')
|
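The one-liners above distinguish absolute values from relative factors by a trailing 'x'/'X' (Python 2 str.translate with None deletes the given characters); a reduced sketch:

    o, n = 32, '0.25x'                        # old grid 32, request '0.25x'
    relative = n[-1].lower() == 'x'
    value    = float(n.translate(None, 'xX'))
    print round(o*value) if relative else round(value)  # -> 8.0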
||||
|
@@ -146,56 +99,60 @@ for file in files:
|
|||
microstructure = np.repeat(
|
||||
np.repeat(
|
||||
np.repeat(microstructure,multiplicity[0], axis=0),
|
||||
multiplicity[1], axis=1),
|
||||
multiplicity[2], axis=2)
|
||||
multiplicity[1], axis=1),
|
||||
multiplicity[2], axis=2)
|
||||
# --- renumber to sequence 1...Ngrains if requested ------------------------------------------------
|
||||
# http://stackoverflow.com/questions/10741346/np-frequency-counts-for-unique-values-in-an-array
|
||||
|
||||
if options.renumber:
|
||||
newID=0
|
||||
newID = 0
|
||||
for microstructureID,count in enumerate(np.bincount(microstructure.reshape(newInfo['grid'].prod()))):
|
||||
if count != 0:
|
||||
newID+=1
|
||||
microstructure=np.where(microstructure==microstructureID,newID,microstructure).reshape(microstructure.shape)
|
||||
newID += 1
|
||||
microstructure = np.where(microstructure == microstructureID, newID,microstructure).reshape(microstructure.shape)
|
||||
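A minimal, self-contained run of the renumbering loop above (np.bincount counts occurrences of each index, so gaps in the numbering are skipped):

    import numpy as np

    microstructure = np.array([2, 2, 5, 7, 5])
    newID = 0
    for ID, count in enumerate(np.bincount(microstructure)):
        if count != 0:
            newID += 1
            microstructure = np.where(microstructure == ID, newID, microstructure)
    print microstructure  # -> [1 1 2 3 2]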
|
||||
newInfo['microstructures'] = microstructure.max()
|
||||
|
||||
#--- report ---------------------------------------------------------------------------------------
|
||||
if (any(newInfo['grid'] != info['grid'])):
|
||||
file['croak'].write('--> grid a b c: %s\n'%(' x '.join(map(str,newInfo['grid']))))
|
||||
if (any(newInfo['size'] != info['size'])):
|
||||
file['croak'].write('--> size x y z: %s\n'%(' x '.join(map(str,newInfo['size']))))
|
||||
if (newInfo['microstructures'] != info['microstructures']):
|
||||
file['croak'].write('--> microstructures: %i\n'%newInfo['microstructures'])
|
||||
|
||||
if np.any(newInfo['grid'] < 1):
|
||||
file['croak'].write('invalid new grid a b c.\n')
|
||||
continue
|
||||
if np.any(newInfo['size'] <= 0.0):
|
||||
file['croak'].write('invalid new size x y z.\n')
|
||||
# --- report ---------------------------------------------------------------------------------------
|
||||
|
||||
remarks = []
|
||||
errors = []
|
||||
|
||||
if (any(newInfo['grid'] != info['grid'])): remarks.append('--> grid a b c: %s'%(' x '.join(map(str,newInfo['grid']))))
|
||||
if (any(newInfo['size'] != info['size'])): remarks.append('--> size x y z: %s'%(' x '.join(map(str,newInfo['size']))))
|
||||
if ( newInfo['microstructures'] != info['microstructures']): remarks.append('--> microstructures: %i'%newInfo['microstructures'])
|
||||
|
||||
if np.any(newInfo['grid'] < 1): errors.append('invalid new grid a b c.')
|
||||
if np.any(newInfo['size'] <= 0.0): errors.append('invalid new size x y z.')
|
||||
|
||||
if remarks != []: table.croak(remarks)
|
||||
if errors != []:
|
||||
table.croak(errors)
|
||||
table.close(dismiss = True)
|
||||
continue
|
||||
|
||||
#--- write header ---------------------------------------------------------------------------------
|
||||
table.labels_clear()
|
||||
# --- write header ---------------------------------------------------------------------------------
|
||||
|
||||
table.info_clear()
|
||||
table.info_append(extra_header+[
|
||||
scriptID + ' ' + ' '.join(sys.argv[1:]),
|
||||
"grid\ta %i\tb %i\tc %i"%(newInfo['grid'][0],newInfo['grid'][1],newInfo['grid'][2],),
|
||||
"size\tx %f\ty %f\tz %f"%(newInfo['size'][0],newInfo['size'][1],newInfo['size'][2],),
|
||||
"origin\tx %f\ty %f\tz %f"%(info['origin'][0],info['origin'][1],info['origin'][2],),
|
||||
"homogenization\t%i"%info['homogenization'],
|
||||
"microstructures\t%i"%(newInfo['microstructures']),
|
||||
"grid\ta {grid[0]}\tb {grid[1]}\tc {grid[2]}".format(grid=newInfo['grid']),
|
||||
"size\tx {size[0]}\ty {size[1]}\tz {size[2]}".format(size=newInfo['size']),
|
||||
"origin\tx {origin[0]}\ty {origin[1]}\tz {origin[2]}".format(origin=info['origin']),
|
||||
"homogenization\t{homog}".format(homog=info['homogenization']),
|
||||
"microstructures\t{microstructures}".format(microstructures=newInfo['microstructures']),
|
||||
])
|
||||
table.labels_clear()
|
||||
table.head_write()
|
||||
table.output_flush()
|
||||
|
||||
# --- write microstructure information ------------------------------------------------------------
|
||||
|
||||
formatwidth = int(math.floor(math.log10(microstructure.max())+1))
|
||||
table.data = microstructure.reshape((newInfo['grid'][0],newInfo['grid'][1]*newInfo['grid'][2]),order='F').transpose()
|
||||
table.data_writeArray('%%%ii'%(formatwidth),delimiter=' ')
|
||||
table.data_writeArray('%%%ii'%(formatwidth),delimiter = ' ')
|
||||
|
||||
#--- output finalization --------------------------------------------------------------------------
|
||||
if file['name'] != 'STDIN':
|
||||
table.input_close()
|
||||
table.output_close()
|
||||
os.rename(file['name']+'_tmp',file['name'])
|
||||
|
||||
# --- output finalization --------------------------------------------------------------------------
|
||||
|
||||
table.close() # close ASCII table
|
||||
if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new
|
||||
|
|
|
@@ -13,125 +13,78 @@ scriptName = os.path.splitext(scriptID.split()[1])[0]
|
|||
#--------------------------------------------------------------------------------------------------
|
||||
# MAIN
|
||||
#--------------------------------------------------------------------------------------------------
|
||||
identifiers = {
|
||||
'grid': ['a','b','c'],
|
||||
'size': ['x','y','z'],
|
||||
'origin': ['x','y','z'],
|
||||
}
|
||||
mappings = {
|
||||
'grid': lambda x: int(x),
|
||||
'size': lambda x: float(x),
|
||||
'origin': lambda x: float(x),
|
||||
'homogenization': lambda x: int(x),
|
||||
'microstructures': lambda x: int(x),
|
||||
}
|
||||
|
||||
parser = OptionParser(option_class=damask.extendableOption, usage='%prog [geomfile[s]]', description = """
|
||||
Produce ASCIItable of structure data from geom description
|
||||
|
||||
""", version = scriptID)
|
||||
|
||||
parser.add_option('-p','--position',
|
||||
dest = 'position',
|
||||
type = 'string', metavar = 'string',
|
||||
help = 'column label for position [%default]')
|
||||
|
||||
parser.set_defaults(position = 'pos',
|
||||
)
|
||||
|
||||
(options, filenames) = parser.parse_args()
|
||||
|
||||
#--- setup file handles --------------------------------------------------------------------------
|
||||
files = []
|
||||
if filenames == []:
|
||||
files.append({'name':'STDIN',
|
||||
'input':sys.stdin,
|
||||
'output':sys.stdout,
|
||||
'croak':sys.stderr,
|
||||
})
|
||||
else:
|
||||
for name in filenames:
|
||||
if os.path.exists(name):
|
||||
files.append({'name':name,
|
||||
'croak':sys.stdout,
|
||||
})
|
||||
# --- loop over input files -------------------------------------------------------------------------
|
||||
|
||||
#--- loop over input files ------------------------------------------------------------------------
|
||||
for file in files:
|
||||
if file['name'] != 'STDIN':
|
||||
file['input'] = open(file['name'])
|
||||
file['output'] = open(os.path.splitext(file['name'])[0]+'.txt','w')
|
||||
if filenames == []: filenames = ['STDIN']
|
||||
|
||||
file['croak'].write('\033[1m' + scriptName + '\033[0m' + (': '+file['name'] if file['name'] != 'STDIN' else '') + '\n')
|
||||
for name in filenames:
|
||||
if not (name == 'STDIN' or os.path.exists(name)): continue
|
||||
table = damask.ASCIItable(name = name, outname = os.path.splitext(name)[0]+'.txt',
|
||||
buffered = False, labeled = False)
|
||||
table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
|
||||
|
||||
theTable = damask.ASCIItable(file['input'],file['output'],labels = False)
|
||||
theTable.head_read()
|
||||
# --- interpret header ----------------------------------------------------------------------------
|
||||
|
||||
#--- interpret header ----------------------------------------------------------------------------
|
||||
table.head_read()
|
||||
info,extra_header = table.head_getGeom()
|
||||
|
||||
table.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))),
|
||||
'size x y z: %s'%(' x '.join(map(str,info['size']))),
|
||||
'origin x y z: %s'%(' : '.join(map(str,info['origin']))),
|
||||
'homogenization: %i'%info['homogenization'],
|
||||
'microstructures: %i'%info['microstructures'],
|
||||
])
|
||||
|
||||
info = {
|
||||
'grid': np.zeros(3,'i'),
|
||||
'size': np.zeros(3,'d'),
|
||||
'origin': np.zeros(3,'d'),
|
||||
'homogenization': 0,
|
||||
'microstructures': 0,
|
||||
}
|
||||
|
||||
for header in theTable.info:
|
||||
headitems = map(str.lower,header.split())
|
||||
if len(headitems) == 0: continue
|
||||
if headitems[0] in mappings.keys():
|
||||
if headitems[0] in identifiers.keys():
|
||||
for i in xrange(len(identifiers[headitems[0]])):
|
||||
info[headitems[0]][i] = \
|
||||
mappings[headitems[0]](headitems[headitems.index(identifiers[headitems[0]][i])+1])
|
||||
else:
|
||||
info[headitems[0]] = mappings[headitems[0]](headitems[1])
|
||||
|
||||
file['croak'].write('grid a b c: %s\n'%(' x '.join(map(str,info['grid']))) + \
|
||||
'size x y z: %s\n'%(' x '.join(map(str,info['size']))) + \
|
||||
'origin x y z: %s\n'%(' : '.join(map(str,info['origin']))) + \
|
||||
'homogenization: %i\n'%info['homogenization'] + \
|
||||
'microstructures: %i\n'%info['microstructures'])
|
||||
|
||||
if np.any(info['grid'] < 1):
|
||||
file['croak'].write('invalid grid a b c.\n')
|
||||
continue
|
||||
if np.any(info['size'] <= 0.0):
|
||||
file['croak'].write('invalid size x y z.\n')
|
||||
errors = []
|
||||
if np.any(info['grid'] < 1): errors.append('invalid grid a b c.')
|
||||
if np.any(info['size'] <= 0.0): errors.append('invalid size x y z.')
|
||||
if errors != []:
|
||||
table.croak(errors)
|
||||
table.close(dismiss = True)
|
||||
continue
|
||||
|
||||
# --- read data ------------------------------------------------------------------------------------
|
||||
|
||||
microstructure = table.microstructure_read(info['grid']) # read microstructure
|
||||
|
||||
# ------------------------------------------ assemble header ---------------------------------------
|
||||
|
||||
theTable.labels_clear()
|
||||
theTable.labels_append(['%i_pos'%(i+1) for i in range(3)]+['microstructure'])
|
||||
|
||||
theTable.head_write()
|
||||
table.info_clear()
|
||||
table.info_append(extra_header + [scriptID + '\t' + ' '.join(sys.argv[1:])])
|
||||
table.labels_clear()
|
||||
table.labels_append(['{dim}_{label}'.format(dim = 1+i,label = options.position) for i in range(3)]+['microstructure'])
|
||||
table.head_write()
|
||||
table.output_flush()
|
||||
|
||||
#--- generate grid --------------------------------------------------------------------------------
|
||||
|
||||
xx = np.arange(float(info['grid'][0]))/info['grid'][0]*info['size'][0]+info['origin'][0]
|
||||
yy = np.arange(float(info['grid'][1]))/info['grid'][1]*info['size'][1]+info['origin'][1]
|
||||
zz = np.arange(float(info['grid'][2]))/info['grid'][2]*info['size'][2]+info['origin'][2]
|
||||
|
||||
#--- read microstructure information --------------------------------------------------------------
|
||||
x = (0.5 + np.arange(info['grid'][0],dtype=float))/info['grid'][0]*info['size'][0]+info['origin'][0]
|
||||
y = (0.5 + np.arange(info['grid'][1],dtype=float))/info['grid'][1]*info['size'][1]+info['origin'][1]
|
||||
z = (0.5 + np.arange(info['grid'][2],dtype=float))/info['grid'][2]*info['size'][2]+info['origin'][2]
|
||||
|
||||
i = 0
|
||||
outputAlive = True
|
||||
|
||||
while outputAlive and theTable.data_read():
|
||||
items = theTable.data
|
||||
if len(items) > 2:
|
||||
if items[1].lower() == 'of': items = [int(items[2])]*int(items[0])
|
||||
elif items[1].lower() == 'to': items = xrange(int(items[0]),1+int(items[2]))
|
||||
else: items = map(int,items)
|
||||
else: items = map(int,items)
|
||||
xx = np.tile( x, info['grid'][1]* info['grid'][2])
|
||||
yy = np.tile(np.repeat(y,info['grid'][0] ),info['grid'][2])
|
||||
zz = np.repeat(z,info['grid'][0]*info['grid'][1])
|
||||
|
||||
for item in items:
|
||||
theTable.data = [xx[ i%info['grid'][0]],
|
||||
yy[(i/info['grid'][0])%info['grid'][1]],
|
||||
zz[ i/info['grid'][0]/info['grid'][1]],
|
||||
item]
|
||||
i += 1
|
||||
outputAlive = theTable.data_write() # output processed line
|
||||
if not outputAlive: break
|
||||
table.data = np.squeeze(np.dstack((xx,yy,zz,microstructure)))
|
||||
table.data_writeArray()
|
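The tile/repeat calls above build the flattened cell-center coordinates with x varying fastest, then y, then z; a minimal sketch for a 2 x 2 x 1 grid:

    import numpy as np

    x, y, z = np.array([0.25, 0.75]), np.array([0.25, 0.75]), np.array([0.5])
    xx = np.tile(x, 2*1)                  # [0.25 0.75 0.25 0.75]
    yy = np.tile(np.repeat(y, 2), 1)      # [0.25 0.25 0.75 0.75]
    zz = np.repeat(z, 2*2)                # [0.5  0.5  0.5  0.5 ]
    print np.squeeze(np.dstack((xx, yy, zz)))  # one (x,y,z) row per grid cell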
||||
|
||||
# ------------------------------------------ finalize output ---------------------------------------
|
||||
|
||||
theTable.output_flush() # just in case of buffered ASCII table
|
||||
|
||||
if file['name'] != 'STDIN':
|
||||
file['input'].close() # close input ASCII table
|
||||
file['output'].close() # close output ASCII table
|
||||
table.close()
|
||||
|
|
|
@@ -12,35 +12,29 @@ scriptName = os.path.splitext(scriptID.split()[1])[0]
|
|||
#--------------------------------------------------------------------------------------------------
|
||||
# MAIN
|
||||
#--------------------------------------------------------------------------------------------------
|
||||
identifiers = {
|
||||
'grid': ['a','b','c'],
|
||||
'size': ['x','y','z'],
|
||||
'origin': ['x','y','z'],
|
||||
}
|
||||
mappings = {
|
||||
'grid': lambda x: int(x),
|
||||
'size': lambda x: float(x),
|
||||
'origin': lambda x: float(x),
|
||||
'homogenization': lambda x: int(x),
|
||||
'microstructures': lambda x: int(x),
|
||||
}
|
||||
|
||||
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
|
||||
translate microstructure indices (shift or substitute) and/or geometry origin.
|
||||
|
||||
""", version=scriptID)
|
||||
|
||||
parser.add_option('-o', '--origin', dest='origin', type='float', nargs = 3,
|
||||
help='offset from old to new origin of grid', metavar=' '.join(['float']*3))
|
||||
parser.add_option('-m', '--microstructure', dest='microstructure', type='int',
|
||||
help='offset from old to new microstructure indices', metavar='int')
|
||||
parser.add_option('-s', '--substitute', action='extend', dest='substitute',
|
||||
help='substitutions of microstructure indices from,to,from,to,...', metavar='<string LIST>')
|
||||
parser.add_option('-o', '--origin',
|
||||
dest = 'origin',
|
||||
type = 'float', nargs = 3, metavar = ' '.join(['float']*3),
|
||||
help = 'offset from old to new origin of grid')
|
||||
parser.add_option('-m', '--microstructure',
|
||||
dest = 'microstructure',
|
||||
type = 'int', metavar = 'int',
|
||||
help = 'offset from old to new microstructure indices')
|
||||
parser.add_option('-s', '--substitute',
|
||||
dest = 'substitute',
|
||||
action = 'extend', metavar = '<string LIST>',
|
||||
help = 'substitutions of microstructure indices from,to,from,to,...')
|
||||
|
||||
parser.set_defaults(origin = (0.0,0.0,0.0))
|
||||
parser.set_defaults(microstructure = 0)
|
||||
parser.set_defaults(substitute = [])
|
||||
parser.set_defaults(twoD = False)
|
||||
parser.set_defaults(origin = (0.0,0.0,0.0),
|
||||
microstructure = 0,
|
||||
substitute = [],
|
||||
)
|
||||
|
||||
(options, filenames) = parser.parse_args()
|
||||
|
||||
|
@@ -48,121 +42,84 @@ sub = {}
|
|||
for i in xrange(len(options.substitute)/2): # split substitution list into "from" -> "to"
|
||||
sub[int(options.substitute[i*2])] = int(options.substitute[i*2+1])
|
||||
|
||||
#--- setup file handles ---------------------------------------------------------------------------
|
||||
files = []
|
||||
if filenames == []:
|
||||
files.append({'name':'STDIN',
|
||||
'input':sys.stdin,
|
||||
'output':sys.stdout,
|
||||
'croak':sys.stderr,
|
||||
})
|
||||
else:
|
||||
for name in filenames:
|
||||
if os.path.exists(name):
|
||||
files.append({'name':name,
|
||||
'input':open(name),
|
||||
'output':open(name+'_tmp','w'),
|
||||
'croak':sys.stdout,
|
||||
})
|
||||
# --- loop over input files -------------------------------------------------------------------------
|
||||
|
||||
#--- loop over input files ------------------------------------------------------------------------
|
||||
for file in files:
|
||||
file['croak'].write('\033[1m' + scriptName + '\033[0m: ' + (file['name'] if file['name'] != 'STDIN' else '') + '\n')
|
||||
if filenames == []: filenames = ['STDIN']
|
||||
|
||||
theTable = damask.ASCIItable(file['input'],file['output'],labels=False)
|
||||
theTable.head_read()
|
||||
for name in filenames:
|
||||
if not (name == 'STDIN' or os.path.exists(name)): continue
|
||||
table = damask.ASCIItable(name = name, outname = name+'_tmp',
|
||||
buffered = False, labeled = False)
|
||||
table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
|
||||
|
||||
# --- interpret header ----------------------------------------------------------------------------
|
||||
|
||||
table.head_read()
|
||||
info,extra_header = table.head_getGeom()
|
||||
|
||||
table.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))),
|
||||
'size x y z: %s'%(' x '.join(map(str,info['size']))),
|
||||
'origin x y z: %s'%(' : '.join(map(str,info['origin']))),
|
||||
'homogenization: %i'%info['homogenization'],
|
||||
'microstructures: %i'%info['microstructures'],
|
||||
])
|
||||
|
||||
errors = []
|
||||
if np.any(info['grid'] < 1): errors.append('invalid grid a b c.')
|
||||
if np.any(info['size'] <= 0.0): errors.append('invalid size x y z.')
|
||||
if errors != []:
|
||||
table.croak(errors)
|
||||
table.close(dismiss = True)
|
||||
continue
|
||||
|
||||
# --- read data ------------------------------------------------------------------------------------
|
||||
|
||||
microstructure = table.microstructure_read(info['grid']) # read microstructure
|
||||
|
||||
# --- do work ------------------------------------------------------------------------------------
|
||||
|
||||
#--- interpret header ----------------------------------------------------------------------------
|
||||
info = {
|
||||
'grid': np.zeros(3,'i'),
|
||||
'size': np.zeros(3,'d'),
|
||||
'origin': np.zeros(3,'d'),
|
||||
'homogenization': 0,
|
||||
'microstructures': 0,
|
||||
}
|
||||
newInfo = {
|
||||
'origin': np.zeros(3,'d'),
|
||||
'microstructures': 0,
|
||||
}
|
||||
extra_header = []
|
||||
'origin': np.zeros(3,'d'),
|
||||
'microstructures': 0,
|
||||
}
|
||||
|
||||
for header in theTable.info:
|
||||
headitems = map(str.lower,header.split())
|
||||
if len(headitems) == 0: continue
|
||||
if headitems[0] in mappings.keys():
|
||||
if headitems[0] in identifiers.keys():
|
||||
for i in xrange(len(identifiers[headitems[0]])):
|
||||
info[headitems[0]][i] = \
|
||||
mappings[headitems[0]](headitems[headitems.index(identifiers[headitems[0]][i])+1])
|
||||
else:
|
||||
info[headitems[0]] = mappings[headitems[0]](headitems[1])
|
||||
else:
|
||||
extra_header.append(header)
|
||||
|
||||
file['croak'].write('grid a b c: %s\n'%(' x '.join(map(str,info['grid']))) + \
|
||||
'size x y z: %s\n'%(' x '.join(map(str,info['size']))) + \
|
||||
'origin x y z: %s\n'%(' : '.join(map(str,info['origin']))) + \
|
||||
'homogenization: %i\n'%info['homogenization'] + \
|
||||
'microstructures: %i\n'%info['microstructures'])
|
||||
|
||||
if np.any(info['grid'] < 1):
|
||||
file['croak'].write('invalid grid a b c.\n')
|
||||
continue
|
||||
if np.any(info['size'] <= 0.0):
|
||||
file['croak'].write('invalid size x y z.\n')
|
||||
continue
|
||||
|
||||
#--- read data ------------------------------------------------------------------------------------
|
||||
microstructure = np.zeros(info['grid'].prod(),'i')
|
||||
i = 0
|
||||
while theTable.data_read():
|
||||
items = theTable.data
|
||||
if len(items) > 2:
|
||||
if items[1].lower() == 'of': items = [int(items[2])]*int(items[0])
|
||||
elif items[1].lower() == 'to': items = xrange(int(items[0]),1+int(items[2]))
|
||||
else: items = map(int,items)
|
||||
else: items = map(int,items)
|
||||
|
||||
s = len(items)
|
||||
microstructure[i:i+s] = items
|
||||
i += s
|
||||
|
||||
#--- do work ------------------------------------------------------------------------------------
|
||||
substituted = np.copy(microstructure)
|
||||
for k, v in sub.iteritems(): substituted[microstructure==k] = v # substitute microstructure indices
|
||||
for k, v in sub.iteritems(): substituted[microstructure==k] = v # substitute microstructure indices
|
||||
|
||||
substituted += options.microstructure # shift microstructure indices
|
||||
substituted += options.microstructure # shift microstructure indices
|
||||
|
||||
newInfo['origin'] = info['origin'] + options.origin
|
||||
newInfo['microstructures'] = substituted.max()
|
||||
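In isolation, the substitution and shift above amount to the following (a minimal sketch; the sub mapping of old index 2 to new index 5 and the shift of 10 are made-up stand-ins for the parsed options):

  import numpy as np

  microstructure = np.array([1, 2, 2, 3])
  sub = {2: 5}                                               # hypothetical mapping, cf. option parsing
  substituted = np.copy(microstructure)
  for k, v in sub.iteritems(): substituted[microstructure == k] = v
  substituted += 10                                          # constant shift, cf. options.microstructure
  print substituted                                          # [11 15 15 13]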
|
||||
#--- report ---------------------------------------------------------------------------------------
|
||||
if (any(newInfo['origin'] != info['origin'])):
|
||||
file['croak'].write('--> origin x y z: %s\n'%(' : '.join(map(str,newInfo['origin']))))
|
||||
if (newInfo['microstructures'] != info['microstructures']):
|
||||
file['croak'].write('--> microstructures: %i\n'%newInfo['microstructures'])
|
||||
# --- report ---------------------------------------------------------------------------------------
|
||||
|
||||
#--- write header ---------------------------------------------------------------------------------
|
||||
theTable.labels_clear()
|
||||
theTable.info_clear()
|
||||
theTable.info_append(extra_header+[
|
||||
remarks = []
|
||||
if (any(newInfo['origin'] != info['origin'])): remarks.append('--> origin x y z: %s'%(' : '.join(map(str,newInfo['origin']))))
|
||||
if ( newInfo['microstructures'] != info['microstructures']): remarks.append('--> microstructures: %i'%newInfo['microstructures'])
|
||||
if remarks != []: table.croak(remarks)
|
||||
|
||||
# --- write header ---------------------------------------------------------------------------------
|
||||
|
||||
table.labels_clear()
|
||||
table.info_clear()
|
||||
table.info_append(extra_header+[
|
||||
scriptID + ' ' + ' '.join(sys.argv[1:]),
|
||||
"grid\ta %i\tb %i\tc %i"%(info['grid'][0],info['grid'][1],info['grid'][2],),
|
||||
"size\tx %f\ty %f\tz %f"%(info['size'][0],info['size'][1],info['size'][2],),
|
||||
"origin\tx %f\ty %f\tz %f"%(newInfo['origin'][0],newInfo['origin'][1],newInfo['origin'][2],),
|
||||
"homogenization\t%i"%info['homogenization'],
|
||||
"microstructures\t%i"%(newInfo['microstructures']),
|
||||
"grid\ta {grid[0]}\tb {grid[1]}\tc {grid[2]}".format(grid=info['grid']),
|
||||
"size\tx {size[0]}\ty {size[1]}\tz {size[2]}".format(size=info['size']),
|
||||
"origin\tx {origin[0]}\ty {origin[1]}\tz {origin[2]}".format(origin=newInfo['origin']),
|
||||
"homogenization\t{homog}".format(homog=info['homogenization']),
|
||||
"microstructures\t{microstructures}".format(microstructures=newInfo['microstructures']),
|
||||
])
|
||||
theTable.head_write()
|
||||
theTable.output_flush()
|
||||
table.head_write()
|
||||
table.output_flush()
|
||||
|
||||
# --- write microstructure information ------------------------------------------------------------
|
||||
formatwidth = int(math.floor(math.log10(substituted.max())+1))
|
||||
theTable.data = substituted.reshape((info['grid'][0],info['grid'][1]*info['grid'][2]),order='F').transpose()
|
||||
theTable.data_writeArray('%%%ii'%(formatwidth),delimiter=' ')
|
||||
|
||||
formatwidth = int(math.floor(math.log10(substituted.max())+1))
|
||||
table.data = substituted.reshape((info['grid'][0],info['grid'][1]*info['grid'][2]),order='F').transpose()
|
||||
table.data_writeArray('%%%ii'%(formatwidth),delimiter = ' ')
|
||||
|
||||
#--- output finalization --------------------------------------------------------------------------
|
||||
if file['name'] != 'STDIN':
|
||||
theTable.input_close()
|
||||
theTable.output_close()
|
||||
os.rename(file['name']+'_tmp',file['name'])
|
||||
# --- output finalization --------------------------------------------------------------------------
|
||||
|
||||
table.close() # close ASCII table
|
||||
if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new
|
||||
|
|
|
@ -12,122 +12,75 @@ scriptName = os.path.splitext(scriptID.split()[1])[0]
|
|||
#--------------------------------------------------------------------------------------------------
|
||||
# MAIN
|
||||
#--------------------------------------------------------------------------------------------------
|
||||
identifiers = {
|
||||
'grid': ['a','b','c'],
|
||||
'size': ['x','y','z'],
|
||||
'origin': ['x','y','z'],
|
||||
}
|
||||
mappings = {
|
||||
'grid': lambda x: int(x),
|
||||
'size': lambda x: float(x),
|
||||
'origin': lambda x: float(x),
|
||||
'homogenization': lambda x: int(x),
|
||||
'microstructures': lambda x: int(x),
|
||||
}
|
||||
|
||||
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
|
||||
Unpack geometry files containing ranges "a to b" and/or "n of x" multiples (each such compressed expression occupying a line of its own).
|
||||
""", version = scriptID)
|
||||
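As a sketch of the compression scheme named here (a hypothetical helper, mirroring the decoding loop that appears further down in this script):

  def expand(tokens):                                        # tokens: one whitespace-split geom line
    if   len(tokens) > 2 and tokens[1].lower() == 'of':
      return [int(tokens[2])]*int(tokens[0])                 # "3 of 5" --> [5, 5, 5]
    elif len(tokens) > 2 and tokens[1].lower() == 'to':
      return range(int(tokens[0]),1+int(tokens[2]))          # "1 to 4" --> [1, 2, 3, 4]
    else:
      return map(int,tokens)                                 # plain row of indices

  print expand('3 of 5'.split())                             # [5, 5, 5]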
|
||||
parser.add_option('-1', '--onedimensional', dest='oneD', action='store_true', \
|
||||
help='output geom file with one-dimensional data arrangement [%default]')
|
||||
parser.add_option('-1', '--onedimensional',
|
||||
dest = 'oneD',
|
||||
action = 'store_true',
|
||||
help = 'output geom file with one-dimensional data arrangement [%default]')
|
||||
|
||||
parser.set_defaults(oneD = False)
|
||||
parser.set_defaults(oneD = False,
|
||||
)
|
||||
|
||||
(options, filenames) = parser.parse_args()
|
||||
|
||||
# ------------------------------------------ setup file handles ------------------------------------
|
||||
files = []
|
||||
if filenames == []:
|
||||
files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
|
||||
else:
|
||||
for name in filenames:
|
||||
if os.path.exists(name):
|
||||
files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
|
||||
# --- loop over input files -------------------------------------------------------------------------
|
||||
|
||||
# ------------------------------------------ loop over input files ---------------------------------
|
||||
for file in files:
|
||||
file['croak'].write('\033[1m' + scriptName + '\033[0m: ' + (file['name'] if file['name'] != 'STDIN' else '') + '\n')
|
||||
if filenames == []: filenames = ['STDIN']
|
||||
|
||||
table = damask.ASCIItable(file['input'],file['output'],labels = False,buffered = False) # make unbuffered ASCII_table
|
||||
table.head_read() # read ASCII header info
|
||||
for name in filenames:
|
||||
if not (name == 'STDIN' or os.path.exists(name)): continue
|
||||
table = damask.ASCIItable(name = name, outname = name+'_tmp',
|
||||
buffered = False, labeled = False)
|
||||
table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
|
||||
|
||||
# --- interpret header ----------------------------------------------------------------------------
|
||||
|
||||
#--- interpret header ----------------------------------------------------------------------------
|
||||
info = {
|
||||
'grid': np.zeros(3,'i'),
|
||||
'size': np.zeros(3,'d'),
|
||||
'origin': np.zeros(3,'d'),
|
||||
'homogenization': 0,
|
||||
'microstructures': 0,
|
||||
}
|
||||
extra_header = []
|
||||
table.head_read()
|
||||
info,extra_header = table.head_getGeom()
|
||||
|
||||
table.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))),
|
||||
'size x y z: %s'%(' x '.join(map(str,info['size']))),
|
||||
'origin x y z: %s'%(' : '.join(map(str,info['origin']))),
|
||||
'homogenization: %i'%info['homogenization'],
|
||||
'microstructures: %i'%info['microstructures'],
|
||||
])
|
||||
|
||||
for header in table.info:
|
||||
headitems = map(str.lower,header.split())
|
||||
if len(headitems) == 0: continue
|
||||
if headitems[0] in mappings.keys():
|
||||
if headitems[0] in identifiers.keys():
|
||||
for i in xrange(len(identifiers[headitems[0]])):
|
||||
info[headitems[0]][i] = \
|
||||
mappings[headitems[0]](headitems[headitems.index(identifiers[headitems[0]][i])+1])
|
||||
else:
|
||||
info[headitems[0]] = mappings[headitems[0]](headitems[1])
|
||||
else:
|
||||
extra_header.append(header)
|
||||
|
||||
file['croak'].write('grid a b c: %s\n'%(' x '.join(map(str,info['grid']))) + \
|
||||
'size x y z: %s\n'%(' x '.join(map(str,info['size']))) + \
|
||||
'origin x y z: %s\n'%(' : '.join(map(str,info['origin']))) + \
|
||||
'homogenization: %i\n'%info['homogenization'] + \
|
||||
'microstructures: %i\n'%info['microstructures'])
|
||||
|
||||
if np.any(info['grid'] < 1):
|
||||
file['croak'].write('invalid grid a b c.\n')
|
||||
continue
|
||||
if np.any(info['size'] <= 0.0):
|
||||
file['croak'].write('invalid size x y z.\n')
|
||||
errors = []
|
||||
if np.any(info['grid'] < 1): errors.append('invalid grid a b c.')
|
||||
if np.any(info['size'] <= 0.0): errors.append('invalid size x y z.')
|
||||
if errors != []:
|
||||
table.croak(errors)
|
||||
table.close(dismiss = True)
|
||||
continue
|
||||
|
||||
#--- read data ------------------------------------------------------------------------------------
|
||||
microstructure = np.zeros(info['grid'].prod(),'i')
|
||||
i = 0
|
||||
# --- write header ---------------------------------------------------------------------------------
|
||||
|
||||
while table.data_read(): # read next data line of ASCII table
|
||||
items = table.data
|
||||
if len(items) > 2:
|
||||
if items[1].lower() == 'of': items = [int(items[2])]*int(items[0])
|
||||
elif items[1].lower() == 'to': items = xrange(int(items[0]),1+int(items[2]))
|
||||
else: items = map(int,items)
|
||||
else: items = map(int,items)
|
||||
|
||||
s = len(items)
|
||||
microstructure[i:i+s] = items
|
||||
i += s
|
||||
|
||||
#--- write header ---------------------------------------------------------------------------------
|
||||
table.labels_clear()
|
||||
table.info_clear()
|
||||
table.info_append(extra_header+[
|
||||
scriptID + ' ' + ' '.join(sys.argv[1:]),
|
||||
"grid\ta %i\tb %i\tc %i"%(info['grid'][0],info['grid'][1],info['grid'][2],),
|
||||
"size\tx %e\ty %e\tz %e"%(info['size'][0],info['size'][1],info['size'][2],),
|
||||
"origin\tx %e\ty %e\tz %e"%(info['origin'][0],info['origin'][1],info['origin'][2],),
|
||||
"homogenization\t%i"%info['homogenization'],
|
||||
"microstructures\t%i"%(info['microstructures']),
|
||||
"grid\ta {grid[0]}\tb {grid[1]}\tc {grid[2]}".format(grid=info['grid']),
|
||||
"size\tx {size[0]}\ty {size[1]}\tz {size[2]}".format(size=info['size']),
|
||||
"origin\tx {origin[0]}\ty {origin[1]}\tz {origin[2]}".format(origin=info['origin']),
|
||||
"homogenization\t{homog}".format(homog=info['homogenization']),
|
||||
"microstructures\t{microstructures}".format(microstructures=info['microstructures']),
|
||||
])
|
||||
table.head_write()
|
||||
table.output_flush()
|
||||
|
||||
# --- write microstructure information ------------------------------------------------------------
|
||||
formatwidth = int(math.floor(math.log10(microstructure.max())+1))
|
||||
if options.oneD:
|
||||
table.data = microstructure
|
||||
else:
|
||||
table.data = microstructure.reshape((info['grid'][0],info['grid'][1]*info['grid'][2]),order='F').transpose()
|
||||
|
||||
microstructure = table.microstructure_read(info['grid']) # read microstructure
|
||||
formatwidth = int(math.floor(math.log10(microstructure.max())+1)) # efficient number printing format
|
||||
if options.oneD: table.data = microstructure
|
||||
else: table.data = microstructure.reshape((info['grid'][0],info['grid'][1]*info['grid'][2]),order='F').transpose()
|
||||
table.data_writeArray('%%%ii'%(formatwidth),delimiter = ' ')
|
||||
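The width formula counts the decimal digits of the largest index, and '%%%ii'%formatwidth then expands to a fixed-width integer format; a quick check:

  import math

  for v in (9, 10, 123):
    print v, int(math.floor(math.log10(v)+1))                # 1, 2 and 3 digits
  fmt = '%%%ii'%3                                            # '%%' escapes to a literal '%', giving '%3i'
  print fmt%7                                                # '  7'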
|
||||
#--- output finalization --------------------------------------------------------------------------
|
||||
if file['name'] != 'STDIN':
|
||||
table.input_close() # close input ASCII table
|
||||
table.output_close() # close input ASCII table
|
||||
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new
|
||||
|
||||
table.close() # close ASCII table
|
||||
if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new
|
||||
|
|
|
@ -13,18 +13,6 @@ scriptName = os.path.splitext(scriptID.split()[1])[0]
|
|||
#--------------------------------------------------------------------------------------------------
|
||||
# MAIN
|
||||
#--------------------------------------------------------------------------------------------------
|
||||
identifiers = {
|
||||
'grid': ['a','b','c'],
|
||||
'size': ['x','y','z'],
|
||||
'origin': ['x','y','z'],
|
||||
}
|
||||
mappings = {
|
||||
'grid': lambda x: int(x),
|
||||
'size': lambda x: float(x),
|
||||
'origin': lambda x: float(x),
|
||||
'homogenization': lambda x: int(x),
|
||||
'microstructures': lambda x: int(x),
|
||||
}
|
||||
|
||||
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
|
||||
Offset microstructure index for points which see a microstructure different from themselves within a given (cubic) vicinity,
|
||||
|
@ -32,131 +20,98 @@ i.e. within the region close to a grain/phase boundary.
|
|||
|
||||
""", version = scriptID)
|
||||
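The tagging criterion of this script can be pictured on a toy array: wherever a maximum and a minimum filter over the cubic vicinity disagree, more than one microstructure index is present and the offset applies (a sketch only; the grid values and offset are made up):

  import numpy as np
  from scipy import ndimage

  m = np.array([[1, 1, 2],
                [1, 1, 2],
                [1, 2, 2]])
  boundary = ndimage.maximum_filter(m,size=3,mode='wrap') != \
             ndimage.minimum_filter(m,size=3,mode='wrap')
  print np.where(boundary, m + 10, m)                        # on this small toy grid every voxel is tagged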
|
||||
parser.add_option('-v', '--vicinity', dest='vicinity', type='int', metavar='int', \
|
||||
help='voxel distance checked for presence of other microstructure [%default]')
|
||||
parser.add_option('-m', '--microstructureoffset', dest='offset', type='int', metavar='int', \
|
||||
help='offset (positive or negative) for tagged microstructure. '+
|
||||
'"0" selects maximum microstructure index [%default]')
|
||||
parser.add_option('-v', '--vicinity',
|
||||
dest = 'vicinity',
|
||||
type = 'int', metavar = 'int',
|
||||
help = 'voxel distance checked for presence of other microstructure [%default]')
|
||||
parser.add_option('-m', '--microstructureoffset',
|
||||
dest='offset',
|
||||
type = 'int', metavar = 'int',
|
||||
help = 'offset (positive or negative) for tagged microstructure indices. '+
|
||||
'"0" selects maximum microstructure index [%default]')
|
||||
|
||||
parser.set_defaults(vicinity = 1)
|
||||
parser.set_defaults(offset = 0)
|
||||
parser.set_defaults(vicinity = 1,
|
||||
offset = 0,
|
||||
)
|
||||
|
||||
(options, filenames) = parser.parse_args()
|
||||
|
||||
#--- setup file handles --------------------------------------------------------------------------
|
||||
files = []
|
||||
if filenames == []:
|
||||
files.append({'name':'STDIN',
|
||||
'input':sys.stdin,
|
||||
'output':sys.stdout,
|
||||
'croak':sys.stderr,
|
||||
})
|
||||
else:
|
||||
for name in filenames:
|
||||
if os.path.exists(name):
|
||||
files.append({'name':name,
|
||||
'input':open(name),
|
||||
'output':open(name+'_tmp','w'),
|
||||
'croak':sys.stdout,
|
||||
})
|
||||
# --- loop over input files -------------------------------------------------------------------------
|
||||
|
||||
#--- loop over input files ------------------------------------------------------------------------
|
||||
for file in files:
|
||||
file['croak'].write('\033[1m' + scriptName + '\033[0m: ' + (file['name'] if file['name'] != 'STDIN' else '') + '\n')
|
||||
if filenames == []: filenames = ['STDIN']
|
||||
|
||||
for name in filenames:
|
||||
if not (name == 'STDIN' or os.path.exists(name)): continue
|
||||
table = damask.ASCIItable(name = name, outname = name+'_tmp',
|
||||
buffered = False, labeled = False)
|
||||
table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
|
||||
|
||||
# --- interpret header ----------------------------------------------------------------------------
|
||||
|
||||
table = damask.ASCIItable(file['input'],file['output'],labels=False)
|
||||
table.head_read()
|
||||
info,extra_header = table.head_getGeom()
|
||||
|
||||
table.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))),
|
||||
'size x y z: %s'%(' x '.join(map(str,info['size']))),
|
||||
'origin x y z: %s'%(' : '.join(map(str,info['origin']))),
|
||||
'homogenization: %i'%info['homogenization'],
|
||||
'microstructures: %i'%info['microstructures'],
|
||||
])
|
||||
|
||||
errors = []
|
||||
if np.any(info['grid'] < 1): errors.append('invalid grid a b c.')
|
||||
if np.any(info['size'] <= 0.0): errors.append('invalid size x y z.')
|
||||
if errors != []:
|
||||
table.croak(errors)
|
||||
table.close(dismiss = True)
|
||||
continue
|
||||
|
||||
# --- read data ------------------------------------------------------------------------------------
|
||||
|
||||
microstructure = table.microstructure_read(info['grid']).reshape(info['grid'],order='F') # read microstructure
|
||||
|
||||
# --- do work ------------------------------------------------------------------------------------
|
||||
|
||||
#--- interpret header ----------------------------------------------------------------------------
|
||||
info = {
|
||||
'grid': np.zeros(3,'i'),
|
||||
'size': np.zeros(3,'d'),
|
||||
'origin': np.zeros(3,'d'),
|
||||
'homogenization': 0,
|
||||
'microstructures': 0,
|
||||
}
|
||||
newInfo = {
|
||||
'microstructures': 0,
|
||||
}
|
||||
extra_header = []
|
||||
|
||||
for header in table.info:
|
||||
headitems = map(str.lower,header.split())
|
||||
if len(headitems) == 0: continue
|
||||
if headitems[0] in mappings.keys():
|
||||
if headitems[0] in identifiers.keys():
|
||||
for i in xrange(len(identifiers[headitems[0]])):
|
||||
info[headitems[0]][i] = \
|
||||
mappings[headitems[0]](headitems[headitems.index(identifiers[headitems[0]][i])+1])
|
||||
else:
|
||||
info[headitems[0]] = mappings[headitems[0]](headitems[1])
|
||||
else:
|
||||
extra_header.append(header)
|
||||
|
||||
file['croak'].write('grid a b c: %s\n'%(' x '.join(map(str,info['grid']))) + \
|
||||
'size x y z: %s\n'%(' x '.join(map(str,info['size']))) + \
|
||||
'origin x y z: %s\n'%(' : '.join(map(str,info['origin']))) + \
|
||||
'homogenization: %i\n'%info['homogenization'] + \
|
||||
'microstructures: %i\n'%info['microstructures'])
|
||||
|
||||
if np.any(info['grid'] < 1):
|
||||
file['croak'].write('invalid grid a b c.\n')
|
||||
continue
|
||||
if np.any(info['size'] <= 0.0):
|
||||
file['croak'].write('invalid size x y z.\n')
|
||||
continue
|
||||
|
||||
#--- read data ------------------------------------------------------------------------------------
|
||||
microstructure = np.zeros(info['grid'].prod(),'i')
|
||||
i = 0
|
||||
table.data_rewind()
|
||||
while table.data_read():
|
||||
items = table.data
|
||||
if len(items) > 2:
|
||||
if items[1].lower() == 'of': items = [int(items[2])]*int(items[0])
|
||||
elif items[1].lower() == 'to': items = xrange(int(items[0]),1+int(items[2]))
|
||||
else: items = map(int,items)
|
||||
else: items = map(int,items)
|
||||
|
||||
s = len(items)
|
||||
microstructure[i:i+s] = items
|
||||
i += s
|
||||
|
||||
#--- do work ------------------------------------------------------------------------------------
|
||||
microstructure = microstructure.reshape(info['grid'],order='F')
|
||||
if options.offset == 0:
|
||||
options.offset = microstructure.max()
|
||||
if options.offset == 0: options.offset = microstructure.max()
|
||||
|
||||
microstructure = np.where(ndimage.filters.maximum_filter(microstructure,size=1+2*options.vicinity,mode='wrap') ==
|
||||
ndimage.filters.minimum_filter(microstructure,size=1+2*options.vicinity,mode='wrap'),
|
||||
microstructure, microstructure + options.offset)
|
||||
ndimage.filters.minimum_filter(microstructure,size=1+2*options.vicinity,mode='wrap'),
|
||||
microstructure, microstructure + options.offset)
|
||||
|
||||
newInfo['microstructures'] = microstructure.max()
|
||||
if (newInfo['microstructures'] != info['microstructures']):
|
||||
file['croak'].write('--> microstructures: %i\n'%newInfo['microstructures'])
|
||||
|
||||
#--- write header ---------------------------------------------------------------------------------
|
||||
# --- report ---------------------------------------------------------------------------------------
|
||||
|
||||
remarks = []
|
||||
if ( newInfo['microstructures'] != info['microstructures']): remarks.append('--> microstructures: %i'%newInfo['microstructures'])
|
||||
if remarks != []: table.croak(remarks)
|
||||
|
||||
# --- write header ---------------------------------------------------------------------------------
|
||||
|
||||
table.labels_clear()
|
||||
table.info_clear()
|
||||
table.info_append(extra_header+[
|
||||
scriptID + ' ' + ' '.join(sys.argv[1:]),
|
||||
"grid\ta %i\tb %i\tc %i"%(info['grid'][0],info['grid'][1],info['grid'][2],),
|
||||
"size\tx %f\ty %f\tz %f"%(info['size'][0],info['size'][1],info['size'][2],),
|
||||
"origin\tx %f\ty %f\tz %f"%(info['origin'][0],info['origin'][1],info['origin'][2],),
|
||||
"homogenization\t%i"%info['homogenization'],
|
||||
"microstructures\t%i"%(newInfo['microstructures']),
|
||||
"grid\ta {grid[0]}\tb {grid[1]}\tc {grid[2]}".format(grid=info['grid']),
|
||||
"size\tx {size[0]}\ty {size[1]}\tz {size[2]}".format(size=info['size']),
|
||||
"origin\tx {origin[0]}\ty {origin[1]}\tz {origin[2]}".format(origin=info['origin']),
|
||||
"homogenization\t{homog}".format(homog=info['homogenization']),
|
||||
"microstructures\t{microstructures}".format(microstructures=newInfo['microstructures']),
|
||||
])
|
||||
table.head_write()
|
||||
table.output_flush()
|
||||
|
||||
# --- write microstructure information ------------------------------------------------------------
|
||||
|
||||
formatwidth = int(math.floor(math.log10(microstructure.max())+1))
|
||||
table.data = microstructure.reshape((info['grid'][0],info['grid'][1]*info['grid'][2]),order='F').transpose()
|
||||
table.data_writeArray('%%%ii'%(formatwidth),delimiter=' ')
|
||||
table.data_writeArray('%%%ii'%(formatwidth),delimiter = ' ')
|
||||
|
||||
#--- output finalization --------------------------------------------------------------------------
|
||||
if file['name'] != 'STDIN':
|
||||
table.input_close()
|
||||
table.output_close()
|
||||
os.rename(file['name']+'_tmp',file['name'])
|
||||
# --- output finalization --------------------------------------------------------------------------
|
||||
|
||||
table.close() # close ASCII table
|
||||
if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new
|
||||
|
|
|
@ -12,54 +12,46 @@ scriptName = scriptID.split()[1]
|
|||
def integerFactorization(i):
|
||||
|
||||
j = int(math.floor(math.sqrt(float(i))))
|
||||
while (j>1 and int(i)%j != 0):
|
||||
while j>1 and int(i)%j != 0:
|
||||
j -= 1
|
||||
return j
|
||||
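For example, integerFactorization(12) starts at j = 3 (floor of sqrt(12)), finds 12 % 3 == 0 and returns 3, giving the 4 x 3 layout used for the .ang scan grid further down; a prime sample count degenerates to a single-row strip:

  print integerFactorization(12), 12/integerFactorization(12)   # 3 4
  print integerFactorization(7),  7/integerFactorization(7)     # 1 7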
|
||||
def positiveRadians(angle):
|
||||
|
||||
angle = math.radians(float(angle))
|
||||
while angle < 0.0:
|
||||
angle += 2.0*math.pi
|
||||
|
||||
return angle
|
||||
|
||||
|
||||
def getHeader(sizeX,sizeY,step):
|
||||
def TSLheader(sizeX,sizeY,step):
|
||||
|
||||
return [ \
|
||||
'# TEM_PIXperUM 1.000000', \
|
||||
'# x-star 0.509548', \
|
||||
'# y-star 0.795272', \
|
||||
'# z-star 0.611799', \
|
||||
'# WorkingDistance 18.000000', \
|
||||
'#', \
|
||||
'# Phase 1', \
|
||||
'# MaterialName Al', \
|
||||
'# Formula Fe', \
|
||||
'# Info', \
|
||||
'# Symmetry 43', \
|
||||
'# LatticeConstants 2.870 2.870 2.870 90.000 90.000 90.000', \
|
||||
'# NumberFamilies 4', \
|
||||
'# hklFamilies 1 1 0 1 0.000000 1', \
|
||||
'# hklFamilies 2 0 0 1 0.000000 1', \
|
||||
'# hklFamilies 2 1 1 1 0.000000 1', \
|
||||
'# hklFamilies 3 1 0 1 0.000000 1', \
|
||||
'# Categories 0 0 0 0 0 ', \
|
||||
'#', \
|
||||
'# GRID: SquareGrid', \
|
||||
'# XSTEP: ' + str(step), \
|
||||
'# YSTEP: ' + str(step), \
|
||||
'# NCOLS_ODD: ' + str(sizeX), \
|
||||
'# NCOLS_EVEN: ' + str(sizeX), \
|
||||
'# NROWS: ' + str(sizeY), \
|
||||
'#', \
|
||||
'# OPERATOR: ODFsammpling', \
|
||||
'#', \
|
||||
'# SAMPLEID: ', \
|
||||
'#', \
|
||||
'# SCANID: ', \
|
||||
'#', \
|
||||
return [
|
||||
'# TEM_PIXperUM 1.000000',
|
||||
'# x-star 0.509548',
|
||||
'# y-star 0.795272',
|
||||
'# z-star 0.611799',
|
||||
'# WorkingDistance 18.000000',
|
||||
'#',
|
||||
'# Phase 1',
|
||||
'# MaterialName Al',
|
||||
'# Formula Fe',
|
||||
'# Info',
|
||||
'# Symmetry 43',
|
||||
'# LatticeConstants 2.870 2.870 2.870 90.000 90.000 90.000',
|
||||
'# NumberFamilies 4',
|
||||
'# hklFamilies 1 1 0 1 0.000000 1',
|
||||
'# hklFamilies 2 0 0 1 0.000000 1',
|
||||
'# hklFamilies 2 1 1 1 0.000000 1',
|
||||
'# hklFamilies 3 1 0 1 0.000000 1',
|
||||
'# Categories 0 0 0 0 0 ',
|
||||
'#',
|
||||
'# GRID: SquareGrid',
|
||||
'# XSTEP: ' + str(step),
|
||||
'# YSTEP: ' + str(step),
|
||||
'# NCOLS_ODD: ' + str(sizeX),
|
||||
'# NCOLS_EVEN: ' + str(sizeX),
|
||||
'# NROWS: ' + str(sizeY),
|
||||
'#',
|
||||
'# OPERATOR: ODFsampling',
|
||||
'#',
|
||||
'# SAMPLEID: ',
|
||||
'#',
|
||||
'# SCANID: ',
|
||||
'#',
|
||||
]
|
||||
|
||||
def binAsBins(bin,intervals):
|
||||
|
@ -91,8 +83,8 @@ def binAsEulers(bin,intervals,deltas,center):
|
|||
def directInvRepetitions(probability,scale):
|
||||
""" calculate number of samples drawn by direct inversion """
|
||||
nDirectInv = 0
|
||||
for bin in range(len(probability)): # loop over bins
|
||||
nDirectInv += int(round(probability[bin]*scale)) # calc repetition
|
||||
for bin in range(len(probability)): # loop over bins
|
||||
nDirectInv += int(round(probability[bin]*scale)) # calc repetition
|
||||
return nDirectInv
|
||||
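A toy check of this count, and of why the bisection on scale in directInversion below is needed at all: the per-bin roundings rarely hit the requested total exactly.

  probability = [0.50, 0.25, 0.25]                           # toy normalized bin volumes
  scale = 10
  print sum(int(round(p*scale)) for p in probability)        # 5 + 3 + 3 = 11, not 10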
|
||||
|
||||
|
@ -103,11 +95,11 @@ def directInvRepetitions(probability,scale):
|
|||
def directInversion (ODF,nSamples):
|
||||
""" ODF contains 'dV_V' (normalized to 1), 'center', 'intervals', 'limits' (in radians) """
|
||||
|
||||
nOptSamples = max(ODF['nNonZero'],nSamples) # random subsampling if too little samples requested
|
||||
nOptSamples = max(ODF['nNonZero'],nSamples) # random subsampling if too little samples requested
|
||||
|
||||
nInvSamples = 0
|
||||
repetition = [None]*ODF['nBins']
|
||||
probabilityScale = nOptSamples # guess
|
||||
probabilityScale = nOptSamples # guess
|
||||
|
||||
scaleLower = 0.0
|
||||
nInvSamplesLower = 0
|
||||
|
@ -118,7 +110,7 @@ def directInversion (ODF,nSamples):
|
|||
while (\
|
||||
(scaleUpper-scaleLower > scaleUpper*1e-15 or nInvSamplesUpper < nOptSamples) and \
|
||||
nInvSamplesUpper != nOptSamples \
|
||||
): # closer match required?
|
||||
): # closer match required?
|
||||
if nInvSamplesUpper < nOptSamples:
|
||||
scaleLower,scaleUpper = scaleUpper,scaleUpper+incFactor*(scaleUpper-scaleLower)/2.0
|
||||
incFactor *= 2.0
|
||||
|
@ -128,36 +120,37 @@ def directInversion (ODF,nSamples):
|
|||
incFactor = 1.0
|
||||
nInvSamplesUpper = directInvRepetitions(ODF['dV_V'],scaleUpper)
|
||||
nIter += 1
|
||||
file['croak'].write('%i:(%12.11f,%12.11f) %i <= %i <= %i\n'\
|
||||
%(nIter,scaleLower,scaleUpper,nInvSamplesLower,nOptSamples,nInvSamplesUpper))
|
||||
table.croak('%i:(%12.11f,%12.11f) %i <= %i <= %i'%(nIter,scaleLower,scaleUpper,
|
||||
nInvSamplesLower,nOptSamples,nInvSamplesUpper))
|
||||
nInvSamples = nInvSamplesUpper
|
||||
scale = scaleUpper
|
||||
file['croak'].write('created set of %i samples (%12.11f) with scaling %12.11f delivering %i\n'\
|
||||
%(nInvSamples,float(nInvSamples)/nOptSamples-1.0,scale,nSamples))
|
||||
repetition = [None]*ODF['nBins'] # preallocate and clear
|
||||
table.croak('created set of %i samples (%12.11f) with scaling %12.11f delivering %i'%(nInvSamples,
|
||||
float(nInvSamples)/nOptSamples-1.0,
|
||||
scale,nSamples))
|
||||
repetition = [None]*ODF['nBins'] # preallocate and clear
|
||||
|
||||
for bin in range(ODF['nBins']): # loop over bins
|
||||
repetition[bin] = int(round(ODF['dV_V'][bin]*scale)) # calc repetition
|
||||
for bin in range(ODF['nBins']): # loop over bins
|
||||
repetition[bin] = int(round(ODF['dV_V'][bin]*scale)) # calc repetition
|
||||
|
||||
# build set
|
||||
set = [None]*nInvSamples
|
||||
i = 0
|
||||
for bin in range(ODF['nBins']):
|
||||
set[i:i+repetition[bin]] = [bin]*repetition[bin] # fill set with bin, i.e. orientation
|
||||
i += repetition[bin] # advance set counter
|
||||
set[i:i+repetition[bin]] = [bin]*repetition[bin] # fill set with bin, i.e. orientation
|
||||
i += repetition[bin] # advance set counter
|
||||
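Taken on its own, the set construction above is just run-length expansion of the repetition counts (a toy illustration reusing the script's own names, including its shadowing of the set builtin):

  repetition = [2, 0, 3]                                     # made-up counts per bin
  set = [None]*sum(repetition)
  i = 0
  for bin in range(len(repetition)):
    set[i:i+repetition[bin]] = [bin]*repetition[bin]
    i += repetition[bin]
  print set                                                  # [0, 0, 2, 2, 2]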
|
||||
orientations = [None]*nSamples
|
||||
reconstructedODF = [0.0]*ODF['nBins']
|
||||
orientations = np.zeros((nSamples,3),'f')
|
||||
reconstructedODF = np.zeros(ODF['nBins'],'f')
|
||||
unitInc = 1.0/nSamples
|
||||
for j in range(nSamples):
|
||||
if (j == nInvSamples-1): ex = j
|
||||
else: ex = int(round(random.uniform(j+0.5,nInvSamples-0.5)))
|
||||
bin = set[ex]
|
||||
bins = binAsBins(bin,ODF['interval'])
|
||||
bins = binAsBins(bin,ODF['interval']) # PE: why are we doing this??
|
||||
Eulers = binAsEulers(bin,ODF['interval'],ODF['delta'],ODF['center'])
|
||||
orientations[j] = '%g\t%g\t%g' %( math.degrees(Eulers[0]),math.degrees(Eulers[1]),math.degrees(Eulers[2]) )
|
||||
orientations[j] = np.degrees(Eulers)
|
||||
reconstructedODF[bin] += unitInc
|
||||
set[ex] = set[j] # exchange orientations
|
||||
set[ex] = set[j] # exchange orientations
|
||||
|
||||
return orientations, reconstructedODF
|
||||
|
||||
|
@ -169,8 +162,8 @@ def MonteCarloEulers (ODF,nSamples):
|
|||
|
||||
countMC = 0
|
||||
maxdV_V = max(ODF['dV_V'])
|
||||
orientations = [None]*nSamples
|
||||
reconstructedODF = [0.0]*ODF['nBins']
|
||||
orientations = np.zeros((nSamples,3),'f')
|
||||
reconstructedODF = np.zeros(ODF['nBins'],'f')
|
||||
unitInc = 1.0/nSamples
|
||||
|
||||
for j in range(nSamples):
|
||||
|
@ -182,7 +175,7 @@ def MonteCarloEulers (ODF,nSamples):
|
|||
Eulers = [limit*random.random() for limit in ODF['limit']]
|
||||
bins = EulersAsBins(Eulers,ODF['interval'],ODF['delta'],ODF['center'])
|
||||
bin = binsAsBin(bins,ODF['interval'])
|
||||
orientations[j] = '%g\t%g\t%g' %( math.degrees(Eulers[0]),math.degrees(Eulers[1]),math.degrees(Eulers[2]) )
|
||||
orientations[j] = np.degrees(Eulers)
|
||||
reconstructedODF[bin] += unitInc
|
||||
|
||||
return orientations, reconstructedODF, countMC
|
||||
|
@ -193,8 +186,8 @@ def MonteCarloBins (ODF,nSamples):
|
|||
|
||||
countMC = 0
|
||||
maxdV_V = max(ODF['dV_V'])
|
||||
orientations = [None]*nSamples
|
||||
reconstructedODF = [0.0]*ODF['nBins']
|
||||
orientations = np.zeros((nSamples,3),'f')
|
||||
reconstructedODF = np.zeros(ODF['nBins'],'f')
|
||||
unitInc = 1.0/nSamples
|
||||
|
||||
for j in range(nSamples):
|
||||
|
@ -205,7 +198,7 @@ def MonteCarloBins (ODF,nSamples):
|
|||
MC = maxdV_V*random.random()
|
||||
bin = int(ODF['nBins'] * random.random())
|
||||
Eulers = binAsEulers(bin,ODF['interval'],ODF['delta'],ODF['center'])
|
||||
orientations[j] = '%g\t%g\t%g' %( math.degrees(Eulers[0]),math.degrees(Eulers[1]),math.degrees(Eulers[2]) )
|
||||
orientations[j] = np.degrees(Eulers)
|
||||
reconstructedODF[bin] += unitInc
|
||||
|
||||
return orientations, reconstructedODF
|
||||
|
@ -214,8 +207,8 @@ def MonteCarloBins (ODF,nSamples):
|
|||
def TothVanHoutteSTAT (ODF,nSamples):
|
||||
""" ODF contains 'dV_V' (normalized to 1), 'center', 'intervals', 'limits' (in radians) """
|
||||
|
||||
orientations = [None]*nSamples
|
||||
reconstructedODF = [0.0]*ODF['nBins']
|
||||
orientations = np.zeros((nSamples,3),'f')
|
||||
reconstructedODF = np.zeros(ODF['nBins'],'f')
|
||||
unitInc = 1.0/nSamples
|
||||
|
||||
selectors = [random.random() for i in range(nSamples)]
|
||||
|
@ -229,11 +222,12 @@ def TothVanHoutteSTAT (ODF,nSamples):
|
|||
cumdV_V += ODF['dV_V'][bin]
|
||||
while indexSelector < nSamples and selectors[indexSelector] < cumdV_V:
|
||||
Eulers = binAsEulers(bin,ODF['interval'],ODF['delta'],ODF['center'])
|
||||
orientations[countSamples] = '%g\t%g\t%g' %( math.degrees(Eulers[0]),math.degrees(Eulers[1]),math.degrees(Eulers[2]) )
|
||||
orientations[countSamples] = np.degrees(Eulers)
|
||||
reconstructedODF[bin] += unitInc
|
||||
countSamples += 1
|
||||
indexSelector += 1
|
||||
file['croak'].write('created set of %i when asked to deliver %i\n'%(countSamples,nSamples))
|
||||
|
||||
table.croak('created set of %i when asked to deliver %i'%(countSamples,nSamples))
|
||||
|
||||
return orientations, reconstructedODF
|
||||
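Stripped of the orientation bookkeeping, the selection above is inverse-CDF sampling: sorted uniform selectors are walked once against the cumulative bin volume (a sketch reusing the script's names, with made-up bin volumes):

  import random

  dV_V = [0.5, 0.3, 0.2]                                     # toy normalized bin volumes
  selectors = sorted([random.random() for i in range(5)])
  samples, cumdV_V, indexSelector = [], 0.0, 0
  for bin in range(len(dV_V)):
    cumdV_V += dV_V[bin]
    while indexSelector < len(selectors) and selectors[indexSelector] < cumdV_V:
      samples.append(bin)
      indexSelector += 1
  print samples                                              # e.g. [0, 0, 1, 1, 2]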
|
||||
|
@ -242,73 +236,83 @@ def TothVanHoutteSTAT (ODF,nSamples):
|
|||
# MAIN
|
||||
# --------------------------------------------------------------------
|
||||
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
|
||||
Transform linear binned data into Euler angles.
|
||||
Transform linear binned ODF data into a given number of orientations.
|
||||
|
||||
""", version = scriptID)
|
||||
|
||||
parser.add_option('-n', '--nsamples', dest='number', type='int', metavar = 'int',
|
||||
help='number of orientations to be generated [%default]')
|
||||
parser.add_option('-a','--algorithm', dest='algorithm', type='string', metavar = 'string',
|
||||
help='sampling algorithm. IA: direct inversion, STAT: Van Houtte, MC: Monte Carlo. [%default].') #make choice
|
||||
parser.add_option('-p','--phase', dest='phase', type='int', metavar = 'int',
|
||||
help='phase index to be used [%default]')
|
||||
parser.add_option('--crystallite', dest='crystallite', type='int', metavar = 'int',
|
||||
help='crystallite index to be used [%default]')
|
||||
parser.add_option('-r', '--rnd', dest='randomSeed', type='int', metavar='int', \
|
||||
help='seed of random number generator [%default]')
|
||||
parser.add_option('--ang', dest='ang', action='store_true',
|
||||
help='write .ang file [%default]')
|
||||
parser.set_defaults(randomSeed = None)
|
||||
parser.set_defaults(number = 500)
|
||||
parser.set_defaults(algorithm = 'IA')
|
||||
parser.set_defaults(phase = 1)
|
||||
parser.set_defaults(crystallite = 1)
|
||||
parser.set_defaults(ang = True)
|
||||
parser.add_option('-n', '--nsamples',
|
||||
dest = 'number',
|
||||
type = 'int', metavar = 'int',
|
||||
help = 'number of orientations to be generated [%default]')
|
||||
parser.add_option('-a','--algorithm',
|
||||
dest = 'algorithm',
|
||||
type = 'string', metavar = 'string',
|
||||
help = 'sampling algorithm. IA: integral approximation, STAT: Van Houtte, MC: Monte Carlo. [%default].') #make choice
|
||||
parser.add_option('-p','--phase',
|
||||
dest = 'phase',
|
||||
type = 'int', metavar = 'int',
|
||||
help = 'phase index to be used [%default]')
|
||||
parser.add_option('--crystallite',
|
||||
dest = 'crystallite',
|
||||
type = 'int', metavar = 'int',
|
||||
help = 'crystallite index to be used [%default]')
|
||||
parser.add_option('-r', '--rnd',
|
||||
dest = 'randomSeed',
|
||||
type = 'int', metavar = 'int', \
|
||||
help = 'seed of random number generator [%default]')
|
||||
parser.add_option('--ang',
|
||||
dest = 'ang',
|
||||
action = 'store_true',
|
||||
help = 'write TSL/EDAX .ang file [%default]')
|
||||
parser.set_defaults(randomSeed = None,
|
||||
number = 500,
|
||||
algorithm = 'IA',
|
||||
phase = 1,
|
||||
crystallite = 1,
|
||||
ang = True,
|
||||
)
|
||||
|
||||
(options,filenames) = parser.parse_args()
|
||||
|
||||
nSamples = options.number
|
||||
methods = [options.algorithm]
|
||||
|
||||
|
||||
#--- setup file handles ---------------------------------------------------------------------------
|
||||
files = []
|
||||
if filenames == []:
|
||||
files.append({'name':'STDIN','input':sys.stdin,'output':sys.stdout,'outang':sys.stdout,'croak':sys.stderr})
|
||||
else:
|
||||
for name in filenames:
|
||||
if os.path.exists(name):
|
||||
files.append({'name':name,'input':open(name),'output':open(name+'_tmp','w'),'outang':open(name+'_ang_tmp','w'),'croak':sys.stdout})
|
||||
# --- loop over input files -------------------------------------------------------------------------
|
||||
|
||||
#--- loop over input files ------------------------------------------------------------------------
|
||||
for file in files:
|
||||
file['croak'].write('\033[1m' + scriptName + '\033[0m: ' + (file['name'] if file['name'] != 'STDIN' else '') + '\n')
|
||||
if filenames == []: filenames = ['STDIN']
|
||||
|
||||
table = damask.ASCIItable(file['input'],file['output'],buffered = False)
|
||||
table.head_read()
|
||||
randomSeed = int(os.urandom(4).encode('hex'), 16) if options.randomSeed == None else options.randomSeed # radom seed per file for second phase
|
||||
for name in filenames:
|
||||
if not (name == 'STDIN' or os.path.exists(name)): continue
|
||||
table = damask.ASCIItable(name = name, outname = None,
|
||||
buffered = False, readonly = True)
|
||||
table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
|
||||
|
||||
randomSeed = int(os.urandom(4).encode('hex'), 16) if options.randomSeed == None else options.randomSeed # random seed per file for second phase
|
||||
random.seed(randomSeed)
|
||||
|
||||
# --------------- figure out columns in table ----------- -----------------------------------------
|
||||
column = {}
|
||||
pos = 0
|
||||
keys = ['phi1','Phi','phi2','intensity']
|
||||
for key in keys:
|
||||
if key not in table.labels:
|
||||
file['croak'].write('column %s not found...\n'%key)
|
||||
else:
|
||||
column[key] = pos
|
||||
pos+=1
|
||||
if pos != 4: continue
|
||||
# ------------------------------------------ read header ---------------------------------------
|
||||
|
||||
binnedODF = table.data_readArray(keys)
|
||||
table.head_read()
|
||||
|
||||
errors = []
|
||||
labels = ['phi1','Phi','phi2','intensity']
|
||||
for i,index in enumerate(table.label_index(labels)):
|
||||
if index < 0: errors.append('label {} not present.'.format(labels[i]))
|
||||
|
||||
if errors != []:
|
||||
table.croak(errors)
|
||||
table.close(dismiss = True)
|
||||
continue
|
||||
|
||||
# ------------------------------------------ read data ---------------------------------------
|
||||
|
||||
binnedODF = table.data_readArray(labels)
|
||||
|
||||
# --------------- figure out limits (left/right), delta, and interval -----------------------------
|
||||
ODF = {}
|
||||
limits = np.array([[np.min(table.data[:,column['phi1']]),\
|
||||
np.min(table.data[:,column['Phi']]),\
|
||||
np.min(table.data[:,column['phi2']])],\
|
||||
[np.max(table.data[:,column['phi1']]),\
|
||||
np.max(table.data[:,column['Phi']]),\
|
||||
np.max(table.data[:,column['phi2']])]])
|
||||
limits = np.array([np.min(table.data,axis=0),
|
||||
np.max(table.data,axis=0)])
|
||||
ODF['limit'] = np.radians(limits[1,:])
|
||||
|
||||
if all(limits[0,:]<1e-8): # vertex centered
|
||||
|
@ -319,13 +323,13 @@ for file in files:
|
|||
eulers = [{},{},{}]
|
||||
for i in xrange(table.data.shape[0]):
|
||||
for j in xrange(3):
|
||||
eulers[j][str(table.data[i,column[keys[j]]])] = True # remember eulers along phi1, Phi, and phi2
|
||||
eulers[j][str(table.data[i,j])] = True # remember eulers along phi1, Phi, and phi2
|
||||
ODF['interval'] = np.array([len(eulers[0]),len(eulers[1]),len(eulers[2]),],'i') # steps are number of distinct values
|
||||
ODF['nBins'] = ODF['interval'].prod()
|
||||
ODF['delta'] = np.radians(np.array(limits[1,0:3]-limits[0,0:3])/(ODF['interval']-1))
|
||||
|
||||
if table.data.shape[0] != ODF['nBins']:
|
||||
file['croak'].write('expecting %i values but got %i'%(ODF['nBins'],len(linesBinnedODF)))
|
||||
table.croak('expecting %i values but got %i'%(ODF['nBins'],table.data.shape[0]))
|
||||
continue
|
||||
|
||||
# build binnedODF array
|
||||
|
@ -335,20 +339,21 @@ for file in files:
|
|||
dg = ODF['delta'][0]*2.0*math.sin(ODF['delta'][1]/2.0)*ODF['delta'][2]
|
||||
for b in range(ODF['nBins']):
|
||||
ODF['dV_V'][b] = \
|
||||
max(0.0,table.data[b,column['intensity']]) * dg * \
|
||||
math.sin(((b//ODF['interval'][2])%ODF['interval'][1]+ODF['center'])*ODF['delta'][1])
|
||||
max(0.0,table.data[b,column['intensity']]) * dg * \
|
||||
math.sin(((b//ODF['interval'][2])%ODF['interval'][1]+ODF['center'])*ODF['delta'][1])
|
||||
if ODF['dV_V'][b] > 0.0:
|
||||
sumdV_V += ODF['dV_V'][b]
|
||||
ODF['nNonZero'] += 1
|
||||
|
||||
for b in range(ODF['nBins']): ODF['dV_V'][b] /= sumdV_V # normalize dV/V
|
||||
for b in range(ODF['nBins']): ODF['dV_V'][b] /= sumdV_V # normalize dV/V
|
||||
|
||||
file['croak'].write('non-zero fraction: %12.11f (%i/%i)\n'\
|
||||
%(float(ODF['nNonZero'])/ODF['nBins'],ODF['nNonZero'],ODF['nBins']))
|
||||
file['croak'].write('Volume integral of ODF: %12.11f\n'%sumdV_V)
|
||||
file['croak'].write('Reference Integral: %12.11f\n'\
|
||||
%(ODF['limit'][0]*ODF['limit'][2]*(1-math.cos(ODF['limit'][1]))))
|
||||
|
||||
table.croak(['non-zero fraction: %12.11f (%i/%i)'%(float(ODF['nNonZero'])/ODF['nBins'],
|
||||
ODF['nNonZero'],
|
||||
ODF['nBins']),
|
||||
'Volume integral of ODF: %12.11f'%sumdV_V,
|
||||
'Reference Integral: %12.11f'%(ODF['limit'][0]*ODF['limit'][2]*(1-math.cos(ODF['limit'][1]))),
|
||||
])
|
||||
|
||||
# call methods
|
||||
Functions = {'IA': 'directInversion', 'STAT': 'TothVanHoutteSTAT', 'MC': 'MonteCarloBins'}
|
||||
method = Functions[options.algorithm]
|
||||
|
@ -372,66 +377,72 @@ for file in files:
|
|||
indivSum['orig'] += ODF['dV_V'][bin]
|
||||
indivSquaredSum['orig'] += ODF['dV_V'][bin]**2
|
||||
|
||||
file['croak'].write('sqrt(N*)RMSD of ODFs:\t %12.11f\n'% math.sqrt(nSamples*squaredDiff[method]))
|
||||
file['croak'].write('RMSrD of ODFs:\t %12.11f\n'%math.sqrt(squaredRelDiff[method]))
|
||||
file['croak'].write('rMSD of ODFs:\t %12.11f\n'%(squaredDiff[method]/indivSquaredSum['orig']))
|
||||
file['croak'].write('nNonZero correlation slope:\t %12.11f\n'\
|
||||
table.croak(['sqrt(N*)RMSD of ODFs:\t %12.11f'% math.sqrt(nSamples*squaredDiff[method]),
|
||||
'RMSrD of ODFs:\t %12.11f'%math.sqrt(squaredRelDiff[method]),
|
||||
'rMSD of ODFs:\t %12.11f'%(squaredDiff[method]/indivSquaredSum['orig']),
|
||||
'nNonZero correlation slope:\t %12.11f'\
|
||||
%((ODF['nNonZero']*mutualProd[method]-indivSum['orig']*indivSum[method])/\
|
||||
(ODF['nNonZero']*indivSquaredSum['orig']-indivSum['orig']**2)))
|
||||
file['croak'].write( 'nNonZero correlation confidence:\t %12.11f\n'\
|
||||
(ODF['nNonZero']*indivSquaredSum['orig']-indivSum['orig']**2)),
|
||||
'nNonZero correlation confidence:\t %12.11f'\
|
||||
%((mutualProd[method]-indivSum['orig']*indivSum[method]/ODF['nNonZero'])/\
|
||||
(ODF['nNonZero']*math.sqrt((indivSquaredSum['orig']/ODF['nNonZero']-(indivSum['orig']/ODF['nNonZero'])**2)*\
|
||||
(indivSquaredSum[method]/ODF['nNonZero']-(indivSum[method]/ODF['nNonZero'])**2)))))
|
||||
(ODF['nNonZero']*math.sqrt((indivSquaredSum['orig']/ODF['nNonZero']-(indivSum['orig']/ODF['nNonZero'])**2)*\
|
||||
(indivSquaredSum[method]/ODF['nNonZero']-(indivSum[method]/ODF['nNonZero'])**2)))),
|
||||
])
|
||||
|
||||
if method == 'IA' and nSamples < ODF['nNonZero']:
|
||||
strOpt = '(%i)'%ODF['nNonZero']
|
||||
|
||||
formatwidth = 1
|
||||
file['output'].write('#' + scriptID + ' ' + ' '.join(sys.argv[1:])+'\n')
|
||||
file['output'].write('# random seed %i\n'%randomSeed)
|
||||
file['output'].write('#-------------------#')
|
||||
file['output'].write('\n<microstructure>\n')
|
||||
file['output'].write('#-------------------#\n')
|
||||
formatwidth = 1+int(math.log10(nSamples))
|
||||
|
||||
materialConfig = [
|
||||
'#' + scriptID + ' ' + ' '.join(sys.argv[1:]),
|
||||
'# random seed %i'%randomSeed,
|
||||
'#-------------------#',
|
||||
'<microstructure>',
|
||||
'#-------------------#',
|
||||
]
|
||||
|
||||
for ID in xrange(nSamples):
|
||||
file['output'].write('[Grain%s]\n'%(str(ID+1).zfill(formatwidth)) + \
|
||||
'crystallite %i\n'%options.crystallite + \
|
||||
'(constituent) phase %i texture %s fraction 1.0\n'%(options.phase,str(ID+1).rjust(formatwidth)))
|
||||
materialConfig += ['[Grain%s]'%(str(ID+1).zfill(formatwidth)),
|
||||
'crystallite %i'%options.crystallite,
|
||||
'(constituent) phase %i texture %s fraction 1.0'%(options.phase,str(ID+1).rjust(formatwidth)),
|
||||
]
|
||||
|
||||
file['output'].write('\n#-------------------#')
|
||||
file['output'].write('\n<texture>\n')
|
||||
file['output'].write('#-------------------#\n')
|
||||
materialConfig += [
|
||||
'#-------------------#',
|
||||
'<texture>',
|
||||
'#-------------------#',
|
||||
]
|
||||
|
||||
for ID in xrange(nSamples):
|
||||
eulers = re.split(r'[\t]', Orientations[ID].strip())
|
||||
eulers = Orientations[ID]
|
||||
|
||||
file['output'].write('[Grain%s]\n'%(str(ID+1).zfill(formatwidth)) + \
|
||||
'(gauss) phi1 %10.5f Phi %10.5f phi2 %10.6f scatter 0.0 fraction 1.0\n'\
|
||||
%(float(eulers[0]),float(eulers[1]),float(eulers[2])))
|
||||
materialConfig += ['[Grain%s]'%(str(ID+1).zfill(formatwidth)),
|
||||
'(gauss) phi1 %10.5f Phi %10.5f phi2 %10.6f scatter 0.0 fraction 1.0'%tuple(eulers),
|
||||
]
|
||||
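For nSamples = 2 (hence formatwidth = 1), phase 1 and crystallite 1, the assembled materialConfig reads schematically as follows (scriptID and seed header lines omitted, Euler angles are placeholders):

  #-------------------#
  <microstructure>
  #-------------------#
  [Grain1]
  crystallite 1
  (constituent) phase 1 texture 1 fraction 1.0
  [Grain2]
  crystallite 1
  (constituent) phase 1 texture 2 fraction 1.0
  #-------------------#
  <texture>
  #-------------------#
  [Grain1]
  (gauss) phi1  312.84960 Phi   45.99200 phi2   12.300000 scatter 0.0 fraction 1.0
  [Grain2]
  (gauss) phi1   10.00000 Phi   80.00000 phi2    5.000000 scatter 0.0 fraction 1.0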
|
||||
#--- output finalization --------------------------------------------------------------------------
|
||||
if file['name'] != 'STDIN':
|
||||
file['output'].close()
|
||||
os.rename(file['name']+'_tmp',
|
||||
os.path.splitext(file['name'])[0] +'_'+method+'_'+str(nSamples)+'%s'%('_material.config'))
|
||||
|
||||
with open(os.path.splitext(name)[0]+'_'+method+'_'+str(nSamples)+'_material.config','w') as outfile:
|
||||
outfile.write('\n'.join(materialConfig)+'\n')
|
||||
|
||||
# write ang file
|
||||
if options.ang:
|
||||
sizeY = integerFactorization(nSamples)
|
||||
sizeX = nSamples / sizeY
|
||||
print 'Writing .ang file: %i * %i = %i (== %i)'%(sizeX,sizeY,sizeX*sizeY,nSamples)
|
||||
# write header
|
||||
for line in getHeader(sizeX,sizeY,1.0):
|
||||
file['outang'].write(line + '\n')
|
||||
with open(os.path.splitext(name)[0]+'_'+method+'_'+str(nSamples)+'.ang','w') as outfile:
|
||||
sizeY = integerFactorization(nSamples)
|
||||
sizeX = nSamples / sizeY
|
||||
table.croak('Writing .ang file: %i * %i = %i (== %i)'%(sizeX,sizeY,sizeX*sizeY,nSamples))
|
||||
# write header
|
||||
outfile.write('\n'.join(TSLheader(sizeX,sizeY,1.0))+'\n')
|
||||
|
||||
# write data
|
||||
counter = 0
|
||||
for line in Orientations:
|
||||
eulers = re.split(r'[\t]', line.strip())
|
||||
file['outang'].write(''.join(['%10.5f'%math.radians(float(angle)) for angle in eulers])+
|
||||
''.join(['%10.5f'%coord for coord in [counter%sizeX,counter//sizeX]])+
|
||||
' 100.0 1.0 0 1 1.0\n')
|
||||
counter += 1
|
||||
# write data
|
||||
counter = 0
|
||||
for eulers in Orientations:
|
||||
outfile.write('%10.5f %10.5f %10.5f '%tuple(np.radians(eulers)) +
|
||||
'%10.5f %10.5f '%(counter%sizeX,counter//sizeX) +
|
||||
'100.0 1.0 0 1 1.0\n')
|
||||
counter += 1
|
||||
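The counter arithmetic lays samples out row by row on the sizeX-wide scan grid:

  sizeX = 4
  for counter in range(6):
    print counter, counter%sizeX, counter//sizeX             # x cycles 0..3, y steps up every sizeX points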
|
||||
#--- output finalization --------------------------------------------------------------------------
|
||||
if file['name'] != 'STDIN':
|
||||
file['outang'].close()
|
||||
os.rename(file['name']+'_ang_tmp',
|
||||
os.path.splitext(file['name'])[0] +'_'+method+'_'+str(nSamples)+'%s'%('.ang'))
|
||||
|
||||
table.close()
|
||||
|
|
|
@ -12,136 +12,118 @@ scriptName = os.path.splitext(scriptID.split()[1])[0]
|
|||
#--------------------------------------------------------------------------------------------------
|
||||
# MAIN
|
||||
#--------------------------------------------------------------------------------------------------
|
||||
identifiers = {
|
||||
'grid': ['a','b','c'],
|
||||
'size': ['x','y','z'],
|
||||
'origin': ['x','y','z'],
|
||||
}
|
||||
mappings = {
|
||||
'grid': lambda x: int(x),
|
||||
'size': lambda x: float(x),
|
||||
'origin': lambda x: float(x),
|
||||
'microstructures': lambda x: int(x),
|
||||
}
|
||||
|
||||
|
||||
parser = OptionParser(option_class=damask.extendableOption, usage='%prog [seedsfile[s]]', description = """
|
||||
Produce VTK point mesh from seeds file
|
||||
|
||||
""", version = scriptID)
|
||||
|
||||
parser.add_option('-s', '--size', dest='size', type='float', nargs = 3, metavar='float float float',\
|
||||
help='x,y,z size of hexahedral box [1.0 along largest grid point number]')
|
||||
parser.add_option('-s', '--size',
|
||||
dest = 'size',
|
||||
type = 'float', nargs = 3, metavar = 'float float float',
|
||||
help = 'x,y,z size of hexahedral box [1.0 along largest grid point number]')
|
||||
parser.add_option('-p','--position',
|
||||
dest = 'position',
|
||||
type = 'string', metavar = 'string',
|
||||
help = 'column label for coordinates [%default]')
|
||||
|
||||
parser.set_defaults(size = [0.0,0.0,0.0])
|
||||
parser.set_defaults(size = [0.0,0.0,0.0],
|
||||
position = 'pos',
|
||||
)
|
||||
|
||||
(options, filenames) = parser.parse_args()
|
||||
|
||||
# --- loop over input files -------------------------------------------------------------------------
|
||||
if filenames == []:
|
||||
filenames = ['STDIN']
|
||||
|
||||
if filenames == []: filenames = ['STDIN']
|
||||
|
||||
for name in filenames:
|
||||
if name == 'STDIN':
|
||||
file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr}
|
||||
file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
|
||||
else:
|
||||
if not os.path.exists(name): continue
|
||||
file = {'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr}
|
||||
file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
|
||||
if not (name == 'STDIN' or os.path.exists(name)): continue
|
||||
table = damask.ASCIItable(name = name, outname = None,
|
||||
buffered = False, readonly = True)
|
||||
table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))
|
||||
|
||||
# --- interpret header ----------------------------------------------------------------------------
|
||||
|
||||
table = damask.ASCIItable(file['input'],file['output'],buffered = False)
|
||||
table.head_read()
|
||||
info,extra_header = table.head_getGeom()
|
||||
|
||||
table.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))),
|
||||
'size x y z: %s'%(' x '.join(map(str,info['size']))),
|
||||
'origin x y z: %s'%(' : '.join(map(str,info['origin']))),
|
||||
'homogenization: %i'%info['homogenization'],
|
||||
'microstructures: %i'%info['microstructures'],
|
||||
])
|
||||
|
||||
remarks = []
|
||||
errors = []
|
||||
|
||||
if np.all(table.label_index(['1_coords','2_coords','3_coords']) != -1):
|
||||
labels = ['1_coords','2_coords','3_coords']
|
||||
elif np.all(table.label_index(['x','y','z']) != -1):
|
||||
labels = ['x','y','z']
|
||||
if np.any(info['grid'] < 1): remarks.append('invalid grid a b c.')
|
||||
if np.any(info['size'] <= 0.0) \
|
||||
and np.all(info['grid'] < 1): errors.append('invalid size x y z.')
|
||||
else:
|
||||
file['croak'].write('no coordinate data (1/2/3_coords | x/y/z) found ...')
|
||||
for i in xrange(3):
|
||||
if info['size'][i] <= 0.0: # any invalid size?
|
||||
info['size'][i] = float(info['grid'][i])/max(info['grid']) # normalize to grid
|
||||
remarks.append('rescaling size {} to {}...'.format({0:'x',1:'y',2:'z'}[i],info['size'][i]))
|
||||
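For instance, a grid of (4, 2, 1) with no valid size entries rescales to size (1.0, 0.5, 0.25): each edge defaults to its grid extent normalized by the largest grid dimension.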
if table.label_dimension(options.position) != 3: errors.append('columns "{}" have dimension {}'.format(options.position,
|
||||
table.label_dimension(options.position)))
|
||||
if remarks != []: table.croak(remarks)
|
||||
if errors != []:
|
||||
table.croak(errors)
|
||||
table.close(dismiss=True)
|
||||
continue
|
||||
|
||||
labels = ['{dim}_{label}'.format(dim = 1+i,label = options.position) for i in xrange(3)]
|
||||
hasGrains = table.label_index('microstructure') != -1
|
||||
labels += ['microstructure'] if hasGrains else []
|
||||
|
||||
table.data_readArray(labels) # read ASCIItable columns
|
||||
|
||||
coords = table.data[:,:3] # assign coordinates
|
||||
grain = table.data[:,3].astype('i') if hasGrains else 1+np.arange(len(coords),dtype='i') # assign grains
|
||||
grainIDs = np.unique(grain).astype('i') # find all grainIDs present
|
||||
|
||||
#--- interpret header ----------------------------------------------------------------------------
|
||||
info = {
|
||||
'grid': np.zeros(3,'i'),
|
||||
'size': np.array(options.size),
|
||||
'origin': np.zeros(3,'d'),
|
||||
'microstructures': 0,
|
||||
}
|
||||
# grainIDs = np.unique(grain).astype('i') # find all grainIDs present
|
||||
|
||||
# --- generate grid --------------------------------------------------------------------------------
|
||||
|
||||
for header in table.info:
|
||||
headitems = map(str.lower,header.split())
|
||||
if len(headitems) == 0: continue
|
||||
if headitems[0] in mappings.keys():
|
||||
if headitems[0] in identifiers.keys():
|
||||
for i in xrange(len(identifiers[headitems[0]])):
|
||||
info[headitems[0]][i] = \
|
||||
mappings[headitems[0]](headitems[headitems.index(identifiers[headitems[0]][i])+1])
|
||||
else:
|
||||
info[headitems[0]] = mappings[headitems[0]](headitems[1])
|
||||
|
||||
if info['microstructures'] != len(grainIDs):
|
||||
file['croak'].write('grain data not matching grain count (%i)...\n'%(len(grainIDs)))
|
||||
info['microstructures'] = len(grainIDs)
|
||||
if np.any(info['grid'] < 1):
|
||||
file['croak'].write('invalid grid a b c.\n')
|
||||
continue
|
||||
|
||||
for i in xrange(3):
|
||||
if info['size'][i] <= 0.0: # any invalid size?
|
||||
info['size'][i] = float(info['grid'][i])/max(info['grid'])
|
||||
file['croak'].write('rescaling size %s...\n'%{0:'x',1:'y',2:'z'}[i])
|
||||
|
||||
#--- generate grid --------------------------------------------------------------------------------
|
||||
grid = vtk.vtkUnstructuredGrid()
|
||||
pts = vtk.vtkPoints()
|
||||
|
||||
#--- process microstructure information --------------------------------------------------------------
|
||||
# --- process microstructure information --------------------------------------------------------------
|
||||
|
||||
IDs = vtk.vtkIntArray()
|
||||
IDs.SetNumberOfComponents(1)
|
||||
IDs.SetName("GrainID")
|
||||
|
||||
for i,item in enumerate(coords):
|
||||
IDs.InsertNextValue(grain[i])
|
||||
pid = pts.InsertNextPoint(item[0:3])
|
||||
pointIds = vtk.vtkIdList()
|
||||
pointIds.InsertId(0, pid)
|
||||
grid.InsertNextCell(1, pointIds)
|
||||
IDs.InsertNextValue(grain[i])
|
||||
|
||||
grid.SetPoints(pts)
|
||||
grid.GetCellData().AddArray(IDs)
|
||||
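Cell type 1 passed to InsertNextCell above is VTK_VERTEX: every seed becomes its own one-point cell so the GrainID array can be attached per cell. A stripped-down sketch of the same assembly:

  import vtk

  grid, pts = vtk.vtkUnstructuredGrid(), vtk.vtkPoints()
  pointIds = vtk.vtkIdList()
  pointIds.InsertId(0, pts.InsertNextPoint((0.0, 0.0, 0.0)))
  grid.InsertNextCell(vtk.VTK_VERTEX, pointIds)              # identical to InsertNextCell(1, pointIds)
  grid.SetPoints(pts)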
|
||||
#--- write data -----------------------------------------------------------------------------------
|
||||
if file['name'] == 'STDIN':
|
||||
# --- write data -----------------------------------------------------------------------------------
|
||||
|
||||
if name == 'STDIN':
|
||||
writer = vtk.vtkUnstructuredGridWriter()
|
||||
writer.WriteToOutputStringOn()
|
||||
writer.SetFileTypeToASCII()
|
||||
writer.SetHeader('# powered by '+scriptID)
|
||||
if vtk.VTK_MAJOR_VERSION <= 5:
|
||||
writer.SetInput(grid)
|
||||
else:
|
||||
writer.SetInputData(grid)
|
||||
if vtk.VTK_MAJOR_VERSION <= 5: writer.SetInput(grid)
|
||||
else: writer.SetInputData(grid)
|
||||
writer.Write()
|
||||
sys.stdout.write(writer.GetOutputString()[0:writer.GetOutputStringLength()])
|
||||
else:
|
||||
table.close(dismiss=True)
|
||||
(head,tail) = os.path.split(file['name'])
|
||||
(dir,filename) = os.path.split(name)
|
||||
writer = vtk.vtkXMLUnstructuredGridWriter()
|
||||
writer.SetDataModeToBinary()
|
||||
writer.SetCompressorTypeToZLib()
|
||||
writer.SetFileName(os.path.join(head,'seeds_'+os.path.splitext(tail)[0]
|
||||
+'.'+writer.GetDefaultFileExtension()))
|
||||
if vtk.VTK_MAJOR_VERSION <= 5:
|
||||
writer.SetInput(grid)
|
||||
else:
|
||||
writer.SetInputData(grid)
|
||||
writer.SetFileName(os.path.join(dir,'seeds_'+os.path.splitext(filename)[0]
|
||||
+'.'+writer.GetDefaultFileExtension()))
|
||||
if vtk.VTK_MAJOR_VERSION <= 5: writer.SetInput(grid)
|
||||
else: writer.SetInputData(grid)
|
||||
writer.Write()
|
||||
|
||||
table.close()
|
||||
|
|
|
@ -12,155 +12,103 @@ scriptName = os.path.splitext(scriptID.split()[1])[0]
|
|||
 #--------------------------------------------------------------------------------------------------
 # MAIN
 #--------------------------------------------------------------------------------------------------
-identifiers = {
-        'grid':   ['a','b','c'],
-        'size':   ['x','y','z'],
-        'origin': ['x','y','z'],
-          }
-mappings = {
-        'grid':            lambda x: int(x),
-        'size':            lambda x: float(x),
-        'origin':          lambda x: float(x),
-        'homogenization':  lambda x: int(x),
-        'microstructures': lambda x: int(x),
-          }

 parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
 Create seed file taking microstructure indices from given geom file but excluding black-listed grains.

 """, version = scriptID)

-parser.add_option('-w','--white', dest='whitelist', action='extend', \
-                  help='white list of grain IDs', metavar='<LIST>')
-parser.add_option('-b','--black', dest='blacklist', action='extend', \
-                  help='black list of grain IDs', metavar='<LIST>')
+parser.add_option('-w','--white',
+                  action = 'extend', metavar='<int LIST>',
+                  dest = 'whitelist',
+                  help = 'whitelist of grain IDs')
+parser.add_option('-b','--black',
+                  action = 'extend', metavar='<int LIST>',
+                  dest = 'blacklist',
+                  help = 'blacklist of grain IDs')
+parser.add_option('-p','--position',
+                  dest = 'position',
+                  type = 'string', metavar = 'string',
+                  help = 'column label for coordinates [%default]')

-parser.set_defaults(whitelist = [])
-parser.set_defaults(blacklist = [])
+parser.set_defaults(whitelist = [],
+                    blacklist = [],
+                    position = 'pos',
+                   )

 (options,filenames) = parser.parse_args()

 options.whitelist = map(int,options.whitelist)
 options.blacklist = map(int,options.blacklist)

-#--- setup file handles --------------------------------------------------------------------------
-files = []
-if filenames == []:
-  files.append({'name':'STDIN',
-                'input':sys.stdin,
-                'output':sys.stdout,
-                'croak':sys.stderr,
-               })
-else:
-  for name in filenames:
-    if os.path.exists(name):
-      files.append({'name':name,
-                    'input':open(name),
-                    'output':open(os.path.splitext(name)[0]+'.seeds','w'),
-                    'croak':sys.stdout,
-                   })
+# --- loop over output files -------------------------------------------------------------------------

-#--- loop over input files ------------------------------------------------------------------------
-for file in files:
-  file['croak'].write('\033[1m' + scriptName + '\033[0m: ' + (file['name'] if file['name'] != 'STDIN' else '') + '\n')
+if filenames == []: filenames = ['STDIN']

+for name in filenames:
+  if not (name == 'STDIN' or os.path.exists(name)): continue
+  table = damask.ASCIItable(name = name, outname = os.path.splitext(name)[0]+'.seeds',
+                            buffered = False, labeled = False)
+  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

+# --- interpret header ----------------------------------------------------------------------------

-  table = damask.ASCIItable(file['input'],file['output'],labels = False,buffered = False)
   table.head_read()

-#--- interpret header ----------------------------------------------------------------------------
-  info = {
-          'grid':   np.zeros(3,'i'),
-          'size':   np.zeros(3,'d'),
-          'origin': np.zeros(3,'d'),
-          'homogenization': 0,
-          'microstructures': 0,
-         }
-  newInfo = {
-          'grid':   np.zeros(3,'i'),
-          'origin': np.zeros(3,'d'),
-          'microstructures': 0,
-         }
-  extra_header = []

-  for header in table.info:
-    headitems = map(str.lower,header.split())
-    if len(headitems) == 0: continue                                  # skip blank lines
-    if headitems[0] in mappings.keys():
-      if headitems[0] in identifiers.keys():
-        for i in xrange(len(identifiers[headitems[0]])):
-          info[headitems[0]][i] = \
-            mappings[headitems[0]](headitems[headitems.index(identifiers[headitems[0]][i])+1])
-      else:
-        info[headitems[0]] = mappings[headitems[0]](headitems[1])
-    else:
-      extra_header.append(header)

-  file['croak'].write('grid a b c: %s\n'%(' x '.join(map(str,info['grid']))) + \
-                      'size x y z: %s\n'%(' x '.join(map(str,info['size']))) + \
-                      'origin x y z: %s\n'%(' : '.join(map(str,info['origin']))) + \
-                      'homogenization: %i\n'%info['homogenization'] + \
-                      'microstructures: %i\n'%info['microstructures'])

-  if np.any(info['grid'] < 1):
-    file['croak'].write('invalid grid a b c.\n')
-    continue
-  if np.any(info['size'] <= 0.0):
-    file['croak'].write('invalid size x y z.\n')
-    continue
-  if 'origin' not in info:
-    info['origin'] = np.zeros(3)

-#--- read data ------------------------------------------------------------------------------------
-  microstructure = np.zeros(info['grid'].prod(),'i')                  # initialize as flat array
-  i = 0
-  while table.data_read():
-    items = table.data
-    if len(items) > 2:
-      if   items[1].lower() == 'of': items = [int(items[2])]*int(items[0])
-      elif items[1].lower() == 'to': items = xrange(int(items[0]),1+int(items[2]))
-      else:                          items = map(int,items)
-    else:                            items = map(int,items)

-    s = len(items)
-    microstructure[i:i+s] = items
-    i += s
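The "of"/"to" branches above decode the geom file's compressed data notation: "4 of 2" stands for four repetitions of index 2, "1 to 3" for the inclusive range 1,2,3, and anything else is a plain list of indices. A standalone sketch of that decoder (helper name invented here):

import numpy as np

def decode_geom_line(line):
  '''expand one geom data line: "4 of 2" -> [2,2,2,2], "1 to 3" -> [1,2,3], else plain integers'''
  items = line.split()
  if len(items) > 2:
    if   items[1].lower() == 'of': return [int(items[2])]*int(items[0])
    elif items[1].lower() == 'to': return range(int(items[0]),1+int(items[2]))
  return [int(item) for item in items]

microstructure = np.hstack([decode_geom_line(l) for l in ['2 of 3','5 to 7','1 1']])
print microstructure                                                  # [3 3 5 6 7 1 1]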

-# ------------------------------------------ assemble header ---------------------------------------

-  table.info = [
-    scriptID,
-    "grid\ta %i\tb %i\tc %i"%(info['grid'][0],info['grid'][1],info['grid'][2],),
-    "size\tx %i\ty %i\tz %i"%(info['size'][0],info['size'][1],info['size'][2],),
-    "origin\tx %i\ty %i\tz %i"%(info['origin'][0],info['origin'][1],info['origin'][2],),
-    ]
-  table.labels_clear()
-  table.labels_append(['1_coords','2_coords','3_coords','microstructure'])   # implicitly switching label processing/writing on
-  table.head_write()
+  info,extra_header = table.head_getGeom()

-#--- filtering of grain voxels ------------------------------------------------------------------------------------
-  table.data_clear()
-  i = 0
-  outputDead = False
-  coord = np.zeros(3,'d')
-  for coord[2] in xrange(info['grid'][2]):
-    for coord[1] in xrange(info['grid'][1]):
-      for coord[0] in xrange(info['grid'][0]):
-        if (options.whitelist == [] and options.blacklist == []) or \
-           (options.whitelist != [] and microstructure[i] in options.whitelist) or \
-           (options.blacklist != [] and microstructure[i] not in options.blacklist):
-          table.data = list((coord+0.5)/info['grid'])+[microstructure[i]]
-          outputDead = not table.data_write()
-        i += 1
-        if outputDead: break
-      if outputDead: break
-    if outputDead: break
+  table.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))),
+               'size x y z: %s'%(' x '.join(map(str,info['size']))),
+               'origin x y z: %s'%(' : '.join(map(str,info['origin']))),
+               'homogenization: %i'%info['homogenization'],
+               'microstructures: %i'%info['microstructures'],
+              ])

-# ------------------------------------------ output result ---------------------------------------
+  errors = []
+  if np.any(info['grid'] < 1):    errors.append('invalid grid a b c.')
+  if np.any(info['size'] <= 0.0): errors.append('invalid size x y z.')
+  if errors != []:
+    table.croak(errors)
+    table.close(dismiss = True)
+    continue

-  outputDead or table.output_flush()                                  # just in case of buffered ASCII table
+# --- read data ------------------------------------------------------------------------------------

-  table.input_close()                                                 # close input ASCII table
-  if file['name'] != 'STDIN':
-    table.output_close()                                              # close output ASCII table
+  microstructure = table.microstructure_read(info['grid'])            # read (linear) microstructure

+# --- generate grid --------------------------------------------------------------------------------

+  x = (0.5 + np.arange(info['grid'][0],dtype=float))/info['grid'][0]*info['size'][0]+info['origin'][0]
+  y = (0.5 + np.arange(info['grid'][1],dtype=float))/info['grid'][1]*info['size'][1]+info['origin'][1]
+  z = (0.5 + np.arange(info['grid'][2],dtype=float))/info['grid'][2]*info['size'][2]+info['origin'][2]

+  xx = np.tile(          x,                info['grid'][1]* info['grid'][2])
+  yy = np.tile(np.repeat(y,info['grid'][0]                ),info['grid'][2])
+  zz =         np.repeat(z,info['grid'][0]*info['grid'][1])
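The tile/repeat construction above lays the cell-center coordinates out in the same linear order as the geom data: x cycles fastest, z slowest. A tiny check on a hypothetical 2 x 2 x 1 grid:

import numpy as np

grid   = np.array([2,2,1])                                            # made-up example grid
size   = np.array([1.,1.,1.])
origin = np.zeros(3)
x = (0.5 + np.arange(grid[0]))/grid[0]*size[0]+origin[0]
y = (0.5 + np.arange(grid[1]))/grid[1]*size[1]+origin[1]
z = (0.5 + np.arange(grid[2]))/grid[2]*size[2]+origin[2]
xx = np.tile(x,grid[1]*grid[2])                                       # x cycles fastest ...
yy = np.tile(np.repeat(y,grid[0]),grid[2])                            # ... then y ...
zz = np.repeat(z,grid[0]*grid[1])                                     # ... z slowest (Fortran order)
print zip(xx,yy,zz)             # [(0.25,0.25,0.5),(0.75,0.25,0.5),(0.25,0.75,0.5),(0.75,0.75,0.5)]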

+  mask = np.logical_and(np.in1d(microstructure,options.whitelist,invert=False) if options.whitelist != [] else np.full_like(microstructure,True,dtype=bool),
+                        np.in1d(microstructure,options.blacklist,invert=True ) if options.blacklist != [] else np.full_like(microstructure,True,dtype=bool))
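This vectorized mask replaces the removed triple loop: np.in1d reports, per voxel, whether its microstructure ID occurs in the white/black list, and an empty list degenerates to an all-True mask. A small sketch with made-up IDs:

import numpy as np

microstructure = np.array([1,2,3,2,4])
whitelist,blacklist = [2,3],[3]
mask = np.logical_and(np.in1d(microstructure,whitelist),              # keep IDs on the whitelist ...
                      np.in1d(microstructure,blacklist,invert=True))  # ... that are not blacklisted
print microstructure[mask]                                            # [2 2]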

+# ------------------------------------------ assemble header ---------------------------------------

+  table.info_clear()
+  table.info_append(extra_header+[
+    scriptID + ' ' + ' '.join(sys.argv[1:]),
+    "grid\ta {grid[0]}\tb {grid[1]}\tc {grid[2]}".format(grid=newInfo['grid']),
+    "size\tx {size[0]}\ty {size[1]}\tz {size[2]}".format(size=newInfo['size']),
+    "origin\tx {origin[0]}\ty {origin[1]}\tz {origin[2]}".format(origin=info['origin']),
+    "homogenization\t{homog}".format(homog=info['homogenization']),
+    "microstructures\t{microstructures}".format(microstructures=newInfo['microstructures']),
+    ])
+  table.labels_clear()
+  table.labels_append(['{dim}_{label}'.format(dim = 1+i,label = options.position) for i in range(3)]+['microstructure'])
+  table.head_write()
+  table.output_flush()

+# --- write seeds information ------------------------------------------------------------

+  table.data = np.squeeze(np.dstack((xx,yy,zz,microstructure)))[mask]
+  table.data_writeArray()

+# ------------------------------------------ finalize output ---------------------------------------

+  table.close()

@ -11,18 +11,6 @@ scriptName = scriptID.split()[1]
 # --------------------------------------------------------------------
 # MAIN
 # --------------------------------------------------------------------
-identifiers = {
-        'grid':   ['a','b','c'],
-        'size':   ['x','y','z'],
-        'origin': ['x','y','z'],
-          }
-mappings = {
-        'grid':            lambda x: int(x),
-        'size':            lambda x: float(x),
-        'origin':          lambda x: float(x),
-        'homogenization':  lambda x: int(x),
-        'microstructures': lambda x: int(x),
-          }

 parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
 Create seeds file by poking at 45 degree through given geom file.

@ -30,105 +18,85 @@ Mimics APS Beamline 34-ID-E DAXM poking.

 """, version = scriptID)

-parser.add_option('-N', '--points', dest='N', type='int', metavar='int', \
-                  help='number of poking locations [%default]')
-parser.add_option('-z', '--planes', dest='z', type='float', nargs = 2, metavar='float float', \
-                  help='top and bottom z plane')
-parser.add_option('-x', action='store_true', dest='x', \
-                  help='poke 45 deg along x')
-parser.add_option('-y', action='store_true', dest='y', \
-                  help='poke 45 deg along y')
+parser.add_option('-N', '--points',
+                  dest = 'N',
+                  type = 'int', metavar = 'int',
+                  help = 'number of poking locations [%default]')
+parser.add_option('-z', '--planes',
+                  dest = 'z',
+                  type = 'float', nargs = 2, metavar='float float',
+                  help = 'top and bottom z plane')
+parser.add_option('-x',
+                  action = 'store_true',
+                  dest = 'x',
+                  help = 'poke 45 deg along x')
+parser.add_option('-y',
+                  action = 'store_true',
+                  dest = 'y',
+                  help = 'poke 45 deg along y')
+parser.add_option('-p','--position',
+                  dest = 'position',
+                  type = 'string', metavar = 'string',
+                  help = 'column label for coordinates [%default]')

-parser.set_defaults(x = False)
-parser.set_defaults(y = False)
-parser.set_defaults(N = 16)
+parser.set_defaults(x = False,
+                    y = False,
+                    N = 16,
+                    position = 'pos',
+                   )

 (options,filenames) = parser.parse_args()

-# --- loop over input files -------------------------------------------------------------------------
-if filenames == []:
-  filenames = ['STDIN']
+# --- loop over output files -------------------------------------------------------------------------

+if filenames == []: filenames = ['STDIN']

 for name in filenames:
-  if name == 'STDIN':
-    file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr}
-    file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
-  else:
-    if not os.path.exists(name): continue
-    file = {'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr}
-    file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
+  if not (name == 'STDIN' or os.path.exists(name)): continue
+  table = damask.ASCIItable(name = name, outname = name+'_tmp',
+                            buffered = False, labeled = False)
+  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

-  theTable = damask.ASCIItable(file['input'],file['output'],labels = False)
-  theTable.head_read()
+# --- interpret header ----------------------------------------------------------------------------

+  table.head_read()
+  info,extra_header = table.head_getGeom()

+  table.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))),
+               'size x y z: %s'%(' x '.join(map(str,info['size']))),
+               'origin x y z: %s'%(' : '.join(map(str,info['origin']))),
+               'homogenization: %i'%info['homogenization'],
+               'microstructures: %i'%info['microstructures'],
+              ])

-#--- interpret header ----------------------------------------------------------------------------
-  info = {
-          'grid':   np.zeros(3,'i'),
-          'size':   np.zeros(3,'d'),
-          'origin': np.zeros(3,'d'),
-          'homogenization': 0,
-          'microstructures': 0,
-         }
+  errors = []
+  if np.any(info['grid'] < 1):    errors.append('invalid grid a b c.')
+  if np.any(info['size'] <= 0.0): errors.append('invalid size x y z.')
+  if errors != []:
+    table.croak(errors)
+    table.close(dismiss = True)
+    continue

+# --- read data ------------------------------------------------------------------------------------

+  microstructure = table.microstructure_read(info['grid']).reshape(info['grid'],order='F')          # read microstructure
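The order='F' reshape matters here: geom data is stored with x varying fastest, so the flat array must be unraveled column-major before it can be indexed as microstructure[x,y,z]. A quick demonstration on a made-up 2 x 2 x 2 grid:

import numpy as np

flat = np.arange(8)                                                   # linear geom data, x fastest
m = flat.reshape((2,2,2),order='F')                                   # column-major (Fortran) unravel
print m[1,0,0], m[0,1,0], m[0,0,1]                                    # 1 2 4: strides 1, 2, 4 along x, y, z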

+# --- do work ------------------------------------------------------------------------------------

   newInfo = {
           'microstructures': 0,
          }
-  extra_header = []

-  for header in theTable.info:
-    headitems = map(str.lower,header.split())
-    if len(headitems) == 0: continue
-    if headitems[0] in mappings.keys():
-      if headitems[0] in identifiers.keys():
-        for i in xrange(len(identifiers[headitems[0]])):
-          info[headitems[0]][i] = \
-            mappings[headitems[0]](headitems[headitems.index(identifiers[headitems[0]][i])+1])
-      else:
-        info[headitems[0]] = mappings[headitems[0]](headitems[1])
-    else:
-      extra_header.append(header)

-  file['croak'].write('grid a b c: %s\n'%(' x '.join(map(str,info['grid']))) + \
-                      'size x y z: %s\n'%(' x '.join(map(str,info['size']))) + \
-                      'origin x y z: %s\n'%(' : '.join(map(str,info['origin']))) + \
-                      'homogenization: %i\n'%info['homogenization'] + \
-                      'microstructures: %i\n'%info['microstructures'])

-  if np.any(info['grid'] < 1):
-    file['croak'].write('invalid grid a b c.\n')
-    continue
-  if np.any(info['size'] <= 0.0):
-    file['croak'].write('invalid size x y z.\n')
-    continue

-#--- read data ------------------------------------------------------------------------------------
-  microstructure = np.zeros(info['grid'].prod(),'i')
-  i = 0

-  while theTable.data_read():
-    items = theTable.data
-    if len(items) > 2:
-      if   items[1].lower() == 'of': items = [int(items[2])]*int(items[0])
-      elif items[1].lower() == 'to': items = xrange(int(items[0]),1+int(items[2]))
-      else:                          items = map(int,items)
-    else:                            items = map(int,items)

-    s = len(items)
-    microstructure[i:i+s] = items
-    i += s

-#--- do work ------------------------------------------------------------------------------------
-          'microstructures': 0,
-         }

   Nx = int(options.N/math.sqrt(options.N*info['size'][1]/info['size'][0]))
   Ny = int(options.N/math.sqrt(options.N*info['size'][0]/info['size'][1]))
   Nz = int((max(options.z)-min(options.z))/info['size'][2]*info['grid'][2])
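The poking counts distribute N locations over the x-y cross-section in proportion to its aspect ratio: Nx/Ny tracks size_x/size_y with Nx*Ny close to N, while Nz covers the requested z-slab in grid layers. Worked numbers for a hypothetical 2:1 section:

import math

N,sx,sy = 16,2.0,1.0                                                  # made-up: 16 pokes, 2:1 cross-section
Nx = int(N/math.sqrt(N*sy/sx))                                        # = int(sqrt(N*sx/sy)) = 5
Ny = int(N/math.sqrt(N*sx/sy))                                        # = int(sqrt(N*sy/sx)) = 2
print Nx,Ny                                                           # 5 2: ratio follows sx/sy, product <= N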

-  file['croak'].write('poking %i x %i x %i...\n'%(Nx,Ny,Nz))
-  microstructure = microstructure.reshape(info['grid'],order='F')
+  table.croak('poking {0} x {1} x {2}...'.format(Nx,Ny,Nz))

   seeds = np.zeros((Nx*Ny*Nz,4),'d')
   grid = np.zeros(3,'i')

   offset = min(options.z)/info['size'][2]*info['grid'][2]             # offset due to lower z-plane
   n = 0
   for i in xrange(Nx):
     grid[0] = round((i+0.5)*info['grid'][0]/Nx-0.5)

@ -138,41 +106,43 @@ for name in filenames:

       grid[2] = offset + k
       grid %= info['grid']
       coordinates = (0.5+grid)*info['size']/info['grid']
       seeds[n,0:3] = coordinates/info['size']                         # normalize coordinates to box
       seeds[n, 3] = microstructure[grid[0],grid[1],grid[2]]
-#      file['croak'].write('%s\t%i\n'%(str(seeds[n,:3]),seeds[n,3]))
       if options.x: grid[0] += 1
       if options.y: grid[1] += 1
       n += 1
-#    file['croak'].write('\n')

   newInfo['microstructures'] = len(np.unique(seeds[:,3]))

-#--- report ---------------------------------------------------------------------------------------
-  if (newInfo['microstructures'] != info['microstructures']):
-    file['croak'].write('--> microstructures: %i\n'%newInfo['microstructures'])
+# --- report ---------------------------------------------------------------------------------------

-#--- write header ---------------------------------------------------------------------------------
-  theTable.labels_clear()
-  theTable.labels_append(['x','y','z','microstructure'])
-  theTable.info_clear()
-  theTable.info_append(extra_header+[
-    scriptID,
-    "grid\ta %i\tb %i\tc %i"%(info['grid'][0],info['grid'][1],info['grid'][2],),
-    "size\tx %f\ty %f\tz %f"%(info['size'][0],info['size'][1],info['size'][2],),
-    "origin\tx %f\ty %f\tz %f"%(info['origin'][0],info['origin'][1],info['origin'][2],),
-    "homogenization\t%i"%info['homogenization'],
-    "microstructures\t%i"%(newInfo['microstructures']),
+  remarks = []
+  if (newInfo['microstructures'] != info['microstructures']): remarks.append('--> microstructures: %i'%newInfo['microstructures'])
+  if remarks != []: table.croak(remarks)

+# ------------------------------------------ assemble header ---------------------------------------

+  table.info_clear()
+  table.info_append(extra_header+[
+    scriptID + ' ' + ' '.join(sys.argv[1:]),
+    "grid\ta {grid[0]}\tb {grid[1]}\tc {grid[2]}".format(grid=newInfo['grid']),
+    "size\tx {size[0]}\ty {size[1]}\tz {size[2]}".format(size=newInfo['size']),
+    "origin\tx {origin[0]}\ty {origin[1]}\tz {origin[2]}".format(origin=info['origin']),
+    "homogenization\t{homog}".format(homog=info['homogenization']),
+    "microstructures\t{microstructures}".format(microstructures=newInfo['microstructures']),
+    ])

-  theTable.head_write()
-  theTable.output_flush()
-  theTable.data = seeds
-  theTable.data_writeArray('%g')
-  theTable.output_flush()
+  table.labels_clear()
+  table.labels_append(['{dim}_{label}'.format(dim = 1+i,label = options.position) for i in range(3)]+['microstructure'])
+  table.head_write()
+  table.output_flush()

+# --- write seeds information ------------------------------------------------------------

-#--- output finalization --------------------------------------------------------------------------
-  if file['name'] != 'STDIN':
-    theTable.close()
-    os.rename(file['name']+'_tmp',os.path.splitext(file['name'])[0] + '_poked_%ix%ix%i.seeds'%(Nx,Ny,Nz))
+  table.data = seeds
+  table.data_writeArray()

+# --- output finalization --------------------------------------------------------------------------

+  table.close()                                                       # close ASCII table
+  if name != 'STDIN':
+    os.rename(name+'_tmp',os.path.splitext(name)[0] + '_poked_%ix%ix%i.seeds'%(Nx,Ny,Nz))
@ -11,6 +11,21 @@ from scipy import spatial

 scriptID = string.replace('$Id$','\n','\\n')
 scriptName = os.path.splitext(scriptID.split()[1])[0]

+# ------------------------------------------ aux functions ---------------------------------
+
+def kdtree_search(cloud, queryPoints):
+  '''
+  find distances to nearest neighbor among cloud (N,d) for each of the queryPoints (n,d)
+  '''
+  n = queryPoints.shape[0]
+  distances = np.zeros(n,dtype=float)
+  tree = spatial.cKDTree(cloud)
+
+  for i in xrange(n):
+    distances[i], index = tree.query(queryPoints[i])
+
+  return distances
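A side note on the helper above: scipy's cKDTree.query also accepts an (n,d) array of query points directly, so the explicit loop is equivalent to one vectorized call. A quick check:

import numpy as np
from scipy import spatial

cloud   = np.random.random((100,3))                                   # existing point cloud
queries = np.random.random((5,3))
d,idx = spatial.cKDTree(cloud).query(queries)                         # per-query nearest-neighbor distance and index
print d.shape, idx.shape                                              # (5,) (5,)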

 # --------------------------------------------------------------------
 # MAIN
 # --------------------------------------------------------------------

@ -21,144 +36,172 @@ Reports positions with random crystal orientations in seeds file format to STDOUT

 """, version = scriptID)

-parser.add_option('-N', dest='N', type='int', metavar='int', \
-                  help='number of seed points to distribute [%default]')
-parser.add_option('-g','--grid', dest='grid', type='int', nargs=3, metavar='int int int', \
+parser.add_option('-N', dest='N',
+                  type = 'int', metavar = 'int',
+                  help = 'number of seed points to distribute [%default]')
+parser.add_option('-g','--grid',
+                  dest = 'grid',
+                  type = 'int', nargs = 3, metavar = 'int int int',
                   help='min a,b,c grid of hexahedral box %default')
-parser.add_option('-r', '--rnd', dest='randomSeed', type='int', metavar='int', \
-                  help='seed of random number generator [%default]')
-parser.add_option('-w', '--weights', dest='weights', action='store_true',
-                  help = 'assign random weights (Gaussian Distribution) to seed points for Laguerre tessellation [%default]')
-parser.add_option('-m', '--microstructure', dest='microstructure', type='int',
-                  help='first microstructure index [%default]', metavar='int')
-parser.add_option('-s','--selective', dest='selective', action='store_true',
-                  help = 'selective picking of seed points from random seed points [%default]')
+parser.add_option('-m', '--microstructure',
+                  dest = 'microstructure',
+                  type = 'int', metavar='int',
+                  help = 'first microstructure index [%default]')
+parser.add_option('-r', '--rnd',
+                  dest = 'randomSeed', type = 'int', metavar = 'int',
+                  help = 'seed of random number generator [%default]')

 group = OptionGroup(parser, "Laguerre Tessellation Options",
-                    "Parameters determining shape of weight distribution of seed points "
+                    "Parameters determining shape of weight distribution of seed points"
                    )
-group.add_option('--mean', dest='mean', type='float', metavar='float', \
-                 help='mean of Gaussian Distribution for weights [%default]')
-group.add_option('--sigma', dest='sigma', type='float', metavar='float', \
-                 help='standard deviation of Gaussian Distribution for weights [%default]')
+group.add_option('-w', '--weights',
+                 action = 'store_true',
+                 dest = 'weights',
+                 help = 'assign random weights (normal distribution) to seed points for Laguerre tessellation [%default]')
+group.add_option('--mean',
+                 dest = 'mean',
+                 type = 'float', metavar = 'float',
+                 help = 'mean of normal distribution for weights [%default]')
+group.add_option('--sigma',
+                 dest = 'sigma',
+                 type = 'float', metavar = 'float',
+                 help='standard deviation of normal distribution for weights [%default]')
 parser.add_option_group(group)

 group = OptionGroup(parser, "Selective Seeding Options",
                     "More uniform distribution of seed points using Mitchell's Best Candidate Algorithm"
                    )
-group.add_option('--distance', dest='bestDistance', type='float', metavar='float', \
-                 help='minimum distance to the next neighbor [%default]')
-group.add_option('--numCandidates', dest='numCandidates', type='int', metavar='int', \
-                 help='maximum number of point to consider for initial random points generation [%default]')
+group.add_option('-s','--selective',
+                 action = 'store_true',
+                 dest = 'selective',
+                 help = 'selective picking of seed points from random seed points [%default]')
+group.add_option('--distance',
+                 dest = 'distance',
+                 type = 'float', metavar = 'float',
+                 help = 'minimum distance to the next neighbor [%default]')
+group.add_option('--numCandidates',
+                 dest = 'numCandidates',
+                 type = 'int', metavar = 'int',
+                 help = 'size of point group to select best distance from [%default]')
 parser.add_option_group(group)

-parser.set_defaults(randomSeed = None)
-parser.set_defaults(grid = (16,16,16))
-parser.set_defaults(N = 20)
-parser.set_defaults(weights=False)
-parser.set_defaults(mean = 0.0)
-parser.set_defaults(sigma = 1.0)
-parser.set_defaults(microstructure = 1)
-parser.set_defaults(selective = False)
-parser.set_defaults(bestDistance = 0.2)
-parser.set_defaults(numCandidates = 10)
+parser.set_defaults(randomSeed = None,
+                    grid = (16,16,16),
+                    N = 20,
+                    weights = False,
+                    mean = 0.0,
+                    sigma = 1.0,
+                    microstructure = 1,
+                    selective = False,
+                    distance = 0.2,
+                    numCandidates = 10,
+                   )

+(options,filenames) = parser.parse_args()

-(options,filename) = parser.parse_args()
 options.grid = np.array(options.grid)

-labels = "1_coords\t2_coords\t3_coords\tphi1\tPhi\tphi2\tmicrostructure"

-# ------------------------------------------ Functions Definitions ---------------------------------

-def kdtree_search(xyz, point) :
-  dist, index = spatial.cKDTree(xyz).query(np.array(point))
-  return dist

-def generatePoint() :
-  return np.array([random.uniform(0,float(options.grid[0])/float(max(options.grid))), \
-                   random.uniform(0,float(options.grid[1])/float(max(options.grid))), \
-                   random.uniform(0,float(options.grid[2])/float(max(options.grid)))])

-# ------------------------------------------ setup file handle -------------------------------------
-if filename == []:
-  file = {'output':sys.stdout, 'croak':sys.stderr}
-else:
-  file = {'output':open(filename[0],'w'), 'croak':sys.stderr}

 gridSize = options.grid.prod()
-if gridSize == 0:
-  file['croak'].write('zero grid dimension for %s.\n'%(', '.join([['a','b','c'][x] for x in np.where(options.grid == 0)[0]])))
-  sys.exit()
-if options.N > gridSize:
-  file['croak'].write('accommodating only %i seeds on grid.\n'%gridSize)
-  options.N = gridSize
-randomSeed = int(os.urandom(4).encode('hex'), 16) if options.randomSeed == None else options.randomSeed
-np.random.seed(randomSeed)                                            # init random generators
-random.seed(randomSeed)

-grainEuler = np.random.rand(3,options.N)                              # create random Euler triplets
-grainEuler[0,:] *= 360.0                                              # phi_1 is uniformly distributed
-grainEuler[1,:] = np.arccos(2*grainEuler[1,:]-1)*180.0/math.pi        # cos(Phi) is uniformly distributed
-grainEuler[2,:] *= 360.0                                              # phi_2 is uniformly distributed
+if options.randomSeed == None: options.randomSeed = int(os.urandom(4).encode('hex'), 16)
+np.random.seed(options.randomSeed)                                    # init random generators
+random.seed(options.randomSeed)

-microstructure=np.arange(options.microstructure,options.microstructure+options.N).reshape(1,options.N)

-if options.selective == False :
-  seedpoints = -np.ones(options.N,dtype='int')                        # init grid positions of seed points
+# --- loop over output files -------------------------------------------------------------------------

+if filenames == []: filenames = ['STDIN']

+for name in filenames:

+  table = damask.ASCIItable(name = name, outname = None,
+                            buffered = False, writeonly = True)
+  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

+# --- sanity checks -------------------------------------------------------------------------

+  errors = []
+  if gridSize == 0: errors.append('zero grid dimension for %s.'%(', '.join([['a','b','c'][x] for x in np.where(options.grid == 0)[0]])))
+  if options.N > gridSize/10.: errors.append('seed count exceeds 0.1 of grid points.')
+  if options.selective and 4./3.*math.pi*(options.distance/2.)**3*options.N > 0.5:
+    errors.append('maximum recommended seed point count for given distance is {}.'.format(int(3./8./math.pi/(options.distance/2.)**3)))
+  if errors != []:
+    table.croak(errors)
+    sys.exit()

+# --- do work ------------------------------------------------------------------------------------

+  grainEuler = np.random.rand(3,options.N)                            # create random Euler triplets
+  grainEuler[0,:] *= 360.0                                            # phi_1 is uniformly distributed
+  grainEuler[1,:] = np.degrees(np.arccos(2*grainEuler[1,:]-1))        # cos(Phi) is uniformly distributed
+  grainEuler[2,:] *= 360.0                                            # phi_2 is uniformly distributed
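The Phi draw is inverse-CDF sampling: picking cos(Phi) uniformly on [-1,1] (rather than Phi uniformly on [0,180]) is what makes the resulting orientations area-uniform on the sphere. A quick numerical check:

import numpy as np

u = np.random.rand(1000000)
Phi = np.degrees(np.arccos(2*u-1))                                    # cos(Phi) uniform on [-1,1]
z = np.cos(np.radians(Phi))
print np.histogram(z,bins=4,range=(-1.,1.))[0]                        # ~250000 per bin: flat in cos(Phi)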

+  if not options.selective:

+    seeds = np.zeros((3,options.N),dtype=float)                       # seed positions array
+    gridpoints = random.sample(range(gridSize),options.N)             # create random permutation of all grid positions and choose first N

+    seeds[0,:] = (np.mod(gridpoints                                   ,options.grid[0])\
+                 +np.random.random()) /options.grid[0]
+    seeds[1,:] = (np.mod(gridpoints // options.grid[0]                ,options.grid[1])\
+                 +np.random.random()) /options.grid[1]
+    seeds[2,:] = (np.mod(gridpoints //(options.grid[1]*options.grid[0]),options.grid[2])\
+                 +np.random.random()) /options.grid[2]

-  if options.N * 1024 < gridSize:                                     # heuristic limit for random search
-    i = 0
-    while i < options.N:                                              # until all (unique) points determined
-      p = np.random.randint(gridSize)                                 # pick a location
-      if p not in seedpoints:                                         # not yet taken?
-        seedpoints[i] = p                                             # take it
-        i += 1                                                        # advance stepper
-  else:
-    seedpoints = np.array(random.sample(range(gridSize),options.N))   # create random permutation of all grid positions and choose first N

-  seeds = np.zeros((3,options.N),float)                               # init seed positions
-  seeds[0,:] = (np.mod(seedpoints                                    ,options.grid[0])\
-               +np.random.random())/options.grid[0]
-  seeds[1,:] = (np.mod(seedpoints // options.grid[0]                 ,options.grid[1])\
-               +np.random.random())/options.grid[1]
-  seeds[2,:] = (np.mod(seedpoints //(options.grid[1]*options.grid[0]),options.grid[2])\
-               +np.random.random())/options.grid[2]
-  table = np.transpose(np.concatenate((seeds,grainEuler,microstructure),axis = 0))

-else :
-  samples = generatePoint().reshape(1,3)
+  else:
+    seeds = np.zeros((options.N,3),dtype=float)                       # seed positions array
+    seeds[0] = np.random.random(3)*options.grid/max(options.grid)
+    i = 1                                                             # start out with one given point
+    if i%(options.N/100.) < 1: table.croak('.',False)

-  while samples.shape[0] < options.N :
-    bestDistance = options.bestDistance
-    for i in xrange(options.numCandidates) :
-      c = generatePoint()
-      d = kdtree_search(samples, c)
-      if (d > bestDistance) :
-        bestDistance = d
-        bestCandidate = c
-    if kdtree_search(samples,bestCandidate) != 0.0 :
-      samples = np.append(samples,bestCandidate.reshape(1,3),axis=0)
-    else :
-      continue
-  table = np.transpose(np.concatenate((samples.T,grainEuler,microstructure),axis = 0))

+    while i < options.N:
+      candidates = np.random.random(options.numCandidates*3).reshape(options.numCandidates,3)
+      distances = kdtree_search(seeds[:i],candidates)
+      best = distances.argmax()
+      if distances[best] > options.distance:                          # require minimum separation
+        seeds[i] = candidates[best]                                   # take candidate with maximum separation to existing point cloud
+        i += 1
+        if i%(options.N/100.) < 1: table.croak('.',False)
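The replacement loop is a variant of Mitchell's best candidate algorithm: each round draws numCandidates random points, keeps the one farthest from the existing cloud, and accepts it only if it clears the minimum separation. A self-contained sketch with made-up parameters:

import numpy as np
from scipy import spatial

N,k,dmin = 50,10,0.05                                                 # hypothetical: 50 seeds, 10 candidates per round
pts = np.random.random((1,3))                                         # start from a single random point
while len(pts) < N:
  cand = np.random.random((k,3))
  d,index = spatial.cKDTree(pts).query(cand)                          # distance of each candidate to nearest existing point
  if d.max() > dmin:                                                  # enforce minimum separation
    pts = np.vstack((pts,cand[d.argmax()]))                           # keep the farthest (best) candidate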

-if options.weights :
-  weight = np.random.normal(loc=options.mean, scale=options.sigma, size=options.N)
-  table = np.append(table, weight.reshape(options.N,1), axis=1)
-  labels += "\tweight"
+    table.croak('')
+    seeds = np.transpose(seeds)                                       # prepare shape for stacking

-# -------------------------------------- Write Data --------------------------------------------------
+  if options.weights:
+    seeds = np.transpose(np.vstack((seeds,
+                                    grainEuler,
+                                    np.arange(options.microstructure,
+                                              options.microstructure + options.N),
+                                    np.random.normal(loc=options.mean, scale=options.sigma, size=options.N),
+                                   )))
+  else:
+    seeds = np.transpose(np.vstack((seeds,
+                                    grainEuler,
+                                    np.arange(options.microstructure,
+                                              options.microstructure + options.N),
+                                   )))

-header = ["5\theader",
-          scriptID + " " + " ".join(sys.argv[1:]),
-          "grid\ta {}\tb {}\tc {}".format(options.grid[0],options.grid[1],options.grid[2]),
-          "microstructures\t{}".format(options.N),
-          "randomSeed\t{}".format(randomSeed),
-          "%s"%labels,
-         ]
+# ------------------------------------------ assemble header ---------------------------------------

-for line in header:
-  file['output'].write(line+"\n")
-np.savetxt(file['output'], table, fmt='%10.6f', delimiter='\t')
+  table.info_clear()
+  table.info_append([
+    scriptID + ' ' + ' '.join(sys.argv[1:]),
+    "grid\ta {grid[0]}\tb {grid[1]}\tc {grid[2]}".format(grid=options.grid),
+    "microstructures\t{}".format(options.N),
+    "randomSeed\t{}".format(options.randomSeed),
+    ])
+  table.labels_clear()
+  table.labels_append( ['{dim}_{label}'.format(dim = 1+i,label = 'pos')   for i in xrange(3)] +
+                       ['{dim}_{label}'.format(dim = 1+i,label = 'Euler') for i in xrange(3)] +
+                       ['microstructure'] +
+                      (['weight'] if options.weights else []))
+  table.head_write()
+  table.output_flush()

+# --- write seeds information ------------------------------------------------------------

+  table.data = seeds
+  table.data_writeArray()

+# --- output finalization --------------------------------------------------------------------------

+  table.close()                                                       # close ASCII table
@ -13,18 +13,6 @@ scriptName = os.path.splitext(scriptID.split()[1])[0]

 #--------------------------------------------------------------------------------------------------
 # MAIN
 #--------------------------------------------------------------------------------------------------
-identifiers = {
-        'grid':   ['a','b','c'],
-        'size':   ['x','y','z'],
-        'origin': ['x','y','z'],
-          }
-mappings = {
-        'grid':            lambda x: int(x),
-        'size':            lambda x: float(x),
-        'origin':          lambda x: float(x),
-        'homogenization':  lambda x: int(x),
-        'microstructures': lambda x: int(x),
-          }

 parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
 Create seed file by taking microstructure indices from given ASCIItable column.

@ -36,16 +24,27 @@ Examples:

 """, version = scriptID)

-parser.add_option('-p', '--positions', dest = 'pos', metavar = 'string',
-                  help = 'coordinate label')
-parser.add_option('--boundingbox', dest = 'box', type = 'float', nargs = 6, metavar = ' '.join(['float']*6),
-                  help = 'min (x,y,z) and max (x,y,z) coordinates of bounding box [auto]')
-parser.add_option('-i', '--index', dest = 'index', type = 'string', metavar = 'string',
-                  help = 'microstructure index label')
-parser.add_option('-w','--white', dest = 'whitelist', action = 'extend',
-                  help = 'white list of microstructure indices', metavar = '<LIST>')
-parser.add_option('-b','--black', dest = 'blacklist', action = 'extend',
-                  help = 'black list of microstructure indices', metavar = '<LIST>')
+parser.add_option('-p', '--positions',
+                  dest = 'pos',
+                  type = 'string', metavar = 'string',
+                  help = 'coordinate label [%default]')
+parser.add_option('--boundingbox',
+                  dest = 'box',
+                  type = 'float', nargs = 6, metavar = ' '.join(['float']*6),
+                  help = 'min (x,y,z) and max (x,y,z) coordinates of bounding box [tight]')
+parser.add_option('-i', '--index',
+                  dest = 'index',
+                  type = 'string', metavar = 'string',
+                  help = 'microstructure index label [%default]')
+parser.add_option('-w','--white',
+                  dest = 'whitelist',
+                  action = 'extend', metavar = '<int LIST>',
+                  help = 'whitelist of microstructure indices')
+parser.add_option('-b','--black',
+                  dest = 'blacklist',
+                  action = 'extend', metavar = '<int LIST>',
+                  help = 'blacklist of microstructure indices')

+parser.set_defaults(pos = 'pos',
+                    index = 'microstructure',
+                   )

@ -57,58 +56,51 @@ if options.blacklist != None: options.blacklist = map(int,options.blacklist)

-# --- loop over input files -------------------------------------------------------------------------

-if filenames == []:
-  filenames = ['STDIN']
+if filenames == []: filenames = ['STDIN']

 for name in filenames:
-  if name == 'STDIN':
-    file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr}
-    file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
-  else:
-    if not os.path.exists(name): continue
-    file = {'name':name,
-            'input':open(name),
-            'output':open(os.path.splitext(name)[0]+ \
-                          ('' if options.label == None else '_'+options.label)+ \
-                          '.png','w'),
-            'croak':sys.stderr}
-    file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')

-  table = damask.ASCIItable(file['input'],file['output'],
-                            buffered = False)                         # make unbuffered ASCII_table
+  if not (name == 'STDIN' or os.path.exists(name)): continue
+  table = damask.ASCIItable(name = name, outname = os.path.splitext(name)[0]+'.seeds',
+                            buffered = False)
+  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else ''))

   table.head_read()                                                   # read ASCII header info

-# ------------------------------------------ process data ------------------------------------------
+# ------------------------------------------ sanity checks ---------------------------------------

+  missing_labels = table.data_readArray([options.pos,options.index])

   errors = []

-  missing_labels = table.data_readArray(options.pos,options.label)
   if len(missing_labels) > 0:
-    errors.append('column%s %s not found'%('s' if len(missing_labels) > 1 else '',
+    errors.append('column{} {} not found'.format('s' if len(missing_labels) > 1 else '',
                                                  ', '.join(missing_labels)))

   for label, dim in {options.pos: 3,
-                     options.label: 1}.iteritems():
+                     options.index: 1}.iteritems():
     if table.label_dimension(label) != dim:
-      errors.append('column %s has wrong dimension'%label)
+      errors.append('column {} has wrong dimension'.format(label))

   if errors != []:
-    file['croak'].write('\n'.join(errors))
+    table.croak(errors)
     table.close(dismiss = True)                                       # close ASCII table file handles and delete output file
     continue

 # ------------------------------------------ process data ------------------------------------------

-#--- finding bounding box ------------------------------------------------------------------------------------
+# --- finding bounding box -------------------------------------------------------------------------

   boundingBox = np.array((np.amin(table.data[:,0:3],axis = 0),np.amax(table.data[:,0:3],axis = 0)))
   if options.box:
     boundingBox[0,:] = np.minimum(options.box[0:3],boundingBox[0,:])
     boundingBox[1,:] = np.maximum(options.box[3:6],boundingBox[1,:])

-#--- rescaling coordinates ------------------------------------------------------------------------------------
+# --- rescaling coordinates ------------------------------------------------------------------------

   table.data[:,0:3] -= boundingBox[0,:]
   table.data[:,0:3] /= boundingBox[1,:]-boundingBox[0,:]
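The two in-place operations map the (possibly user-extended) bounding box onto the unit cube expected in seed files: shift the lower corner to the origin, then divide by the box edge lengths. A minimal sketch:

import numpy as np

data = np.array([[1.,2.,3.],
                 [3.,6.,7.]])                                         # made-up point coordinates
box = np.array((data.min(axis=0),data.max(axis=0)))                   # tight bounding box
data -= box[0]                                                        # lower corner -> origin
data /= box[1]-box[0]                                                 # scale to unit cube
print data                                                            # rows become all 0 and all 1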

-#--- filtering of grain voxels ------------------------------------------------------------------------------------
+# --- filtering of grain voxels --------------------------------------------------------------------

   mask = np.logical_and(\
                         np.ones_like(table.data[:,3],bool) \
                         if options.whitelist == None \

@ -129,10 +121,8 @@ for name in filenames:

                                                  map(str,boundingBox[1,:]-boundingBox[0,:])))))),
     ]
   table.labels_clear()
-  table.labels_append(['1_coords','2_coords','3_coords','microstructure'])   # implicitly switching label processing/writing on
+  table.labels_append(['1_pos','2_pos','3_pos','microstructure'])           # implicitly switching label processing/writing on
   table.head_write()

   table.data_writeArray()
   table.output_flush()

   table.close()                                                       # close ASCII tables