Merge branch 'development' of magit1.mpie.de:damask/DAMASK into development

commit 72da494370
Author: Martin Diehl
Date:   2017-08-12 12:30:58 +02:00

3 changed files with 45 additions and 40 deletions

@@ -1 +1 @@
-v2.0.1-833-ga28b4b3
+v2.0.1-840-gb7d4b3d

@@ -41,7 +41,7 @@ parser.add_option('-f','--formula',
 parser.add_option('-c','--condition',
                   dest = 'condition', metavar='string',
-                  help = 'condition to filter rows')
+                  help = 'condition to alter existing column data')
 parser.set_defaults(condition = None,
                    )
@@ -77,28 +77,27 @@ for name in filenames:
 # --------------------------------------- evaluate condition ---------------------------------------
   if options.condition is not None:
-    interpolator = []
     condition = options.condition                                               # copy per file, since might be altered inline
     breaker = False
-    for position,operand in enumerate(set(re.findall(r'#(([s]#)?(.+?))#',condition))):              # find three groups
-      condition = condition.replace('#'+operand[0]+'#',
-                                    { '': '{%i}'%position,
-                                     's#':'"{%i}"'%position}[operand[1]])
-      if operand[2] in specials:                                                # special label
-        interpolator += ['specials["%s"]'%operand[2]]
-      else:
-        try:
-          interpolator += ['%s(table.data[%i])'%({ '':'float',
-                                                  's#':'str'}[operand[1]],
-                                                 table.label_index(operand[2]))] # could be generalized to indexrange as array lookup
-        except:
-          damask.util.croak('column "{}" not found.'.format(operand[2]))
-          breaker = True
-    if breaker: continue                                                        # found mistake in condition evaluation --> next file
-    evaluator_condition = "'" + condition + "'.format(" + ','.join(interpolator) + ")"
+    for position,(all,marker,column) in enumerate(set(re.findall(r'#(([s]#)?(.+?))#',condition))):  # find three groups
+      idx = table.label_index(column)
+      dim = table.label_dimension(column)
+      if idx < 0 and column not in specials:
+        damask.util.croak('column "{}" not found.'.format(column))
+        breaker = True
+      else:
+        if column in specials:
+          replacement = 'specials["{}"]'.format(column)
+        elif dim == 1:                                                          # scalar input
+          replacement = '{}(table.data[{}])'.format({ '':'float',
+                                                     's#':'str'}[marker],idx)   # take float or string value of data column
+        elif dim > 1:                                                           # multidimensional input (vector, tensor, etc.)
+          replacement = 'np.array(table.data[{}:{}],dtype=float)'.format(idx,idx+dim)  # use (flat) array representation
+        condition = condition.replace('#'+all+'#',replacement)
+    if breaker: continue                                                        # found mistake in condition evaluation --> next file
@@ -162,7 +161,7 @@ for name in filenames:
 # -------------------------------------- evaluate formulas -----------------------------------------
-    if options.condition is None or eval(eval(evaluator_condition)):            # condition for veteran replacement fulfilled
+    if options.condition is None or eval(condition):                            # condition for veteran replacement fulfilled
       for veteran in veterans:                                                  # evaluate formulas that overwrite
         table.data[table.label_index(veteran):
                    table.label_index(veteran)+table.label_dimension(veteran)] = \
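
Note on the change above (a minimal sketch, not part of the commit): the old code turned each #column# marker into a positional placeholder for str.format() and then needed a double eval(), while the new code replaces the marker directly with a Python expression over the flat data row (float, str, or a numpy slice) so that a single eval() per row suffices. The sketch below imitates that substitution step outside of DAMASK; labels, dims, row, and label_index() are hypothetical stand-ins for the ASCIItable interface.

import re
import numpy as np

# hypothetical stand-ins for the ASCIItable interface used by the script
labels   = ['texture','f','pos']                                           # column labels in file order
dims     = {'texture':1, 'f':9, 'pos':3}                                   # entries per label
row      = ['2','1','0','0','0','1','0','0','0','1','0.1','0.2','0.3']     # one flat data row (strings)
specials = {'_row_': 1}

def label_index(label):
  """Start position of a label in the flat row, -1 if absent (mimics table.label_index)."""
  idx = 0
  for l in labels:
    if l == label: return idx
    idx += dims[l]
  return -1

def substitute(condition):
  """Replace every #column# / #s#column# marker with a Python expression over `data`."""
  for all,marker,column in set(re.findall(r'#(([s]#)?(.+?))#',condition)):  # find three groups
    idx = label_index(column)
    dim = dims.get(column,0)
    if idx < 0 and column not in specials:
      raise KeyError('column "{}" not found.'.format(column))
    if column in specials:
      replacement = 'specials["{}"]'.format(column)
    elif dim == 1:                                                          # scalar: float or string value
      replacement = '{}(data[{}])'.format({'':'float','s#':'str'}[marker],idx)
    else:                                                                   # vector/tensor: flat array slice
      replacement = 'np.array(data[{}:{}],dtype=float)'.format(idx,idx+dim)
    condition = condition.replace('#'+all+'#',replacement)
  return condition

expr = substitute('#texture# == 2 and np.linalg.norm(#pos#) < 0.5')
print(expr)                                                                 # float(data[0]) == 2 and ...
print(eval(expr,{'np':np},{'data':row,'specials':specials}))                # True

The substituted expression keeps scalar columns as float (or str when the #s#…# form is used) and exposes multi-dimensional columns as flat numpy slices, which is what allows conditions such as the norm above without building an interpolator list.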

@@ -51,7 +51,7 @@ parser.add_option('-c','--condition',
                   dest = 'condition', metavar='string',
                   help = 'condition to filter rows')
-parser.set_defaults(condition = '',
+parser.set_defaults(condition = None,
                    )
 (options,filenames) = parser.parse_args()
@@ -98,23 +98,29 @@ for name in filenames:
   else:
     order = range(len(labels))                                                  # maintain original order of labels
-  interpolator = []
-  condition = options.condition                                                 # copy per file, might be altered
-  for position,operand in enumerate(set(re.findall(r'#(([s]#)?(.+?))#',condition))):                # find three groups
-    condition = condition.replace('#'+operand[0]+'#',
-                                  { '': '{{{}}}' .format(position),
-                                   's#':'"{{{}}}"'.format(position)}[operand[1]])
-    if operand[2] in specials:                                                  # special label ?
-      interpolator += ['specials["{}"]'.format(operand[2])]
-    else:
-      try:
-        interpolator += ['{}(table.data[{}])'.format({ '':'float',
-                                                      's#':'str'}[operand[1]],
-                                                     table.label_index(operand[2]))]
-      except:
-        parser.error('column "{}" not found...\n'.format(operand[2]))
-
-  evaluator = "'" + condition + "'.format(" + ','.join(interpolator) + ")"
+# --------------------------------------- evaluate condition ---------------------------------------
+  if options.condition is not None:
+    condition = options.condition                                               # copy per file, since might be altered inline
+    breaker = False
+
+    for position,(all,marker,column) in enumerate(set(re.findall(r'#(([s]#)?(.+?))#',condition))):  # find three groups
+      idx = table.label_index(column)
+      dim = table.label_dimension(column)
+      if idx < 0 and column not in specials:
+        damask.util.croak('column "{}" not found.'.format(column))
+        breaker = True
+      else:
+        if column in specials:
+          replacement = 'specials["{}"]'.format(column)
+        elif dim == 1:                                                          # scalar input
+          replacement = '{}(table.data[{}])'.format({ '':'float',
+                                                     's#':'str'}[marker],idx)   # take float or string value of data column
+        elif dim > 1:                                                           # multidimensional input (vector, tensor, etc.)
+          replacement = 'np.array(table.data[{}:{}],dtype=float)'.format(idx,idx+dim)  # use (flat) array representation
+        condition = condition.replace('#'+all+'#',replacement)
+    if breaker: continue                                                        # found mistake in condition evaluation --> next file
 # ------------------------------------------ assemble header ---------------------------------------
@@ -129,7 +135,7 @@ for name in filenames:
   outputAlive = True
   while outputAlive and table.data_read():                                      # read next data line of ASCII table
     specials['_row_'] += 1                                                      # count row
-    if condition == '' or eval(eval(evaluator)):                                # valid row ?
+    if options.condition is None or eval(condition):                            # valid row ?
       table.data = [table.data[position] for position in positions]             # retain filtered columns
       outputAlive = table.data_write()                                          # output processed line
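
In the filter script the pre-substituted expression is then simply eval'd once per data row. A minimal sketch of that loop under the same assumptions (hypothetical rows list, expr as produced by the substitution sketch above, a single scalar column 'x' at index 0):

import numpy as np

rows     = [['0.2'],['0.8'],['0.5']]                   # hypothetical flat data rows, one column 'x'
specials = {'_row_': 0}
expr     = 'float(data[0]) > 0.4'                      # what substitute('#x# > 0.4') would return here

kept = []
for data in rows:
  specials['_row_'] += 1                               # running row count, reachable as #_row_# in conditions
  if expr is None or eval(expr,{'np':np},{'data':data,'specials':specials}):  # valid row ?
    kept.append(data)

print(kept)                                            # [['0.8'], ['0.5']]

When no condition is given, the eval is skipped entirely, which is why the default changed from '' to None in the first hunk of this file.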