Merge remote-tracking branch 'origin/development' into misc-improvements

Martin Diehl 2019-01-05 09:22:41 +01:00
commit c00d00c219
6 changed files with 54 additions and 28 deletions

View File

@@ -1,4 +1,4 @@
Copyright 2011-18 Max-Planck-Institut für Eisenforschung GmbH
Copyright 2011-19 Max-Planck-Institut für Eisenforschung GmbH
DAMASK is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by

View File

@@ -1 +1 @@
v2.0.2-1187-gcd8ee450
v2.0.2-1236-g1ef82e35

View File

@@ -134,7 +134,7 @@ class extendableOption(Option):
# Print iterations progress
# from https://gist.github.com/aubricus/f91fb55dc6ba5557fbab06119420dd6a
def progressBar(iteration, total, prefix='', suffix='', decimals=1, bar_length=100):
def progressBar(iteration, total, start=None, prefix='', suffix='', decimals=1, bar_length=50):
"""
Call in a loop to create terminal progress bar
@@ -146,12 +146,19 @@ def progressBar(iteration, total, prefix='', suffix='', decimals=1, bar_length=100):
decimals - Optional : positive number of decimals in percent complete (Int)
bar_length - Optional : character length of bar (Int)
"""
str_format = "{0:." + str(decimals) + "f}"
import time
if suffix == '' and start is not None and iteration > 0:
remainder = (total - iteration) * (time.time()-start)/iteration
suffix = '{: 3d}:'.format(int( remainder//3600)) + \
'{:02d}:'.format(int((remainder//60)%60)) + \
'{:02d}' .format(int( remainder %60))
str_format = "{{0:{}.{}f}}".format(decimals+4,decimals)
percents = str_format.format(100 * (iteration / float(total)))
filled_length = int(round(bar_length * iteration / float(total)))
bar = '█' * filled_length + '-' * (bar_length - filled_length)
sys.stderr.write('\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)),
sys.stderr.write('\r%s |%s| %s %s' % (prefix, bar, percents+'%', suffix)),
if iteration == total: sys.stderr.write('\n\n')
sys.stderr.flush()
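
The new start argument turns an otherwise empty suffix into a remaining-time estimate: the elapsed time per completed iteration is extrapolated over the outstanding iterations and formatted as H:MM:SS. A minimal usage sketch of the updated signature (the loop body and iteration count are placeholders):

import time
import damask

total = 1000
start = time.time()
for i in range(total):
    time.sleep(0.001)                              # placeholder for real work
    damask.util.progressBar(iteration=i+1, total=total,
                            start=start, prefix='demo')  # empty suffix becomes an ETA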

View File

@@ -1,4 +1,4 @@
#!/usr/bin/env python2.7
#!/usr/bin/env python3
# -*- coding: UTF-8 no BOM -*-
import os,sys
@@ -21,18 +21,19 @@ scriptID = ' '.join([scriptName,damask.version])
# --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
Apply a user-specified function to condense all rows for which column 'label' has identical values into a single row.
Output table will contain as many rows as there are different (unique) values in the grouping column.
Apply a user-specified function to condense into a single row all rows for which the column(s) 'label' have identical values.
Output table will contain as many rows as there are different (unique) values in the grouping column(s).
Periodic domain averaging of coordinate values is supported.
Examples:
For grain-averaged values, replace all rows sharing a particular 'texture' with a single row containing their average.
""", version = scriptID)
{name} --label texture --function np.average data.txt
""".format(name = scriptName), version = scriptID)
parser.add_option('-l','--label',
dest = 'label',
type = 'string', metavar = 'string',
help = 'column label for grouping rows')
action = 'extend', metavar = '<string LIST>',
help = 'column label(s) for grouping rows')
parser.add_option('-f','--function',
dest = 'function',
type = 'string', metavar = 'string',
@@ -40,7 +41,7 @@ parser.add_option('-f','--function',
parser.add_option('-a','--all',
dest = 'all',
action = 'store_true',
help = 'apply mapping function also to grouping column')
help = 'apply mapping function also to grouping column(s)')
group = OptionGroup(parser, "periodic averaging", "")
@@ -57,6 +58,7 @@ parser.add_option_group(group)
parser.set_defaults(function = 'np.average',
all = False,
label = [],
boundary = [0.0, 1.0])
(options,filenames) = parser.parse_args()
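
The switch of --label from a plain string to action='extend' allows several grouping columns; extendableOption (defined in damask's util module, see the hunk header above) follows the classic optparse 'extend' recipe. A minimal re-sketch of that recipe, under the assumption that DAMASK's version splits comma-separated values the same way:

from optparse import Option, OptionParser

class extendableOption(Option):
    # accept comma-separated lists and accumulate them across repeated flags
    ACTIONS = Option.ACTIONS + ("extend",)
    STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
    TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
    ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)

    def take_action(self, action, dest, opt, value, values, parser):
        if action == "extend":
            values.ensure_value(dest, []).extend(value.split(','))
        else:
            Option.take_action(self, action, dest, opt, value, values, parser)

parser = OptionParser(option_class=extendableOption)
parser.add_option('-l', '--label', dest='label', action='extend', metavar='<string LIST>')
opts, _ = parser.parse_args(['--label', 'texture,phase'])
print(opts.label)                                  # -> ['texture', 'phase']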
@@ -71,7 +73,7 @@ try:
except:
mapFunction = None
if options.label is None:
if options.label == []:
parser.error('no grouping column specified.')
if not hasattr(mapFunction,'__call__'):
parser.error('function "{}" is not callable.'.format(options.function))
@@ -89,13 +91,20 @@ for name in filenames:
# ------------------------------------------ sanity checks ---------------------------------------
remarks = []
errors = []
table.head_read()
if table.label_dimension(options.label) != 1:
damask.util.croak('column {} is not of scalar dimension.'.format(options.label))
table.close(dismiss = True) # close ASCIItable and remove empty file
grpColumns = table.label_index(options.label)[::-1]
grpColumns = grpColumns[np.where(grpColumns>=0)]
if len(grpColumns) == 0: errors.append('no valid grouping column present.')
if remarks != []: damask.util.croak(remarks)
if errors != []:
damask.util.croak(errors)
table.close(dismiss=True)
continue
else:
grpColumn = table.label_index(options.label)
# ------------------------------------------ assemble info ---------------------------------------
@@ -108,10 +117,9 @@ for name in filenames:
indexrange = table.label_indexrange(options.periodic) if options.periodic is not None else []
rows,cols = table.data.shape
table.data = table.data[np.lexsort([table.data[:,grpColumn]])] # sort data by grpColumn
values,index = np.unique(table.data[:,grpColumn], return_index = True) # unique grpColumn values and their positions
index = np.append(index,rows) # add termination position
table.data = table.data[np.lexsort(table.data[:,grpColumns].T)] # sort data by grpColumn(s)
values,index = np.unique(table.data[:,grpColumns], axis=0, return_index=True) # unique grpColumn values and their positions
index = sorted(np.append(index,rows)) # add termination position
grpTable = np.empty((len(values), cols)) # initialize output
for i in range(len(values)): # iterate over groups (unique values in grpColumn)
@@ -119,7 +127,7 @@ for name in filenames:
grpTable[i,indexrange] = \
periodicAverage(table.data[index[i]:index[i+1],indexrange],options.boundary) # apply periodicAverage mapping function
if not options.all: grpTable[i,grpColumn] = table.data[index[i],grpColumn] # restore grouping column value
if not options.all: grpTable[i,grpColumns] = table.data[index[i],grpColumns] # restore grouping column value
table.data = grpTable
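
The grouping logic thus generalizes from one column to several: rows are sorted by all grouping columns at once with np.lexsort (which treats its last key as primary, hence the earlier [::-1] reversal), and np.unique(..., axis=0) marks where each block of identical keys starts. A standalone sketch on made-up data (array contents and column choices are illustrative only):

import numpy as np

# two grouping columns (0 and 1) and one data column (2)
data = np.array([[1., 0., 10.],
                 [0., 1., 20.],
                 [1., 0., 30.],
                 [0., 1., 40.]])
grpColumns = np.array([1, 0])                      # reversed so that column 0 is the primary key

data = data[np.lexsort(data[:, grpColumns].T)]     # sort rows by all grouping columns
values, index = np.unique(data[:, grpColumns], axis=0, return_index=True)
index = sorted(np.append(index, len(data)))        # np.unique orders indices by key value,
                                                   # not position, so sort before slicing
for i in range(len(values)):
    group = data[index[i]:index[i+1]]              # contiguous block sharing one key
    print(group[:, 2].mean())                      # e.g. np.average as mapping function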

View File

@@ -830,9 +830,10 @@ if options.info:
elementsOfNode = {}
Nelems = stat['NumberOfElements']
starttime = time.time()
for e in range(Nelems):
if options.verbose and Nelems > 100 and e%(Nelems//100) == 0: # report in 1% steps if possible and avoid modulo by zero
damask.util.progressBar(iteration=e,total=Nelems,prefix='1/3: connecting elements')
damask.util.progressBar(iteration=e,total=Nelems,start=starttime,prefix='1/3: connecting elements')
for n in map(p.node_sequence,p.element(e).items):
if n not in elementsOfNode:
elementsOfNode[n] = [p.element_id(e)]
@@ -855,9 +856,10 @@ damask.util.progressBar(iteration=1,total=1,prefix='1/3: connecting elements')
if options.nodalScalar:
Npoints = stat['NumberOfNodes']
starttime = time.time()
for n in range(Npoints):
if options.verbose and Npoints > 100 and n%(Npoints//100) == 0: # report in 1% steps if possible and avoid modulo by zero
damask.util.progressBar(iteration=n,total=Npoints,prefix='2/3: scanning nodes ')
damask.util.progressBar(iteration=n,total=Npoints,start=starttime,prefix='2/3: scanning nodes ')
myNodeID = p.node_id(n)
myNodeCoordinates = [p.node(n).x, p.node(n).y, p.node(n).z]
myElemID = 0
@@ -892,9 +894,10 @@ if options.nodalScalar:
else:
Nelems = stat['NumberOfElements']
starttime = time.time()
for e in range(Nelems):
if options.verbose and Nelems > 100 and e%(Nelems//100) == 0: # report in 1% steps if possible and avoid modulo by zero
damask.util.progressBar(iteration=e,total=Nelems,prefix='2/3: scanning elements ')
damask.util.progressBar(iteration=e,total=Nelems,start=starttime,prefix='2/3: scanning elements ')
myElemID = p.element_id(e)
myIpCoordinates = ipCoords(p.element(e).type, list(map(lambda node: [node.x, node.y, node.z],
list(map(p.node, map(p.node_sequence, p.element(e).items))))))
@@ -1032,10 +1035,11 @@ for incCount,position in enumerate(locations): # walk through locations
member = 0
Ngroups = len(groups)
starttime = time.time()
for j,group in enumerate(groups):
f = incCount*Ngroups + j
if options.verbose and (Ngroups*Nincs) > 100 and f%((Ngroups*Nincs)//100) == 0: # report in 1% steps if possible and avoid modulo by zero
damask.util.progressBar(iteration=f,total=Ngroups*Nincs,prefix='3/3: processing points ')
damask.util.progressBar(iteration=f,total=Ngroups*Nincs,start=starttime,prefix='3/3: processing points ')
N = 0 # group member counter
for (e,n,i,g,n_local) in group[1:]: # loop over group members
member += 1
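
All three loops throttle their reporting with the same guard: update the bar only about every 1% of iterations, and only when total//100 cannot be zero. A standalone sketch of the pattern (element count and work are placeholders):

import time
import damask

Nelems = 12345
starttime = time.time()
for e in range(Nelems):
    if Nelems > 100 and e % (Nelems//100) == 0:    # 1% steps; guard avoids modulo by zero
        damask.util.progressBar(iteration=e, total=Nelems,
                                start=starttime, prefix='processing')
    # ... per-element work ...
damask.util.progressBar(iteration=1, total=1, prefix='processing')  # finish the bar, as the script does after each loop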

View File

@@ -61,7 +61,14 @@ for name in filenames:
table = damask.ASCIItable(name = name,
buffered = False, labeled = options.labeled, readonly = True)
except: continue
damask.util.report(scriptName,name)
details = ', '.join(
(['header'] if options.table else []) +
(['info'] if options.head or options.info else []) +
(['labels'] if options.head or options.labels else []) +
(['data'] if options.data else []) +
[]
)
damask.util.report(scriptName,name + ('' if details == '' else ' -- '+details))
# ------------------------------------------ output head ---------------------------------------
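
The details string above is assembled by concatenating one-element or empty lists per output flag and joining the survivors, so the report line names exactly what will be printed. A standalone sketch of the idiom with made-up flags:

show_header, show_info, show_labels, show_data = True, False, True, True

details = ', '.join(
    (['header'] if show_header else []) +
    (['info']   if show_info   else []) +
    (['labels'] if show_labels else []) +
    (['data']   if show_data   else [])
)
print('file.txt' + ('' if details == '' else ' -- ' + details))
# -> file.txt -- header, labels, data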