more improved scripts
parent 9cfc27d295
commit 98476ea29c
@@ -102,7 +102,7 @@ for name in filenames:
   maxcorner = np.array(map(max,coords))
   grid = np.array(map(len,coords),'i')
   size = grid/np.maximum(np.ones(3,'d'), grid-1.0) * (maxcorner-mincorner) # size from edge to edge = dim * n/(n-1)
-  size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 equal to smallest among other spacings
+  size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 set to smallest among other spacings
   delta = size/np.maximum(np.ones(3,'d'), grid)
   origin = mincorner - 0.5*delta # shift from cell center to corner
 
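The hunk above only rewords a comment, but the surrounding lines carry the actual logic: grid, physical size, cell spacing, and origin are derived from the unique cell-center coordinates per axis, and any axis with a single grid point borrows the smallest spacing found along the other axes. A minimal standalone sketch of that calculation, with hypothetical coordinates (the real script reads them from an ASCII table) and written for Python 3, whereas the scripts themselves target Python 2:

import numpy as np

# hypothetical cell-center coordinates per axis; z has only a single layer
coords = [np.array([0.25, 0.75, 1.25, 1.75]),
          np.array([0.5, 1.5]),
          np.array([0.5])]

mincorner = np.array([c.min() for c in coords])
maxcorner = np.array([c.max() for c in coords])
grid      = np.array([len(c) for c in coords],'i')

size = grid/np.maximum(np.ones(3,'d'), grid-1.0) * (maxcorner-mincorner)  # size from edge to edge = dim * n/(n-1)
size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1]))       # grid==1 gets the smallest spacing of the others
delta  = size/np.maximum(np.ones(3,'d'), grid)                            # cell spacing per axis
origin = mincorner - 0.5*delta                                            # shift from cell center to corner

print(grid)    # [4 2 1]
print(size)    # [2.  2.  0.5]
print(delta)   # [0.5 1.  0.5]
print(origin)  # [0.   0.   0.25]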
@@ -28,7 +28,7 @@ parser.add_option('-l','--label',
 
 (options,filenames) = parser.parse_args()
 
-if options.label == None:
+if options.label is None:
   parser.error('no grouping column specified.')
 
 
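Most changes in this commit swap equality tests against None for identity tests, in line with PEP 8. Beyond style, is None is also the more robust check: == can be overridden by the compared object, and with current NumPy an array compared to None yields an element-wise result rather than a single boolean. A standalone illustration (not taken from the scripts):

import numpy as np

class Odd(object):
    def __eq__(self, other):      # a custom __eq__ can make '== None' lie
        return True

x = Odd()
print(x == None)                  # True, although x is clearly not None
print(x is None)                  # False: identity cannot be fooled

a = np.arange(3)
print(a == None)                  # [False False False], an array, not a bool
print(a is None)                  # False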
@@ -77,11 +77,11 @@ minmax = np.array([np.array(options.xrange),
 grid = np.zeros(options.bins,'f')
 result = np.zeros((options.bins[0],options.bins[1],3),'f')
 
-if options.data == None: parser.error('no data columns specified.')
+if options.data is None: parser.error('no data columns specified.')
 
 labels = options.data
 
-if options.weight != None: labels += [options.weight] # prevent character splitting of single string value
+if options.weight is not None: labels += [options.weight] # prevent character splitting of single string value
 
 # --- loop over input files -------------------------------------------------------------------------
 
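The comment about character splitting deserves a concrete example: += on a list iterates over its right-hand side, so a bare string would be appended character by character, while wrapping it in a one-element list keeps the value intact. A quick standalone check:

labels = ['x', 'y']
weight = 'wgt'

a = labels[:]; a += weight       # strings are iterable: ['x', 'y', 'w', 'g', 't']
b = labels[:]; b += [weight]     # wrapped in a list, the value stays whole: ['x', 'y', 'wgt']
print(a)
print(b)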
@@ -124,7 +124,7 @@ for name in filenames:
     x = int(options.bins[0]*(table.data[i,0]-minmax[0,0])/delta[0])
     y = int(options.bins[1]*(table.data[i,1]-minmax[1,0])/delta[1])
     if x >= 0 and x < options.bins[0] and y >= 0 and y < options.bins[1]:
-      grid[x,y] += 1. if options.weight == None else table.data[i,2] # count (weighted) occurrences
+      grid[x,y] += 1. if options.weight is None else table.data[i,2] # count (weighted) occurrences
 
   if options.normCol:
     for x in xrange(options.bins[0]):
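The binning hunk maps each (x,y) sample onto a regular 2-D grid and accumulates either a plain count or the value of a weight column. A self-contained sketch of that accumulation with made-up data, bin counts, and ranges (the real script takes all of these from the ASCII table and command-line options):

import numpy as np

data   = np.array([[0.1, 0.2, 2.0],      # x, y, weight
                   [0.4, 0.9, 1.0],
                   [0.8, 0.3, 0.5]])
bins   = (4, 4)
minmax = np.array([[0.0, 1.0],           # x range
                   [0.0, 1.0]])          # y range
delta  = minmax[:,1] - minmax[:,0]
weighted = True

grid = np.zeros(bins,'f')
for i in range(data.shape[0]):
  x = int(bins[0]*(data[i,0]-minmax[0,0])/delta[0])
  y = int(bins[1]*(data[i,1]-minmax[1,0])/delta[1])
  if 0 <= x < bins[0] and 0 <= y < bins[1]:          # samples on the upper edge fall outside
    grid[x,y] += data[i,2] if weighted else 1.       # count (weighted) occurrences

print(grid)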
@@ -86,7 +86,7 @@ for name in filenames:
                      max(map(float,coords[2].keys()))-min(map(float,coords[2].keys())),\
                     ],'d') # size from bounding box, corrected for cell-centeredness
 
-  size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 equal to smallest among other spacings
+  size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 set to smallest among other spacings
 
 
   packing = np.array(options.packing,'i')
@@ -43,7 +43,7 @@ parser.set_defaults(condition = '',
 
 (options,filenames) = parser.parse_args()
 
-if options.labels == None or options.formulae == None:
+if options.labels is None or options.formulae is None:
   parser.error('no formulae specified.')
 if len(options.labels) != len(options.formulae):
   parser.error('number of labels ({}) and formulae ({}) do not match.'.format(len(options.labels),len(options.formulae)))
@@ -80,14 +80,14 @@ for name in filenames:
   positions = []
 
   for position,label in enumerate(table.labels):
-    if (options.whitelist == None or any([ position in table.label_indexrange(needle) \
+    if (options.whitelist is None or any([ position in table.label_indexrange(needle) \
                                            or fnmatch.fnmatch(label,needle) for needle in options.whitelist])) \
-    and (options.blacklist == None or not any([ position in table.label_indexrange(needle) \
+    and (options.blacklist is None or not any([ position in table.label_indexrange(needle) \
                                                 or fnmatch.fnmatch(label,needle) for needle in options.blacklist])): # a label to keep?
       labels.append(label) # remember name...
       positions.append(position) # ...and position
 
-  if len(labels) > 0 and options.whitelist != None and options.blacklist == None: # check whether reordering is possible
+  if len(labels) > 0 and options.whitelist is not None and options.blacklist is None: # check whether reordering is possible
     whitelistitem = np.zeros(len(labels),dtype=int)
     for i,label in enumerate(labels): # check each selected label
       match = [ positions[i] in table.label_indexrange(needle) \
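The filtering hunk keeps a column when it matches at least one whitelist pattern and no blacklist pattern, with None meaning no restriction on that side. The sketch below reproduces only the fnmatch shell-pattern part; the real script additionally accepts positional matches through table.label_indexrange, and the labels and patterns here are made up:

import fnmatch

all_labels = ['1_pos', '2_pos', '3_pos', 'texture', 'phase']
whitelist  = ['*_pos', 'phase']            # keep what matches any of these...
blacklist  = ['2_*']                       # ...unless it also matches one of these

labels, positions = [], []
for position, label in enumerate(all_labels):
  keep = (whitelist is None or any(fnmatch.fnmatch(label, needle) for needle in whitelist)) \
     and (blacklist is None or not any(fnmatch.fnmatch(label, needle) for needle in blacklist))
  if keep:                                 # a label to keep?
    labels.append(label)                   # remember name...
    positions.append(position)             # ...and position

print(labels)     # ['1_pos', '3_pos', 'phase']
print(positions)  # [0, 2, 4]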
@@ -119,7 +119,7 @@ for name in filenames:
 
 # ------------------------------------------ assemble header ---------------------------------------
 
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) # read ASCII header info
+  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
   table.labels_clear()
   table.labels_append(np.array(labels)[order]) # update with new label set
   table.head_write()
@@ -112,7 +112,7 @@ for name in filenames:
   try:
     table = damask.ASCIItable(name = name,
                               buffered = False,
-                              labeled = options.label != None,
+                              labeled = options.label is not None,
                               readonly = True)
   except: continue
   table.report_name(scriptName,name)
@@ -162,8 +162,8 @@ for name in filenames:
 
   nodes -= boundingBox[0].repeat(np.prod(options.dimension+1)).reshape([3]+list(options.dimension+1))
   nodes *= (options.pixelsize*options.dimension/options.size).repeat(np.prod(options.dimension+1)).reshape([3]+list(options.dimension+1))
-  imagesize = (options.pixelsize*(boundingBox[1]-boundingBox[0])*options.dimension\
-               /options.size)[:2].astype('i') # determine image size from number of cells in overall bounding box
+  imagesize = (options.pixelsize*(boundingBox[1]-boundingBox[0])* # determine image size from number of
+               options.dimension/options.size)[:2].astype('i') # cells in overall bounding box
   im = Image.new('RGBA',imagesize)
   draw = ImageDraw.Draw(im)
 
@@ -80,7 +80,7 @@ for name in filenames:
   try:
     table = damask.ASCIItable(name = name,
                               buffered = False,
-                              labeled = options.label != None,
+                              labeled = options.label is not None,
                               readonly = True)
   except: continue
   damask.util.report(scriptName,name)
@@ -104,7 +104,6 @@ for name in filenames:
     damask.util.croak(errors)
     table.close(dismiss = True) # close ASCII table file handles and delete output file
     continue
-
   # convert data to shape and arrange according to given options
   if options.dimension != []: table.data = table.data.reshape(options.dimension[1],options.dimension[0],3)
   if options.flipLR: table.data = np.fliplr(table.data)
@@ -36,7 +36,7 @@ parser.set_defaults(key = [],
 
 (options,filenames) = parser.parse_args()
 
-if options.keys == None:
+if options.keys is None:
   parser.error('No sorting column(s) specified.')
 
 options.keys.reverse() # numpy sorts with most significant column as last
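The comment on keys.reverse() refers to NumPy's convention that the most significant sort key comes last, as in np.lexsort. The script itself sorts through damask.ASCIItable, so the standalone snippet below (with hypothetical data) only mirrors that key-order convention:

import numpy as np

data = np.array([[2., 7., 0.1],
                 [1., 5., 0.3],
                 [2., 3., 0.2],
                 [1., 5., 0.1]])
keys = [0, 1]                                # sort by column 0 first, then column 1

keys = list(reversed(keys))                  # lexsort expects the most significant key last
order = np.lexsort(tuple(data[:,k] for k in keys))
print(data[order])                           # rows sorted by column 0, ties broken by column 1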
@@ -49,8 +49,8 @@ Polydata = reader.GetOutput()
 
 if Npoints != Ncells or Npoints != Nvertices:
   parser.error('Number of points, cells, and vertices in VTK differ from each other'); sys.exit()
-if options.scalar != None: datainfo['scalar']['label'] += options.scalar
-if options.color != None: datainfo['color']['label'] += options.color
+if options.scalar is not None: datainfo['scalar']['label'] += options.scalar
+if options.color is not None: datainfo['color']['label'] += options.color
 
 # ------------------------------------------ setup file handles ---------------------------------------
 