test for geom_fromTable (does not work for empty black- or whitelist)

Martin Diehl 2014-10-13 09:54:01 +00:00
parent eeac38dd1a
commit e1530afac7
2 changed files with 32 additions and 53 deletions

View File

@@ -171,8 +171,7 @@ class myThread (threading.Thread):
 parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
 Monte Carlo simulation to produce seed file that gives same size distribution like given geometry file.
-""", version=string.replace(scriptID,'\n','\\n')
-)
+""", version = scriptID)
 
 parser.add_option('-s','--seeds', dest='seedFile', metavar='string',
                   help='name of the intial seed file. If not found, a new one is created [%default]')
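
(A side note on the simplification above: string.replace() here is the deprecated Python 2 string-module function, and an SVN $Id$ keyword expands to a single line anyway, so escaping newlines in it is a no-op; passing scriptID directly is therefore equivalent. A minimal sketch, with a hypothetical expanded keyword:

    import string                                                     # Python 2: module-level replace() is deprecated
    scriptID = '$Id: example.py 3301 2014-10-13 09:54:01Z m.diehl $'  # hypothetical SVN keyword expansion
    assert string.replace(scriptID,'\n','\\n') == scriptID.replace('\n','\\n')
    assert scriptID.replace('\n','\\n') == scriptID                   # one-line keyword: nothing to escape
)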

View File

@@ -1,31 +1,14 @@
 #!/usr/bin/env python
 # -*- coding: UTF-8 no BOM -*-
 
-import os,sys,string,itertools,numpy,damask
-from optparse import OptionParser, Option
+import os,sys,string,itertools
+import numpy as np
+from optparse import OptionParser
 from collections import defaultdict
+import damask
 
-# -----------------------------
-class extendableOption(Option):
-# -----------------------------
-# used for definition of new option parser action 'extend', which enables to take multiple option arguments
-# taken from online tutorial http://docs.python.org/library/optparse.html
-
-  ACTIONS = Option.ACTIONS + ("extend",)
-  STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
-  TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
-  ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)
-
-  def take_action(self, action, dest, opt, value, values, parser):
-    if action == "extend":
-      lvalue = value.split(",")
-      values.ensure_value(dest, []).extend(lvalue)
-    else:
-      Option.take_action(self, action, dest, opt, value, values, parser)
-
-
-scriptID = string.replace('$Id$','\n','\\n')
-scriptName = scriptID.split()[1][:-3]
+scriptID = '$Id$'
+scriptName = scriptID.split()[1]
 
 
 #--------------------------------------------------------------------------------------------------
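
The block deleted above is the stock optparse recipe for a custom 'extend' action that accumulates comma-separated option values; the script now relies on the shared copy, damask.extendableOption. A minimal sketch of the behaviour, reusing the removed class:

    from optparse import OptionParser, Option

    class extendableOption(Option):                                   # same recipe as the class removed above
      ACTIONS = Option.ACTIONS + ("extend",)
      STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
      TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
      ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)

      def take_action(self, action, dest, opt, value, values, parser):
        if action == "extend":
          values.ensure_value(dest, []).extend(value.split(","))      # split comma lists, append to existing value
        else:
          Option.take_action(self, action, dest, opt, value, values, parser)

    parser = OptionParser(option_class = extendableOption)
    parser.add_option('-w','--white', dest = 'whitelist', action = 'extend', type = 'string')
    parser.set_defaults(whitelist = [])
    (options, args) = parser.parse_args(['--white','1,2','--white','5'])
    # options.whitelist == ['1', '2', '5']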
@@ -49,16 +32,15 @@ mappings = {
 }
 
 
-parser = OptionParser(option_class=extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
 Create seed file by taking microstructure indices from given ASCIItable column.
 White and black-listing of microstructure indices is possible.
 
 Examples:
 --white 1,2,5 --index grainID isolates grainID entries of value 1, 2, and 5;
 --black 1 --index grainID takes all grainID entries except for value 1.
-""" + string.replace(scriptID,'\n','\\n')
-)
+""", version = scriptID)
 
 parser.add_option('-p', '--positions', dest = 'pos', type = 'string',
                   help = 'coordinate label')
 
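
Combined with the 'extend' action, the examples in the description correspond to invocations like the following (the script's filename is not shown in this view; file names are hypothetical):

    <script> --index grainID --white 1,2,5 < voxelTable.txt > microstructure.seeds

The comma-separated values end up as a list in options.whitelist and drive the filtering mask further down.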
@@ -73,7 +55,7 @@ parser.add_option('-b','--black', dest = 'blacklist', action = 'extend', type
 
 parser.set_defaults(pos = 'pos')
 parser.set_defaults(index = 'microstructure')
-parser.set_defaults(box = [])
+parser.set_defaults(box = ())
 parser.set_defaults(whitelist = [])
 parser.set_defaults(blacklist = [])
 
@@ -113,69 +95,67 @@ for file in files:
   if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
   else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
 
-  theTable = damask.ASCIItable(file['input'],file['output'],buffered = False)
-  theTable.head_read()
+  table = damask.ASCIItable(file['input'],file['output'],buffered = False)
+  table.head_read()
 
 # --------------- figure out columns to process
   active = defaultdict(list)
   column = defaultdict(dict)
 
   for datatype,info in datainfo.items():
     for label in info['label']:
       foundIt = False
       for key in ['1_'+label,label]:
-        if key in theTable.labels:
+        if key in table.labels:
           foundIt = True
           active[datatype].append(label)
-          column[datatype][label] = theTable.labels.index(key)   # remember columns of requested data
+          column[datatype][label] = table.labels.index(key)      # remember columns of requested data
       if not foundIt:
         file['croak'].write('column %s not found...\n'%label)
         break
 
 # ------------------------------------------ process data ---------------------------------------
-  theTable.data_readArray(list(itertools.chain.from_iterable(map(lambda x:[x+i for i in range(datainfo['vector']['len'])],
+  table.data_readArray(list(itertools.chain.from_iterable(map(lambda x:[x+i for i in range(datainfo['vector']['len'])],
                           [column['vector'][label] for label in active['vector']]))) +
                           [column['scalar'][label] for label in active['scalar']])
 
 #--- finding bounding box --------------------------------------------------------------------------
-  boundingBox = numpy.array((numpy.amin(theTable.data[:,0:3],axis = 0),numpy.amax(theTable.data[:,0:3],axis = 0)))
+  boundingBox = np.array((np.amin(table.data[:,0:3],axis = 0),np.amax(table.data[:,0:3],axis = 0)))
   if len(options.box) == 6:
-    boundingBox[0,:] = numpy.minimum(options.box[0:3],boundingBox[0,:])
-    boundingBox[1,:] = numpy.maximum(options.box[3:6],boundingBox[1,:])
+    boundingBox[0,:] = np.minimum(options.box[0:3],boundingBox[0,:])
+    boundingBox[1,:] = np.maximum(options.box[3:6],boundingBox[1,:])
 
 #--- rescaling coordinates -------------------------------------------------------------------------
-  theTable.data[:,0:3] -= boundingBox[0,:]
-  theTable.data[:,0:3] /= boundingBox[1,:]-boundingBox[0,:]
+  table.data[:,0:3] -= boundingBox[0,:]
+  table.data[:,0:3] /= boundingBox[1,:]-boundingBox[0,:]
 
 #--- filtering of grain voxels ---------------------------------------------------------------------
-  mask = numpy.logical_and(\
-         numpy.ones_like(theTable.data[:,3],bool) \
+  mask = np.logical_and(\
+         np.ones_like(table.data[:,3],bool) \
          if options.whitelist == [] \
-         else numpy.in1d(theTable.data[:,3].ravel(), options.whitelist).reshape(theTable.data[:,3].shape),
-         numpy.ones_like(theTable.data[:,3],bool) \
+         else np.in1d(table.data[:,3].ravel(), options.whitelist).reshape(table.data[:,3].shape),
+         np.ones_like(table.data[:,3],bool) \
          if options.blacklist == [] \
-         else numpy.invert(numpy.in1d(theTable.data[:,3].ravel(), options.blacklist).reshape(theTable.data[:,3].shape))
+         else np.invert(np.in1d(table.data[:,3].ravel(), options.blacklist).reshape(table.data[:,3].shape))
          )
-  theTable.data = theTable.data[mask]
+  table.data = table.data[mask]
 
 # ------------------------------------------ output result ---------------------------------------
 
 # ------------------------------------------ assemble header --------------------------------------
-  theTable.info = [
+  table.info = [
     scriptID,
     'size %s'%(' '.join(list(itertools.chain.from_iterable(zip(['x','y','z'],
                map(str,boundingBox[1,:]-boundingBox[0,:])))))),
   ]
-  theTable.labels_clear()
-  theTable.labels_append(['x','y','z','microstructure'])         # implicitly switching label processing/writing on
-  theTable.head_write()
-  theTable.data_writeArray()
-  theTable.output_flush()
+  table.labels_clear()
+  table.labels_append(['x','y','z','microstructure'])            # implicitly switching label processing/writing on
+  table.head_write()
+  table.data_writeArray()
+  table.output_flush()
 
   table.input_close()                                            # close input ASCII table
   if file['name'] != 'STDIN':
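
The numerical core of the hunk above, condensed into a standalone sketch with hypothetical data (columns 0-2 are coordinates, column 3 the microstructure index, as in the script). The '== []' comparisons are the empty-list guards; per the commit message, the geom_fromTable test still fails when the black- or whitelist is empty:

    import numpy as np

    data = np.array([[0.0, 0.0, 0.0, 1.],
                     [0.5, 0.5, 0.5, 2.],
                     [1.0, 1.0, 1.0, 5.]])                 # hypothetical x,y,z,microstructure rows

    # bounding box of all coordinates, optionally enlarged by a user-given box
    boundingBox = np.array((np.amin(data[:,0:3],axis = 0),np.amax(data[:,0:3],axis = 0)))
    box = ()                                               # default: no user box (six floats otherwise)
    if len(box) == 6:
      boundingBox[0,:] = np.minimum(box[0:3],boundingBox[0,:])
      boundingBox[1,:] = np.maximum(box[3:6],boundingBox[1,:])

    # rescale coordinates into the unit cube spanned by the bounding box
    data[:,0:3] -= boundingBox[0,:]
    data[:,0:3] /= boundingBox[1,:]-boundingBox[0,:]

    # keep rows whose index is whitelisted and not blacklisted
    whitelist, blacklist = [1,2,5], [1]
    mask = np.logical_and(
             np.ones_like(data[:,3],bool) if whitelist == []
             else np.in1d(data[:,3].ravel(),whitelist).reshape(data[:,3].shape),
             np.ones_like(data[:,3],bool) if blacklist == []
             else np.invert(np.in1d(data[:,3].ravel(),blacklist).reshape(data[:,3].shape)))
    data = data[mask]                                      # rows with index 2 and 5 remain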