renumbering ASCIItable labels when data_readArray reads selected columns only

test for deleteColumn removed
Martin Diehl 2015-05-10 11:29:11 +00:00
parent 8bf7a7ef6a
commit b60f868a76
8 changed files with 117 additions and 140 deletions

View File

@@ -258,13 +258,16 @@ class ASCIItable():
read whole data of all (given) labels as numpy array
'''
if labels == []: indices = range(self.__IO__['validReadSize']) # use all columns
else: indices = self.labels_index(labels) # use specified columns
try:
self.data_rewind() # try to wind back to start of data
if labels == []: indices = range(self.__IO__['validReadSize']) # use all columns
else:
indices = self.labels_index(labels) # use specified columns
dictionary = dict(zip(indices, labels))
self.labels_index = range(len(dictionary))
self.labels = [dictionary[label] for label in sorted(dictionary)]
try:
self.data_rewind() # try to wind back to start of data
except:
pass # assume/hope we are at data start already...
self.data = np.loadtxt(self.__IO__['in'], usecols=indices,ndmin=2)
return self.data.shape
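
As a minimal standalone sketch (hypothetical column numbers, not part of the commit), the relabeling that the new data_readArray performs boils down to:

indices    = [4, 2]                                       # file columns of the requested labels
labels     = ['f', 'p']                                   # labels as requested by the caller
dictionary = dict(zip(indices, labels))                   # {4: 'f', 2: 'p'}
new_index  = range(len(dictionary))                       # [0, 1] -- the reduced array starts at column 0
new_labels = [dictionary[i] for i in sorted(dictionary)]  # ['p', 'f'], i.e. file order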

View File

@@ -10,11 +10,6 @@ import damask
scriptID = string.replace('$Id$','\n','\\n')
scriptName = os.path.splitext(scriptID.split()[1])[0]
#--------------------------------------------------------------------------------------------------
#> @brief calculates curl field using differentation in Fourier space
#> @todo enable odd resolution
#--------------------------------------------------------------------------------------------------
def curlFFT(geomdim,field):
grid = np.array(np.shape(field)[0:3])
wgt = 1.0/np.array(grid).prod()
@@ -74,18 +69,16 @@ Deals with both vector- and tensor-valued fields.
""", version = scriptID)
parser.add_option('-c','--coordinates', dest='coords', metavar='string',
help='column heading for coordinates [%default]')
parser.add_option('-v','--vector', dest='vector', action='extend', metavar='<string LIST>',
help='heading of columns containing vector field values')
parser.add_option('-t','--tensor', dest='tensor', action='extend', metavar='<string LIST>',
help='heading of columns containing tensor field values')
parser.set_defaults(coords = 'ipinitialcoord')
parser.set_defaults(vector = [])
parser.set_defaults(tensor = [])
(options,filenames) = parser.parse_args()
if len(options.vector) + len(options.tensor) == 0:
if options.vector == None and options.tensor == None:
parser.error('no data column specified...')
datainfo = { # list of requested labels per datatype
@@ -102,35 +95,33 @@ if options.tensor != None: datainfo['tensor']['label'] = options.tensor
# ------------------------------------------ setup file handles ------------------------------------
files = []
for name in filenames:
if os.path.exists(name):
files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
if filenames == []:
files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
else:
for name in filenames:
if os.path.exists(name):
files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
#--- loop over input files -------------------------------------------------------------------------
for file in files:
file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
table = damask.ASCIItable(file['input'],file['output'],True) # make unbuffered ASCII_table
table = damask.ASCIItable(file['input'],file['output'],False) # make unbuffered ASCII_table
table.head_read() # read ASCII header info
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.data_readArray()
# --------------- figure out columns for coordinates and vector/tensor fields to process ---------
column = defaultdict(dict)
pos = 0 # when reading in the table via data_readArray, the first key is at column 0
try:
column['coords'] = pos
pos+=3 # advance by data len (columns) for next key
keys=['%i_%s'%(i+1,options.coords) for i in xrange(3)] # store labels for column keys
except ValueError:
try:
column['coords'] = pos
pos+=3 # advance by data len (columns) for next key
directions = ['x','y','z']
keys=['%s.%s'%(options.coords,directions[i]) for i in xrange(3)] # store labels for column keys
except ValueError:
# --------------- figure out name of coordinate data (support for legacy .x notation) -------------
coordLabels=['%i_%s'%(i+1,options.coords) for i in xrange(3)] # store labels for column keys
if not set(coordLabels).issubset(table.labels):
directions = ['x','y','z']
coordLabels=['%s.%s'%(options.coords,directions[i]) for i in xrange(3)] # store labels for column keys
if not set(coordLabels).issubset(table.labels):
file['croak'].write('no coordinate data (1_%s) found...\n'%options.coords)
continue
coordColumns = [table.labels.index(label) for label in coordLabels]
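
The same fallback pattern can be sketched in isolation (hypothetical headings; 'pos' stands in for options.coords):

tableLabels = ['pos.x','pos.y','pos.z','texture']                   # legacy-style headings
coordLabels = ['%i_%s'%(i+1,'pos') for i in xrange(3)]              # try '1_pos','2_pos','3_pos' first
if not set(coordLabels).issubset(tableLabels):
  coordLabels = ['pos.%s'%d for d in ['x','y','z']]                 # fall back to 'pos.x' notation
coordColumns = [tableLabels.index(label) for label in coordLabels]  # -> [0, 1, 2]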
# --------------- figure out active columns -------------------------------------------------------
active = defaultdict(list)
for datatype,info in datainfo.items():
for label in info['label']:
@@ -139,13 +130,10 @@ for file in files:
file['croak'].write('column %s not found...\n'%key)
else:
active[datatype].append(label)
column[label] = pos
pos+=datainfo[datatype]['len']
keys+=['%i_%s'%(i+1,label) for i in xrange(datainfo[datatype]['len'])] # extend ASCII header with new labels
table.data_readArray(keys)
# --------------- assemble new header (columns containing curl) -----------------------------------
# --------------- assemble new header (metadata and columns containing curl) ----------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
for datatype,labels in active.items(): # loop over vector,tensor
for label in labels:
table.labels_append(['%i_curlFFT(%s)'%(i+1,label) for i in xrange(datainfo[datatype]['len'])])# extend ASCII header with new labels
@@ -155,10 +143,8 @@ for file in files:
coords = [{},{},{}]
for i in xrange(table.data.shape[0]):
for j in xrange(3):
coords[j][str(table.data[i,j])] = True # remember coordinate along x,y,z
grid = np.array([len(coords[0]),\
len(coords[1]),\
len(coords[2]),],'i') # grid is number of distinct coordinates found
coords[j][str(table.data[i,coordColumns[j]])] = True
grid = np.array(map(len,coords),'i')
size = grid/np.maximum(np.ones(3,'d'),grid-1.0)* \
np.array([max(map(float,coords[0].keys()))-min(map(float,coords[0].keys())),\
max(map(float,coords[1].keys()))-min(map(float,coords[1].keys())),\
@@ -169,29 +155,26 @@ for file in files:
mask = np.ones(3,dtype=bool)
mask[i]=0
size[i] = min(size[mask]/grid[mask]) # third spacing equal to smaller of other spacing
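
The grid detection above can be sketched on a toy coordinate array (hypothetical data, not from the commit):

import numpy as np
xyz = np.array([[0.,0.,0.],[1.,0.,0.],[0.,1.,0.],[1.,1.,0.]])
coords = [{},{},{}]
for i in xrange(xyz.shape[0]):
  for j in xrange(3):
    coords[j][str(xyz[i,j])] = True                   # dict keys double as a set of distinct values
grid = np.array(map(len,coords),'i')                  # -> [2 2 1] distinct coordinates per axis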
# ------------------------------------------ process value field -----------------------------------
curl = defaultdict(dict)
for datatype,labels in active.items(): # loop over vector,tensor
for label in labels: # loop over all requested curls
startColumn=table.labels.index('1_'+label)
curl[datatype][label] = curlFFT(size[::-1], # we need to reverse order here, because x is fastest, i.e. rightmost, but leftmost in our x,y,z notation
table.data[:,column[label]:column[label]+datainfo[datatype]['len']].\
table.data[:,startColumn:startColumn+datainfo[datatype]['len']].\
reshape([grid[2],grid[1],grid[0]]+datainfo[datatype]['shape']))
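
A short shape check illustrating why the order is reversed (toy grid): rows arrive in file order with x varying fastest, so a C-order reshape must put the slowest axis (z) first.

import numpy as np
grid  = np.array([4,3,2],'i')                         # x,y,z point counts
field = np.arange(grid.prod()*3.).reshape([grid[2],grid[1],grid[0],3])
# field[z,y,x,:] now addresses the vector at (x,y,z); hence size[::-1] above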
# ------------------------------------------ process data ------------------------------------------
table.data_rewind()
idx = 0
outputAlive = True
while outputAlive and table.data_read(): # read next data line of ASCII table
for datatype,labels in active.items(): # loop over vector,tensor
for label in labels: # loop over all requested norms
table.data_append(list(curl[datatype][label][idx,:]))
idx+=1
outputAlive = table.data_write() # output processed line
# ------------------------------------------ output result -----------------------------------------
outputAlive and table.output_flush() # just in case of buffered ASCII table
# ------------------------------------------ add data ------------------------------------------
for datatype,labels in active.items(): # loop over vector,tensor
for label in labels: # loop over all requested curls
for c in xrange(curl[datatype][label][0,:].shape[0]): # append column by column
lastRow = table.data.shape[1]
table.data=np.insert(table.data,lastRow,curl[datatype][label][:,c],1)
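
Note that lastRow above actually holds the column count (shape[1]); a toy np.insert call makes the append explicit (hypothetical array):

import numpy as np
data   = np.zeros((5,3))
newcol = np.arange(5.)
data   = np.insert(data, data.shape[1], newcol, 1)    # insert at index == width, i.e. append as last column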
table.input_close() # close input ASCII table
table.output_close() # close output ASCII table
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new
# ------------------------------------------ output result -----------------------------------------
table.data_writeArray('%.12g')
table.input_close() # close input ASCII table (works for stdin)
table.output_close() # close output ASCII table (works for stdout)
if file['name'] != 'STDIN':
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new

View File

@@ -106,11 +106,11 @@ parser.set_defaults(scale = 1.0)
(options,filenames) = parser.parse_args()
if options.type == None:
parser.error('please select a feature type')
parser.error('no feature type selected...')
if not set(options.type).issubset(set(list(itertools.chain(*map(lambda x: x['names'],features))))):
parser.error('type must be chosen from (%s)...'%(', '.join(map(lambda x:'|'.join(x['names']),features))) )
if 'biplane' in options.type and 'boundary' in options.type:
parser.error("both aliases 'biplane' and 'boundary' are selected")
parser.error("both aliases 'biplane' and 'boundary' are selected...")
feature_list = []
for i,feature in enumerate(features):
@@ -131,45 +131,44 @@ for file in files:
table = damask.ASCIItable(file['input'],file['output'],False) # make unbuffered ASCII_table
table.head_read() # read ASCII header info
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.data_readArray()
# --------------- figure out size and grid ---------------------------------------------------------
try:
locationCol = table.labels.index('1_%s'%options.coords) # columns containing location data
except ValueError:
try:
locationCol = table.labels.index('%s.x'%options.coords) # columns containing location data (legacy naming scheme)
except ValueError:
file['croak'].write('no coordinate data (1_%s/%s.x) found...\n'%(options.coords,options.coords))
# --------------- figure out name of coordinate data (support for legacy .x notation) ------------
coordLabels=['%i_%s'%(i+1,options.coords) for i in xrange(3)] # store labels for column keys
if not set(coordLabels).issubset(table.labels):
directions = ['x','y','z']
coordLabels=['%s.%s'%(options.coords,directions[i]) for i in xrange(3)] # store labels for column keys
if not set(coordLabels).issubset(table.labels):
file['croak'].write('no coordinate data (1_%s) found...\n'%options.coords)
continue
coordColumns = [table.labels.index(label) for label in coordLabels]
# --------------- figure out active column --------------------------------------------------------
if options.id not in table.labels:
file['croak'].write('column %s not found...\n'%options.id)
continue
# ------------------------------------------ assemble header ---------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
for feature in feature_list:
table.labels_append('ED_%s(%s)'%(features[feature]['names'][0],options.id)) # extend ASCII header with new labels
table.head_write()
# ------------------------------------------ process data ------------------------------------------
table.data_readArray(['1_'+options.coords,'2_'+options.coords,'3_'+options.coords,options.id])
# --------------- figure out grid -----------------------------------------------------------------
coords = [{},{},{}]
for i in xrange(len(table.data)):
for j in xrange(3):
coords[j][str(table.data[i,j])] = True
coords[j][str(table.data[i,coordColumns[j]])] = True
grid = np.array(map(len,coords),'i')
# ------------------------------------------ process value field -----------------------------------
unitlength = 0.0
for i,r in enumerate(grid):
if r > 1: unitlength = max(unitlength,(max(map(float,coords[i].keys()))-min(map(float,coords[i].keys())))/(r-1.0))
neighborhood = neighborhoods[options.neighborhood]
convoluted = np.empty([len(neighborhood)]+list(grid+2),'i')
microstructure = periodic_3Dpad(np.array(table.data[:,3].reshape(grid),'i'))
microstructure = periodic_3Dpad(np.array(table.data[:,table.labels.index(options.id)].reshape(grid),'i'))
for i,p in enumerate(neighborhood):
stencil = np.zeros((3,3,3),'i')
@@ -197,18 +196,14 @@ for file in files:
distance[i,:,:,:] = ndimage.morphology.distance_transform_edt(distance[i,:,:,:])*[options.scale]*3
distance.shape = (len(feature_list),grid.prod())
# ------------------------------------------ process data ------------------------------------------
table.data_rewind()
l = 0
while table.data_read():
for i in xrange(len(feature_list)):
table.data_append(distance[i,l]) # add all distance fields
l += 1
outputAlive = table.data_write() # output processed line
# ------------------------------------------ add data ------------------------------------------
for i in xrange(len(feature_list)):
lastRow = table.data.shape[1]
table.data=np.insert(table.data,lastRow,distance[i,:],1)
# ------------------------------------------ output result -----------------------------------------
outputAlive and table.output_flush() # just in case of buffered ASCII table
table.input_close() # close input ASCII table
table.output_close() # close output ASCII table
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new
table.data_writeArray('%.12g')
table.input_close() # close input ASCII table (works for stdin)
table.output_close() # close output ASCII table (works for stdout)
if file['name'] != 'STDIN':
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new

View File

@@ -60,21 +60,17 @@ if options.asciitable != None and os.path.isfile(options.asciitable):
mappedTable.head_read() # read ASCII header info of mapped table
labels = []
indices = []
for datatype,info in datainfo.items():
for label in info['label']:
key = '1_'+label if info['len'] > 1 else label
if key in mappedTable.labels:
labels.append(label) # extend labels
indices += range(mappedTable.labels.index(key),
mappedTable.labels.index(key)+datainfo[datatype]['len'])
keys = ['%i_'%(i+1)+label for i in xrange(info['len'])] if info['len'] > 1 else [label]
if set(keys).issubset(mappedTable.labels):
labels+=keys # extend labels
else:
sys.stderr.write('column %s not found...\n'%label)
break
mappedTable.data_readArray(indices)
mappedTable.input_close() # close mapped input ASCII table
mappedTable.data_readArray(labels)
mappedTable.input_close() # close mapped input ASCII table
else:
parser.error('missing mapped ASCIItable...')
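
The multi-component key check follows the pattern below (hypothetical labels; a length of 3 stands for a vector):

available = ['1_v','2_v','3_v','texture']             # headings present in the mapped table
label, length = 'v', 3
keys = ['%i_'%(i+1)+label for i in xrange(length)] if length > 1 else [label]
ok = set(keys).issubset(available)                    # True only if every component column exists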
@@ -95,17 +91,15 @@ for file in files:
table = damask.ASCIItable(file['input'],file['output'],False) # make unbuffered ASCII_table
table.head_read() # read ASCII header info
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
if options.map not in table.labels:
file['croak'].write('column %s not found...\n'%options.map)
continue
# ------------------------------------------ assemble header --------------------------------------
for datatype,info in datainfo.items():
for label in info['label']:
table.labels_append(label if info['len'] == 1 else \
['%i_%s'%(i+1,label) for i in xrange(info['len'])]) # extend ASCII header of current table with new labels
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
for label in mappedTable.labels:
table.labels_append(label)
table.head_write()
# ------------------------------------------ process data ------------------------------------------

View File

@@ -56,23 +56,24 @@ datainfo = { # list of requested labels per datatype
'label':[]},
}
if not set(options.output).issubset(set(outputChoices)):
if options.output == None or (not set(options.output).issubset(set(outputChoices))):
parser.error('output must be chosen from %s...'%(', '.join(outputChoices)))
input=[]
if options.eulers != None:
datainfo['vector']['label'] += [options.eulers]
input = 'eulers'
input.append('eulers')
if options.a != None and \
options.b != None and \
options.c != None:
datainfo['vector']['label'] += [options.a,options.b,options.c]
input = 'frame'
input.append('frame')
if options.matrix != None:
datainfo['tensor']['label'] += [options.matrix]
input = 'matrix'
input.append('matrix')
if options.quaternion != None:
datainfo['quaternion']['label'] += [options.quaternion]
input = 'quaternion'
input.append('quaternion')
if len(input) != 1: parser.error('needs exactly one input format...')
input = input[0]
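
A condensed sketch of the pattern: with the old scalar assignment, giving two inputs silently kept the last one, while the list form rejects both zero and several.

input = []
if options.eulers     != None: input.append('eulers')
if options.quaternion != None: input.append('quaternion')
if len(input) != 1: parser.error('needs exactly one input format...')  # catches none as well as several
input = input[0]                                      # collapse so downstream code is unchanged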

View File

@@ -24,7 +24,7 @@ parser.add_option('-f','--defgrad', dest='defgrad', metavar='string',
parser.add_option('-p','--stress', dest='stress', metavar='string',
help='heading of columns containing first Piola--Kirchhoff stress [%default]')
parser.set_defaults(defgrad = 'f')
parser.set_defaults(stress = 'p')
(options,filenames) = parser.parse_args()
@@ -55,12 +55,9 @@ for file in files:
file['croak'].write('column %s not found...\n'%key)
missingColumns=True
if missingColumns: continue
active = defaultdict(list)
column = defaultdict(dict)
missingColumns = False
# ------------------------------------------ assemble header --------------------------------------
table.labels_append(['%i_S'%(i+1) for i in xrange(datainfo['stress']['len'])]) # extend ASCII header with new labels
table.labels_append(['%i_S'%(i+1) for i in xrange(9)]) # extend ASCII header with new labels
table.head_write()
# ------------------------------------------ process data ------------------------------------------

View File

@@ -82,39 +82,34 @@ for name in filenames:
table = damask.ASCIItable(file['input'],file['output'],buffered = False) # make unbuffered ASCII_table
table.head_read() # read ASCII header info
# --------------- figure out columns to process ---------------------------------------------------
# --------------- figure out columns to process and read ------------------------------------------
active = []
column = {}
for label in datainfo['scalar']['label']:
if label in table.labels:
active.append(label)
column[label] = table.labels.index(label) # remember columns of requested data
else:
file['croak'].write('column %s not found...\n'%label)
# ------------------------------------------ assemble header ---------------------------------------
table.info_clear()
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.labels = ['bin_%s'%options.data[0],'bin_%s'%options.data[1],'z']
table.head_write()
table.data_readArray([label for label in active])
# ------------------------------------------ process data ------------------------------------------
table.data_readArray([column[label] for label in active])
for i in (0,1): # check data minmax for x and y
for j in (0,1): # check data minmax for x and y
i = table.labels.index(options.data[j])
if (minmax[i] == 0.0).all(): minmax[i] = [table.data[:,i].min(),table.data[:,i].max()]
if options.type[i].lower() == 'log': # if log scale
table.data[:,i] = np.log(table.data[:,i]) # change x,y coordinates to log
minmax[i] = np.log(minmax[i]) # change minmax to log, too
if options.type[i].lower() == 'log': # if log scale
table.data[:,i] = np.log(table.data[:,i]) # change x,y coordinates to log
minmax[i] = np.log(minmax[i]) # change minmax to log, too
delta = minmax[:,1]-minmax[:,0]
xCol = table.labels.index(options.data[0])
yCol = table.labels.index(options.data[1])
if options.weight != None: wCol = table.labels.index(options.weight)
for i in xrange(len(table.data)):
x = int(options.bins[0]*(table.data[i,0]-minmax[0,0])/delta[0])
y = int(options.bins[1]*(table.data[i,1]-minmax[1,0])/delta[1])
x = int(options.bins[0]*(table.data[i,xCol]-minmax[0,0])/delta[0])
y = int(options.bins[1]*(table.data[i,yCol]-minmax[1,0])/delta[1])
if x >= 0 and x < options.bins[0] and y >= 0 and y < options.bins[1]:
grid[x,y] += 1. if options.weight == None else table.data[i,2] # count (weighted) occurrences
grid[x,y] += 1. if options.weight == None else table.data[i,wCol] # count (weighted) occurrences
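
A worked number for the bin mapping (toy values): the exact maximum lands at x == bins and is dropped by the bounds check above.

bins, lo, hi = 4, 0.0, 8.0
x = int(bins*(5.0-lo)/(hi-lo))                        # -> 2, i.e. the third of four bins
x = int(bins*(8.0-lo)/(hi-lo))                        # -> 4 == bins, excluded by the range test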
if options.normCol:
for x in xrange(options.bins[0]):
@@ -151,7 +146,13 @@ for name in filenames:
if options.invert: result[:,:,2] = 1.0 - result[:,:,2]
# ------------------------------------------ output result -----------------------------------------
# ------------------------------------------ assemble header ---------------------------------------
table.info_clear()
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.labels = ['bin_%s'%options.data[0],'bin_%s'%options.data[1],'z']
table.head_write()
# ------------------------------------------ output result -----------------------------------------
prefix = 'binned%s-%s_'%(options.data[0],options.data[1])+ \
('weighted%s_'%(options.weight) if options.weight != None else '')
np.savetxt(file['output'],result.reshape(options.bins[0]*options.bins[1],3))

View File

@@ -149,6 +149,9 @@ for file in files:
newInfo['size'] = info['size']/info['grid']*newInfo['grid']
newInfo['origin'] = info['origin']+info['size']/info['grid']*options.offset
a = np.bincount(microstructure_cropped.flatten())
b = np.nonzero(a)[0]
y = np.size(b)
newInfo['microstructures'] = microstructure_cropped.max()
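
The three added lines count distinct microstructure indices via bincount; a toy run (hypothetical data):

import numpy as np
m = np.array([1,1,3,5,5,5])
a = np.bincount(m)                                    # occurrences per index: [0 2 0 1 0 3]
b = np.nonzero(a)[0]                                  # indices actually present: [1 3 5]
y = np.size(b)                                        # -> 3 distinct microstructures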