added some more post-processing tests and improved output

Martin Diehl 2014-08-06 19:06:33 +00:00
parent 57e3cf00c2
commit 649e637561
27 changed files with 333 additions and 489 deletions

View File

@@ -199,7 +199,6 @@ def doSim(delay,thread):
global stressAll
stressAll=np.append(yieldStress,stressAll)
print np.shape(stressAll)
print('starting fitting for sim %i from %s'%(me,thread))
myFit.fit(stressAll.reshape(len(stressAll)//9,9).transpose())
s.release()
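
For context, the fitter receives the accumulated yield stresses with one column per simulation. A minimal sketch of the reshape, assuming each appended yieldStress holds the 9 components of a 3x3 stress tensor and that myFit.fit expects components along the first axis:

import numpy as np
stressAll = np.zeros(18)                                       # e.g. two appended yield stresses
fitData = stressAll.reshape(len(stressAll)//9,9).transpose()   # shape (9,2): one column per simulation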

View File

@@ -8,7 +8,7 @@ from vtk.util import numpy_support
import damask
scriptID = string.replace('$Id$','\n','\\n')
scriptName = scriptID.split()[1]
scriptName = scriptID.split()[1][:-3]
# -----------------------------
class backgroundMessage(threading.Thread):
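
The scriptName change above recurs throughout this commit: the trailing '.py' is stripped so that console messages show the bare script name. A minimal sketch, assuming an expanded SVN $Id$ keyword (file name and revision are hypothetical):

scriptID = '$Id: someScript.py 3333 2014-08-06 19:06:33Z m.diehl $'
scriptID.split()[1]        # 'someScript.py' (old)
scriptID.split()[1][:-3]   # 'someScript'    (new, '.py' dropped)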

View File

@@ -7,7 +7,7 @@ from optparse import OptionParser
import damask
scriptID = string.replace('$Id$','\n','\\n')
scriptName = scriptID.split()[1]
scriptName = scriptID.split()[1][:-3]
def unravel(item):
if hasattr(item,'__contains__'): return ' '.join(map(unravel,item))

View File

@@ -8,7 +8,7 @@ from optparse import OptionParser
import damask
scriptID = string.replace('$Id$','\n','\\n')
scriptName = scriptID.split()[1]
scriptName = scriptID.split()[1][:-3]
# --------------------------------------------------------------------
# MAIN

View File

@@ -7,7 +7,7 @@ from optparse import OptionParser
import damask
scriptID = string.replace('$Id$','\n','\\n')
scriptName = scriptID.split()[1]
scriptName = scriptID.split()[1][:-3]
# --------------------------------------------------------------------
# MAIN
@@ -74,7 +74,7 @@ for file in files:
np.array([max(map(float,coords[0].keys()))-min(map(float,coords[0].keys())),\
max(map(float,coords[1].keys()))-min(map(float,coords[1].keys())),\
max(map(float,coords[2].keys()))-min(map(float,coords[2].keys())),\
],'d') # dimension from bounding box, corrected for cell-centeredness
],'d') # size from bounding box, corrected for cell-centeredness
for i, points in enumerate(grid):
if points == 1:
@@ -87,18 +87,12 @@ for file in files:
N = grid.prod()
# --------------- figure out columns to process ---------------------------------------------------
missingColumns = False
for label in datainfo['defgrad']['label']:
key = '1_%s'%label
if key not in table.labels:
file['croak'].write('column %s not found...\n'%key)
missingColumns = True
else:
column = table.labels.index(key) # remember columns of requested data
if missingColumns:
key = '1_%s'%datainfo['defgrad']['label'][0]
if key not in table.labels:
file['croak'].write('column %s not found...\n'%key)
continue
else:
column = table.labels.index(key) # remember columns of requested data
# ------------------------------------------ assemble header ---------------------------------------
if not options.noShape: table.labels_append(['shapeMismatch(%s)' %options.defgrad])
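
The simplified check above probes only the first component of the single requested deformation-gradient label instead of looping over all labels. A minimal sketch of the lookup, assuming the usual '1_f' ... '9_f' column naming:

labels = ['elem','1_f','2_f','3_f','4_f','5_f','6_f','7_f','8_f','9_f']   # hypothetical header
key = '1_%s'%'f'
column = labels.index(key) if key in labels else None                     # -> 1, start of the 9 components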

View File

@@ -8,7 +8,7 @@ from optparse import OptionParser
import damask
scriptID = string.replace('$Id$','\n','\\n')
scriptName = scriptID.split()[1]
scriptName = scriptID.split()[1][:-3]
# --------------------------------------------------------------------
# MAIN

View File

@@ -8,7 +8,7 @@ from optparse import OptionParser
import damask
scriptID = string.replace('$Id$','\n','\\n')
scriptName = scriptID.split()[1]
scriptName = scriptID.split()[1][:-3]
# --------------------------------------------------------------------
# MAIN
@@ -71,7 +71,7 @@ for file in files:
np.array([max(map(float,coords[0].keys()))-min(map(float,coords[0].keys())),\
max(map(float,coords[1].keys()))-min(map(float,coords[1].keys())),\
max(map(float,coords[2].keys()))-min(map(float,coords[2].keys())),\
],'d') # dimension from bounding box, corrected for cell-centeredness
],'d') # size from bounding box, corrected for cell-centeredness
for i, points in enumerate(grid):
if points == 1:
@@ -84,18 +84,12 @@ for file in files:
N = grid.prod()
# --------------- figure out columns to process ---------------------------------------------------
missingColumns = False
for label in datainfo['defgrad']['label']:
key = '1_%s'%label
if key not in table.labels:
file['croak'].write('column %s not found...\n'%key)
missingColumns = True
else:
column = table.labels.index(key) # remember columns of requested data
if missingColumns:
key = '1_%s'%datainfo['defgrad']['label'][0]
if key not in table.labels:
file['croak'].write('column %s not found...\n'%key)
continue
else:
column = table.labels.index(key) # remember columns of requested data
# ------------------------------------------ assemble header ---------------------------------------
table.labels_append(['%s_coords'%(coord+1) for coord in xrange(3)]) # extend ASCII header with new labels

View File

@@ -7,7 +7,7 @@ from optparse import OptionParser
import damask
scriptID = string.replace('$Id$','\n','\\n')
scriptName = scriptID.split()[1]
scriptName = scriptID.split()[1][:-3]
def determinant(m):
return +m[0]*m[4]*m[8] \
@@ -63,6 +63,7 @@ for file in files:
active = []
column = defaultdict(dict)
# --------------- figure out columns to process ---------------------------------------------------
for label in datainfo['tensor']['label']:
key = '1_%s'%label
if key not in table.labels:

View File

@@ -7,7 +7,7 @@ from optparse import OptionParser
import damask
scriptID = string.replace('$Id$','\n','\\n')
scriptName = scriptID.split()[1]
scriptName = scriptID.split()[1][:-3]
oneThird = 1.0/3.0

View File

@@ -8,7 +8,7 @@ from optparse import OptionParser
import damask
scriptID = string.replace('$Id$','\n','\\n')
scriptName = scriptID.split()[1]
scriptName = scriptID.split()[1][:-3]
# --------------------------------------------------------------------
# MAIN

View File

@@ -8,7 +8,7 @@ from optparse import OptionParser
import damask
scriptID = string.replace('$Id$','\n','\\n')
scriptName = scriptID.split()[1]
scriptName = scriptID.split()[1][:-3]
def normalize(vec):
return vec/np.sqrt(np.inner(vec,vec))

View File

@@ -8,7 +8,7 @@ from scipy import ndimage
import damask
scriptID = string.replace('$Id$','\n','\\n')
scriptName = scriptID.split()[1]
scriptName = scriptID.split()[1][:-3]
def periodic_3Dpad(array, rimdim=(1,1,1)):

View File

@@ -3,12 +3,11 @@
import os,sys,string
import numpy as np
from collections import defaultdict
from optparse import OptionParser
import damask
scriptID = string.replace('$Id$','\n','\\n')
scriptName = scriptID.split()[1]
scriptName = scriptID.split()[1][:-3]
# --------------------------------------------------------------------
# MAIN
@@ -53,12 +52,20 @@ datainfo = {
'label':[]},
}
if options.eulers != None: datainfo['vector']['label'] += [options.eulers]; input = 'eulers'
if options.eulers != None:
datainfo['vector']['label'] += [options.eulers]
input = 'eulers'
if options.a != None and \
options.b != None and \
options.c != None: datainfo['vector']['label'] += [options.a,options.b,options.c]; input = 'frame'
if options.matrix != None: datainfo['tensor']['label'] += [options.matrix]; input = 'matrix'
if options.quaternion != None: datainfo['quaternion']['label'] += [options.quaternion]; input = 'quaternion'
options.c != None:
datainfo['vector']['label'] += [options.a,options.b,options.c]
input = 'frame'
if options.matrix != None:
datainfo['tensor']['label'] += [options.matrix]
input = 'matrix'
if options.quaternion != None:
datainfo['quaternion']['label'] += [options.quaternion]
input = 'quaternion'
inputGiven = 0
for datatype,info in datainfo.items():
@@ -87,8 +94,7 @@ for file in files:
table.head_read() # read ASCII header info
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
active = defaultdict(list)
column = defaultdict(dict)
column = {}
missingColumns = False
for datatype,info in datainfo.items():
@@ -98,8 +104,7 @@ for file in files:
file['croak'].write('column %s not found...\n'%key)
missingColumns = True # break if label not found
else:
active[datatype].append(label)
column[datatype][label] = table.labels.index(key) # remember columns of requested data
column[label] = table.labels.index(key) # remember columns of requested data
if missingColumns:
continue
@@ -112,25 +117,29 @@ for file in files:
outputAlive = True
while outputAlive and table.data_read(): # read next data line of ASCII table
if input == 'eulers':
o = damask.Orientation(Eulers=toRadians*np.array(map(float,table.data[column['vector'][options.eulers]:\
column['vector'][options.eulers]+datainfo['vector']['len']])),
o = damask.Orientation(Eulers=toRadians*\
np.array(map(float,table.data[column[options.eulers]:\
column[options.eulers]+datainfo['vector']['len']])),
symmetry=options.symmetry).reduced()
elif input == 'matrix':
o = damask.Orientation(matrix=np.array([map(float,table.data[column['tensor'][options.matrix]:\
column['tensor'][options.matrix]+datainfo['tensor']['len']])]),
o = damask.Orientation(matrix=\
np.array([map(float,table.data[column[options.matrix]:\
column[options.matrix]+datainfo['tensor']['len']])]),
symmetry=options.symmetry).reduced()
elif input == 'frame':
o = damask.Orientation(matrix=np.array([map(float,table.data[column['vector'][options.a]:\
column['vector'][options.a]+datainfo['vector']['len']] + \
table.data[column['vector'][options.b]:\
column['vector'][options.b]+datainfo['vector']['len']] + \
table.data[column['vector'][options.c]:\
column['vector'][options.c]+datainfo['vector']['len']]
o = damask.Orientation(matrix=\
np.array([map(float,table.data[column[options.a]:\
column[options.a]+datainfo['vector']['len']] + \
table.data[column[options.b]:\
column[options.b]+datainfo['vector']['len']] + \
table.data[column[options.c]:\
column[options.c]+datainfo['vector']['len']]
)]).reshape(3,3),
symmetry=options.symmetry).reduced()
elif input == 'quaternion':
o = damask.Orientation(quaternion=np.array(map(float,table.data[column['quaternion'][options.quaternion]:\
column['quaternion'][options.quaternion]+datainfo['quaternion']['len']])),
o = damask.Orientation(quaternion=\
np.array(map(float,table.data[column[options.quaternion]:\
column[options.quaternion]+datainfo['quaternion']['len']])),
symmetry=options.symmetry).reduced()
table.data_append(o.IPFcolor(pole))
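
Because every requested label is unique here, the two-level column[datatype][label] lookup collapses to a flat dict. A minimal sketch with a hypothetical header:

labels = ['1_eulers','2_eulers','3_eulers']
column = {}
column['eulers'] = labels.index('1_eulers')   # new: column['eulers'] -> 0
# old form kept one dict per datatype: column['vector']['eulers'] -> 0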

View File

@@ -6,7 +6,7 @@ from optparse import OptionParser
import damask
scriptID = string.replace('$Id: addCauchy.py 3331 2014-08-04 17:53:41Z MPIE\m.diehl $','\n','\\n')
scriptName = scriptID.split()[1]
scriptName = scriptID.split()[1][:-3]
# --------------------------------------------------------------------
# MAIN
@@ -67,14 +67,12 @@ if options.asciitable != None and os.path.isfile(options.asciitable):
for datatype,info in datainfo.items():
for label in info['label']:
foundIt = False
for key in ['1_'+label,label]:
if key in mappedTable.labels:
foundIt = True
labels.append(label) # extend labels
indices += range(mappedTable.labels.index(key),
mappedTable.labels.index(key)+datainfo[datatype]['len'])
if not foundIt:
key = {True:'1_'+label,False:label}[info['len']==1]
if key in mappedTable.labels:
labels.append(label) # extend labels
indices += range(mappedTable.labels.index(key),
mappedTable.labels.index(key)+datainfo[datatype]['len'])
else:
file['croak'].write('column %s not found...\n'%label)
break

View File

@@ -8,7 +8,7 @@ from optparse import OptionParser
import damask
scriptID = string.replace('$Id$','\n','\\n')
scriptName = scriptID.split()[1]
scriptName = scriptID.split()[1][:-3]
def Mises(what,tensor):

View File

@@ -7,7 +7,7 @@ from optparse import OptionParser
import damask
scriptID = string.replace('$Id$','\n','\\n')
scriptName = scriptID.split()[1]
scriptName = scriptID.split()[1][:-3]
# definition of element-wise p-norms for matrices
def normAbs(object): # p = 1

View File

@@ -3,12 +3,11 @@
import os,sys,string,itertools
import numpy as np
from collections import defaultdict
from optparse import OptionParser
import damask
scriptID = string.replace('$Id$','\n','\\n')
scriptName = scriptID.split()[1]
scriptName = scriptID.split()[1][:-3]
# --------------------------------------------------------------------
# MAIN
@@ -105,8 +104,7 @@ for file in files:
table.head_read() # read ASCII header info
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
active = defaultdict(list)
column = defaultdict(dict)
column = {}
missingColumns = False
for datatype,info in datainfo.items():
@@ -116,8 +114,7 @@ for file in files:
file['croak'].write('column %s not found...\n'%key)
missingColumns = True # break if label not found
else:
active[datatype].append(label)
column[datatype][label] = table.labels.index(key) # remember columns of requested data
column[label] = table.labels.index(key) # remember columns of requested data
if missingColumns:
continue
@@ -135,28 +132,28 @@ for file in files:
while outputAlive and table.data_read(): # read next data line of ASCII table
if input == 'eulers':
o = damask.Orientation(Eulers=toRadians*\
np.array(map(float,table.data[column['vector'][options.eulers]:\
column['vector'][options.eulers]+datainfo['vector']['len']])),
np.array(map(float,table.data[column[options.eulers]:\
column[options.eulers]+datainfo['vector']['len']])),
symmetry=options.symmetry).reduced()
elif input == 'matrix':
o = damask.Orientation(matrix=\
np.array([map(float,table.data[column['tensor'][options.matrix]:\
column['tensor'][options.matrix]+datainfo['tensor']['len']])]),
np.array([map(float,table.data[column[options.matrix]:\
column[options.matrix]+datainfo['tensor']['len']])]),
symmetry=options.symmetry).reduced()
elif input == 'frame':
o = damask.Orientation(matrix=\
np.array([map(float,table.data[column['vector'][options.a]:\
column['vector'][options.a]+datainfo['vector']['len']] + \
table.data[column['vector'][options.b]:\
column['vector'][options.b]+datainfo['vector']['len']] + \
table.data[column['vector'][options.c]:\
column['vector'][options.c]+datainfo['vector']['len']]
np.array([map(float,table.data[column[options.a]:\
column[options.a]+datainfo['vector']['len']] + \
table.data[column[options.b]:\
column[options.b]+datainfo['vector']['len']] + \
table.data[column[options.c]:\
column[options.c]+datainfo['vector']['len']]
)]).reshape(3,3),
symmetry=options.symmetry).reduced()
elif input == 'quaternion':
o = damask.Orientation(quaternion=\
np.array(map(float,table.data[column['quaternion'][options.quaternion]:\
column['quaternion'][options.quaternion]+datainfo['quaternion']['len']])),
np.array(map(float,table.data[column[options.quaternion]:\
column[options.quaternion]+datainfo['quaternion']['len']])),
symmetry=options.symmetry).reduced()
o.quaternion = r*o.quaternion

View File

@@ -8,7 +8,7 @@ from optparse import OptionParser
import damask
scriptID = string.replace('$Id$','\n','\\n')
scriptName = scriptID.split()[1]
scriptName = scriptID.split()[1][:-3]
# --------------------------------------------------------------------
# MAIN

View File

@@ -3,12 +3,11 @@
import os,sys,math,string
import numpy as np
from collections import defaultdict
from optparse import OptionParser
import damask
scriptID = string.replace('$Id$','\n','\\n')
scriptName = scriptID.split()[1]
scriptName = scriptID.split()[1][:-3]
slipnormal_temp = [
[0,0,0,1],
@@ -327,20 +326,12 @@ for file in files:
table.head_read() # read ASCII header info
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
active = defaultdict(list)
column = defaultdict(dict)
for datatype,info in datainfo.items():
for label in info['label']:
foundIt = False
for key in ['1_'+label,label]:
if key in table.labels:
foundIt = True
active[datatype].append(label)
column[datatype][label] = table.labels.index(key) # remember columns of requested data
if not foundIt:
file['croak'].write('column %s not found...\n'%label)
break
key = '1_%s'%datainfo['vector']['label'][0]
if key not in table.labels:
file['croak'].write('column %s not found...\n'%key)
continue
else:
column = table.labels.index(key) # remember columns of requested data
# ------------------------------------------ assemble header ---------------------------------------
@@ -364,8 +355,7 @@ for file in files:
outputAlive = True
while outputAlive and table.data_read(): # read next data line of ASCII table
[phi1,Phi,phi2] = Eulers=toRadians*np.array(map(\
float,table.data[column['vector'][options.eulers]:\
column['vector'][options.eulers]+datainfo['vector']['len']]))
float,table.data[column:column+datainfo['vector']['len']]))
S = [ sum( [applyEulers(phi1,Phi,phi2,normalize( \
slipnormal[options.lattice][slipsystem]))[i]*options.stressnormal[i] for i in range(3)] ) * \
sum( [applyEulers(phi1,Phi,phi2,normalize( \

View File

@@ -8,7 +8,7 @@ from optparse import OptionParser
import damask
scriptID = string.replace('$Id$','\n','\\n')
scriptName = scriptID.split()[1]
scriptName = scriptID.split()[1][:-3]
# --------------------------------------------------------------------
# MAIN

View File

@@ -8,7 +8,7 @@ from optparse import OptionParser
import damask
scriptID = string.replace('$Id$','\n','\\n')
scriptName = scriptID.split()[1]
scriptName = scriptID.split()[1][:-3]
def operator(stretch,strain,eigenvalues):
return {

View File

@@ -7,7 +7,7 @@ from optparse import OptionParser
import damask
scriptID = string.replace('$Id$','\n','\\n')
scriptName = scriptID.split()[1]
scriptName = scriptID.split()[1][:-3]
# --------------------------------------------------------------------
# MAIN
@@ -47,10 +47,9 @@ if np.any(options.shift != 0):
files = []
for name in filenames:
if os.path.exists(name):
files.append({'name':name, 'input':open(name), 'croak':sys.stderr,\
'output':open(os.path.join(os.path.dirname(name),prefix+os.path.basename(name)),'w')})
files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
#--- loop over input files ------------------------------------------------------------------------
#--- loop over input files -------------------------------------------------------------------------
for file in files:
file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
@@ -58,7 +57,7 @@ for file in files:
table.head_read() # read ASCII header info
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
# --------------- figure out size and grid ----------------------------------------------
# --------------- figure out size and grid ---------------------------------------------------------
try:
locationCol = table.labels.index('%s.x'%options.coords) # columns containing location data
elemCol = table.labels.index('elem')
@@ -78,7 +77,7 @@ for file in files:
np.array([max(map(float,coords[0].keys()))-min(map(float,coords[0].keys())),\
max(map(float,coords[1].keys()))-min(map(float,coords[1].keys())),\
max(map(float,coords[2].keys()))-min(map(float,coords[2].keys())),\
],'d') # dimension from bounding box, corrected for cell-centeredness
],'d') # size from bounding box, corrected for cell-centeredness
origin = np.array([min(map(float,coords[0].keys())),\
min(map(float,coords[1].keys())),\
min(map(float,coords[2].keys())),\
@@ -104,7 +103,7 @@ for file in files:
# ------------------------------------------ assemble header ---------------------------------------
table.head_write()
# ------------------------------------------ process data -----------------------------------------
# ------------------------------------------ process data ------------------------------------------
table.data_rewind()
data = np.zeros(outSize.tolist()+[len(table.labels)])
p = np.zeros(3,'i')
@@ -128,11 +127,13 @@ for file in files:
data[a,b,c,locationCol+i] = posOffset[i] + x*elementSize[i] + origin[i]
data[a,b,c,elemCol] = elem
table.data = data[a,b,c,:].tolist()
table.data_write() # output processed line
outputAlive = table.data_write() # output processed line
elem += 1
# ------------------------------------------ output result ----------------------------------------
table.output_flush() # just in case of buffered ASCII table
# ------------------------------------------ output result -----------------------------------------
outputAlive and table.output_flush() # just in case of buffered ASCII table
file['input'].close() # close input ASCII table
file['output'].close() # close output ASCII table
os.rename(file['name']+'_tmp',\
os.path.join(os.path.dirname(file['name']),prefix+os.path.basename(file['name'])))
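
The output pattern introduced here (and reused in the scripts below) writes to a temporary file and renames it into place only after processing finished, so an aborted run cannot leave a truncated file under the final name. A minimal sketch, with hypothetical name and prefix:

import os
name, prefix = 'data.txt', 'shifted_'                     # hypothetical input name and prefix
out = open(name+'_tmp','w')
out.write('processed content\n')
out.close()
os.rename(name+'_tmp', os.path.join(os.path.dirname(name), prefix+os.path.basename(name)))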

View File

@@ -1,112 +1,84 @@
#!/usr/bin/env python
# -*- coding: UTF-8 no BOM -*-
import os,sys,string,numpy
from optparse import OptionParser, Option
scriptID = '$Id$'
scriptName = scriptID.split()[1]
# -----------------------------
class extendableOption(Option):
# -----------------------------
# used for definition of new option parser action 'extend', which enables to take multiple option arguments
# taken from online tutorial http://docs.python.org/library/optparse.html
ACTIONS = Option.ACTIONS + ("extend",)
STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)
def take_action(self, action, dest, opt, value, values, parser):
if action == "extend":
lvalue = value.split(",")
values.ensure_value(dest, []).extend(lvalue)
else:
Option.take_action(self, action, dest, opt, value, values, parser)
import os,sys,string
import numpy as np
from optparse import OptionParser
import damask
scriptID = string.replace('$Id$','\n','\\n')
scriptName = scriptID.split()[1][:-3]
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=extendableOption, usage='%prog options [file[s]]', description = """
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
Produces a binned grid of two columns from an ASCIItable, i.e. a two-dimensional probability density map.
""" + string.replace(scriptID,'\n','\\n')
)
""", version = scriptID)
parser.add_option('-d','--data', dest='data', nargs=2, type='int',
help='columns containing x and y')
parser.add_option('-w','--weight', dest='weight', type='int',
help='column containing weight of (x,y) point')
parser.add_option('-b','--bins', dest='bins', nargs=2, type='int',
help='number of bins in x and y direction')
parser.add_option('-t','--type', dest='type', nargs=3, type='string',
help='type of x, y, and z axis [linear]')
parser.add_option('-x','--xrange', dest='xrange', nargs=2, type='float',
parser.add_option('-d','--data', dest='data', action='store', nargs=2, type='int', metavar='int int',
help='columns containing x and y %default')
parser.add_option('-w','--weight', dest='weight', action='store', metavar='int',
help='column containing weight of (x,y) point [%default]')
parser.add_option('-b','--bins', dest='bins', action='store', nargs=2, type='int', metavar='int int',
help='number of bins in x and y direction %default')
parser.add_option('-t','--type', dest='type', action='store', nargs=3, type='string', metavar='string string string',
help='type (linear/log) of x, y, and z axis [linear]')
parser.add_option('-x','--xrange', dest='xrange', action='store', nargs=2, type='float', metavar='float float',
help='value range in x direction [auto]')
parser.add_option('-y','--yrange', dest='yrange', nargs=2, type='float',
parser.add_option('-y','--yrange', dest='yrange', action='store', nargs=2, type='float', metavar='float float',
help='value range in y direction [auto]')
parser.add_option('-z','--zrange', dest='zrange', nargs=2, type='float',
parser.add_option('-z','--zrange', dest='zrange', action='store', nargs=2, type='float', metavar='float float',
help='value range in z direction [auto]')
parser.add_option('-i','--invert', dest='invert', action='store_true',
help='invert probability density')
help='invert probability density [%default]')
parser.set_defaults(data = [1,2])
parser.set_defaults(data = (1,2))
parser.set_defaults(weight = None)
parser.set_defaults(bins = [10,10])
parser.set_defaults(type = ['linear','linear','linear'])
parser.set_defaults(xrange = [0.0,0.0])
parser.set_defaults(yrange = [0.0,0.0])
parser.set_defaults(zrange = [0.0,0.0])
parser.set_defaults(bins = (10,10))
parser.set_defaults(type = ('linear','linear','linear'))
parser.set_defaults(xrange = (0.0,0.0))
parser.set_defaults(yrange = (0.0,0.0))
parser.set_defaults(zrange = (0.0,0.0))
parser.set_defaults(invert = False)
(options,filenames) = parser.parse_args()
range = numpy.array([numpy.array(options.xrange),
numpy.array(options.yrange),
numpy.array(options.zrange)])
grid = numpy.zeros(options.bins,'i')
result = numpy.zeros((options.bins[0]*options.bins[1],3),'f')
# ------------------------------------------ setup file handles ---------------------------------------
range = np.array([np.array(options.xrange),
np.array(options.yrange),
np.array(options.zrange)])
grid = np.zeros(options.bins,'i')
result = np.zeros((options.bins[0]*options.bins[1],3),'f')
prefix='binned%i-%i_'%(options.data[0],options.data[1])+ \
('weighted%i_'%(options.weight) if options.weight != None else '')
# ------------------------------------------ setup file handles ------------------------------------
files = []
if filenames == []:
files.append({'name': 'STDIN',
'input': sys.stdin,
'output': sys.stdout,
'croak': sys.stderr,
})
files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
else:
for name in filenames:
if os.path.exists(name):
files.append({'name': name,
'input': open(name),
'output': open(os.path.splitext(name)[0]+ \
'_binned%i-%i'%(options.data[0],options.data[1])+ \
('_weighted%i'%(options.weight) if options.weight != None else '')+ \
os.path.splitext(name)[1],'w'),
'croak': sys.stderr,
})
# ------------------------------------------ loop over input files ---------------------------------------
files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
# ------------------------------------------ loop over input files ---------------------------------
for file in files:
if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
skip = int(file['input'].readline().split()[0])
for i in xrange(skip): headers = file['input'].readline().split()
data = numpy.loadtxt(file['input'],usecols=numpy.array(options.data+((options.weight,) if options.weight != None else ()))-1)
file['input'].close() # close input ASCII table
data = np.loadtxt(file['input'],usecols=np.array(options.data+((options.weight,) if options.weight != None else ()))-1)
file['input'].close() # close input ASCII table
for i in (0,1): # check data range for x and y
for i in (0,1): # check data range for x and y
if (range[i] == 0.0).all(): range[i] = [data[:,i].min(),data[:,i].max()]
if options.type[i].lower() == 'log': # if log scale
data[:,i] = numpy.log(data[:,i]) # change x,y coordinates to log
range[i] = numpy.log(range[i]) # change range to log, too
if options.type[i].lower() == 'log': # if log scale
data[:,i] = np.log(data[:,i]) # change x,y coordinates to log
range[i] = np.log(range[i]) # change range to log, too
delta = range[:,1]-range[:,0]
@@ -116,12 +88,12 @@ for file in files:
if x >=0 and x < options.bins[0] and y >= 0 and y < options.bins[1]: grid[x,y] += 1 if options.weight == None else data[i,2]
if (range[2] == 0.0).all(): range[2] = [grid.min(),grid.max()]
if (range[2] == 0.0).all(): # no data in grid?
if (range[2] == 0.0).all(): # no data in grid?
file['croak'].write('no data found on grid...\n')
range[2,:] = numpy.array([0.0,1.0]) # making up arbitrary z range
range[2,:] = np.array([0.0,1.0]) # making up arbitrary z range
if options.type[2].lower() == 'log':
grid = numpy.log(grid)
range[2] = numpy.log(range[2])
grid = np.log(grid)
range[2] = np.log(range[2])
delta[2] = range[2,1]-range[2,0]
@@ -132,12 +104,16 @@ for file in files:
result[i,:] = [range[0,0]+delta[0]/options.bins[0]*(x+0.5),
range[1,0]+delta[1]/options.bins[1]*(y+0.5),
min(1.0,max(0.0,(grid[x,y]-range[2,0])/delta[2]))]
if options.type[0].lower() == 'log': result[i,0] = numpy.exp(result[i,0])
if options.type[1].lower() == 'log': result[i,1] = numpy.exp(result[i,1])
if options.type[0].lower() == 'log': result[i,0] = np.exp(result[i,0])
if options.type[1].lower() == 'log': result[i,1] = np.exp(result[i,1])
if options.invert: result[i,2] = 1.0-result[i,2]
i += 1
# ------------------------------------------ output result -----------------------------------------
file['output'].write('1\thead\n')
file['output'].write('bin_%s\tbin_%s\tz\n'%(headers[options.data[0]-1],headers[options.data[1]-1]))
numpy.savetxt(file['output'],result)
file['output'].close() # close output ASCII table
np.savetxt(file['output'],result)
file['output'].close() # close output ASCII table
if file['name'] != 'STDIN':
os.rename(file['name']+'_tmp',\
os.path.join(os.path.dirname(file['name']),prefix+os.path.basename(file['name'])))
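
The local extendableOption class deleted above now comes from the damask package; its 'extend' action accumulates comma-separated option values across repeated flags. A usage sketch, assuming damask.extendableOption matches the removed class:

from optparse import OptionParser
import damask
parser = OptionParser(option_class=damask.extendableOption)
parser.add_option('-v','--vector', dest='vector', action='extend', type='string')
parser.set_defaults(vector = [])
(options,args) = parser.parse_args(['-v','a,b','-v','c'])
# options.vector == ['a','b','c']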

View File

@@ -1,152 +1,117 @@
#!/usr/bin/env python
# -*- coding: UTF-8 no BOM -*-
import os,re,sys,math,string,numpy,damask,time
from optparse import OptionParser, Option
# -----------------------------
class extendableOption(Option):
# -----------------------------
# used for definition of new option parser action 'extend', which enables to take multiple option arguments
# taken from online tutorial http://docs.python.org/library/optparse.html
ACTIONS = Option.ACTIONS + ("extend",)
STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)
def take_action(self, action, dest, opt, value, values, parser):
if action == "extend":
lvalue = value.split(",")
values.ensure_value(dest, []).extend(lvalue)
else:
Option.take_action(self, action, dest, opt, value, values, parser)
def location(idx,res):
return numpy.array([ idx % res[0], \
(idx // res[0]) % res[1], \
(idx // res[0] // res[1]) % res[2] ])
def index(location,res):
return ( location[0] % res[0] + \
(location[1] % res[1]) * res[0] + \
(location[2] % res[2]) * res[0] * res[1] )
import os,sys,string
import numpy as np
from optparse import OptionParser
import damask
scriptID = string.replace('$Id$','\n','\\n')
scriptName = scriptID.split()[1][:-3]
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=extendableOption, usage='%prog [options] [file[s]]', description = """
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
Blows up each value to a surrounding data block of size 'packing' thus increasing the former resolution
to resolution*packing. (Requires numpy.)
to resolution*packing.
""" + string.replace('$Id$','\n','\\n')
)
""", version = scriptID)
parser.add_option('-c','--coordinates', dest='coords', type='string',\
parser.add_option('-c','--coordinates', dest='coords', action='store', type='string', metavar='string',
help='column heading for coordinates [%default]')
parser.add_option('-p','--packing', dest='packing', type='int', nargs=3, \
parser.add_option('-p','--packing', dest='packing', action='store', type='int', nargs=3, metavar='int int int',
help='dimension of packed group %default')
parser.add_option('-r','--resolution', dest='resolution', type='int', nargs=3, \
parser.add_option('-g','--grid', dest='resolution', action='store', type='int', nargs=3, metavar='int int int',
help='resolution in x,y,z [autodetect]')
parser.add_option('-d','--dimension', dest='dimension', type='float', nargs=3, \
parser.add_option('-s','--size', dest='dimension', action='store', type='float', nargs=3, metavar='int int int',
help='dimension in x,y,z [autodetect]')
parser.set_defaults(coords = 'ip')
parser.set_defaults(packing = [2,2,2])
parser.set_defaults(resolution = [0,0,0])
parser.set_defaults(dimension = [0.0,0.0,0.0])
parser.set_defaults(coords = 'ip')
parser.set_defaults(packing = [2,2,2])
parser.set_defaults(grid = [0,0,0])
parser.set_defaults(size = [0.0,0.0,0.0])
(options,filenames) = parser.parse_args()
if len(options.packing) < 3:
parser.error('packing needs three parameters...')
options.packing = numpy.array(options.packing)
options.packing = np.array(options.packing)
prefix = 'blowUp%ix%ix%i_'%(options.packing[0],options.packing[1],options.packing[2])
# ------------------------------------------ setup file handles ---------------------------------------
# ------------------------------------------ setup file handles ------------------------------------
files = []
if filenames == []:
files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout})
else:
for name in filenames:
name = os.path.relpath(name)
if os.path.exists(name):
files.append({'name':name, 'input':open(name),
'output':open(os.path.join(os.path.dirname(name),prefix+os.path.basename(name)),'w')})
# ------------------------------------------ loop over input files ---------------------------------------
for name in filenames:
if os.path.exists(name):
files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
#--- loop over input files -------------------------------------------------------------------------
for file in files:
if file['name'] != 'STDIN': print file['name'],
table = damask.ASCIItable(file['input'],file['output'],False) # make unbuffered ASCII_table
table.head_read() # read ASCII header info
table.info_append(string.replace('$Id$','\n','\\n') + \
'\t' + ' '.join(sys.argv[1:]))
file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
table = damask.ASCIItable(file['input'],file['output'],False) # make unbuffered ASCII_table
table.head_read() # read ASCII header info
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
# --------------- figure out size and grid ---------------------------------------------------------
try:
locationCol = table.labels.index('%s.x'%options.coords) # columns containing location data
elemCol = table.labels.index('elem') # columns containing location data
locationCol = table.labels.index('%s.x'%options.coords) # columns containing location data
elemCol = table.labels.index('elem')
except ValueError:
print 'no coordinate data or element data found...'
file['croak'].write('no coordinate (%s.x) and/or elem data found...\n'%options.coords)
continue
if (any(options.resolution)==0 or any(options.dimension)==0.0):
grid = [{},{},{}]
while table.data_read(): # read next data line of ASCII table
if (any(options.grid)==0 or any(options.size)==0.0):
coords = [{},{},{}]
while table.data_read(): # read next data line of ASCII table
for j in xrange(3):
grid[j][str(table.data[locationCol+j])] = True # remember coordinate along x,y,z
resolution = numpy.array([len(grid[0]),\
len(grid[1]),\
len(grid[2]),],'i') # resolution is number of distinct coordinates found
dimension = resolution/numpy.maximum(numpy.ones(3,'d'),resolution-1.0)* \
numpy.array([max(map(float,grid[0].keys()))-min(map(float,grid[0].keys())),\
max(map(float,grid[1].keys()))-min(map(float,grid[1].keys())),\
max(map(float,grid[2].keys()))-min(map(float,grid[2].keys())),\
],'d') # dimension from bounding box, corrected for cell-centeredness
coords[j][str(table.data[locationCol+j])] = True # remember coordinate along x,y,z
grid = np.array([len(coords[0]),\
len(coords[1]),\
len(coords[2]),],'i') # resolution is number of distinct coordinates found
size = grid/np.maximum(np.ones(3,'d'),grid-1.0)* \
np.array([max(map(float,coords[0].keys()))-min(map(float,coords[0].keys())),\
max(map(float,coords[1].keys()))-min(map(float,coords[1].keys())),\
max(map(float,coords[2].keys()))-min(map(float,coords[2].keys())),\
],'d') # size from bounding box, corrected for cell-centeredness
origin = np.array([min(map(float,coords[0].keys())),\
min(map(float,coords[1].keys())),\
min(map(float,coords[2].keys())),\
],'d') - 0.5 * size / grid
else:
resolution = numpy.array(options.resolution,'i')
dimension = numpy.array(options.dimension,'d')
grid = np.array(options.grid,'i')
size = np.array(options.size,'d')
origin = np.zeros(3,'d')
if resolution[2] == 1:
options.packing[2] = 1
dimension[2] = min(dimension[:2]/resolution[:2]) # z spacing equal to smaller of x or y spacing
for i, res in enumerate(grid):
if res == 1:
options.packing[i] = 1
options.shift[i] = 0
mask = np.ones(3,dtype=bool)
mask[i]=0
size[i] = min(size[mask]/grid[mask]) # third spacing equal to smaller of other spacing
packing = numpy.array(options.packing,'i')
outSize = resolution*packing
print '\t%s @ %s --> %s'%(dimension,resolution,outSize)
packing = np.array(options.packing,'i')
outSize = grid*packing
# ------------------------------------------ assemble header ---------------------------------------
table.head_write()
# ------------------------------------------ process data ---------------------------------------
# ------------------------------------------ process data -------------------------------------------
table.data_rewind()
data = numpy.zeros(outSize.tolist()+[len(table.labels)])
p = numpy.zeros(3,'i')
data = np.zeros(outSize.tolist()+[len(table.labels)])
p = np.zeros(3,'i')
for p[2] in xrange(resolution[2]):
for p[1] in xrange(resolution[1]):
for p[0] in xrange(resolution[0]):
for p[2] in xrange(grid[2]):
for p[1] in xrange(grid[1]):
for p[0] in xrange(grid[0]):
d = p*packing
table.data_read()
data[d[0]:d[0]+packing[0],
d[1]:d[1]+packing[1],
d[2]:d[2]+packing[2],
: ] = numpy.tile(numpy.array(table.data_asFloat(),'d'),packing.tolist()+[1]) # tile to match blowUp voxel size
: ] = np.tile(np.array(table.data_asFloat(),'d'),packing.tolist()+[1]) # tile to match blowUp voxel size
elementSize = dimension/resolution/packing
elementSize = size/grid/packing
elem = 1
for c in xrange(outSize[2]):
for b in xrange(outSize[1]):
@@ -154,17 +119,13 @@ for file in files:
data[a,b,c,locationCol:locationCol+3] = [a+0.5,b+0.5,c+0.5]*elementSize
data[a,b,c,elemCol] = elem
table.data = data[a,b,c,:].tolist()
table.data_write() # output processed line
outputAlive = table.data_write() # output processed line
elem += 1
# ------------------------------------------ output result -----------------------------------------
outputAlive and table.output_flush() # just in case of buffered ASCII table
# ------------------------------------------ output result ---------------------------------------
table.output_flush() # just in case of buffered ASCII table
# ------------------------------------------ close file handles ---------------------------------------
for file in files:
file['input'].close() # close input ASCII table
if file['name'] != 'STDIN':
file['output'].close() # close output ASCII table
file['input'].close() # close input ASCII table
file['output'].close() # close output ASCII table
os.rename(file['name']+'_tmp',\
os.path.join(os.path.dirname(file['name']),prefix+os.path.basename(file['name'])))
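
Each input voxel's data row is replicated over a packing-sized block of output voxels via np.tile. A minimal sketch with a hypothetical two-column row:

import numpy as np
packing = np.array([2,2,2],'i')
row = np.array([1.0,2.0])                    # one data line, two columns
block = np.tile(row, packing.tolist()+[1])   # shape (2,2,2,2): same row at every packed voxel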

View File

@@ -1,46 +1,29 @@
#!/usr/bin/env python
# -*- coding: UTF-8 no BOM -*-
import os,re,sys,math,string,damask
from optparse import OptionParser, Option
# -----------------------------
class extendableOption(Option):
# -----------------------------
# used for definition of new option parser action 'extend', which enables to take multiple option arguments
# taken from online tutorial http://docs.python.org/library/optparse.html
ACTIONS = Option.ACTIONS + ("extend",)
STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)
def take_action(self, action, dest, opt, value, values, parser):
if action == "extend":
lvalue = value.split(",")
values.ensure_value(dest, []).extend(lvalue)
else:
Option.take_action(self, action, dest, opt, value, values, parser)
import os,sys,string
from optparse import OptionParser
import damask
scriptID = string.replace('$Id$','\n','\\n')
scriptName = scriptID.split()[1][:-3]
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=extendableOption, usage='%prog options [file[s]]', description = """
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
Remove column(s) containing scalar, vectorial, and/or tensorial data.
""" + string.replace('$Id$','\n','\\n')
)
""", version = scriptID)
parser.add_option('-v','--vector', dest='vector', action='extend', type='string', \
parser.add_option('-v','--vector', dest='vector', action='extend', type='string', metavar='<string LIST>',
help='heading of columns containing 3x1 vector field values')
parser.add_option('-t','--tensor', dest='tensor', action='extend', type='string', \
parser.add_option('-t','--tensor', dest='tensor', action='extend', type='string', metavar='<string LIST>',
help='heading of columns containing 3x3 tensor field values')
parser.add_option('-s','--special', dest='special', action='extend', type='string', \
parser.add_option('-s','--special', dest='special', action='extend', type='string', metavar='<string LIST>',
help='heading of columns containing field values of special dimension')
parser.add_option('-d','--dimension', dest='N', action='store', type='int', \
parser.add_option('-d','--dimension', dest='N', action='store', type='int', metavar='int',
help='dimension of special field values [%default]')
parser.set_defaults(vector = [])
@@ -62,36 +45,30 @@ datainfo = { # lis
'label':[]},
}
if options.vector != None: datainfo['vector']['label'] += options.vector
if options.tensor != None: datainfo['tensor']['label'] += options.tensor
if options.special != None: datainfo['special']['label'] += options.special
# ------------------------------------------ setup file handles ---------------------------------------
# ------------------------------------------ setup file handles ------------------------------------
files = []
if filenames == []:
files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout})
files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
else:
for name in filenames:
if os.path.exists(name):
files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w')})
# ------------------------------------------ loop over input files ---------------------------------------
files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
#--- loop over input files -------------------------------------------------------------------------
for file in files:
if file['name'] != 'STDIN': print file['name']
if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
table = damask.ASCIItable(file['input'],file['output'],False) # make unbuffered ASCII_table
table.head_read() # read ASCII header info
table.info_append(string.replace('$Id$','\n','\\n') + \
'\t' + ' '.join(sys.argv[1:]))
# --------------- figure out columns to process
table = damask.ASCIItable(file['input'],file['output'],False) # make unbuffered ASCII_table
table.head_read() # read ASCII header info
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
# --------------- figure out columns to delete ----------------------------------------------------
columns = []
for datatype,info in datainfo.items():
for label in info['label']:
key = {True :'1_%s',
@@ -99,31 +76,29 @@ for file in files:
if key not in table.labels:
sys.stderr.write('column %s not found...\n'%key)
else:
columns.append([table.labels.index(key),info['len']]) # remember column and extent of requested data
columns.append([table.labels.index(key),info['len']]) # remember column and extent of requested data
if (info['len'] == 1):
table.labels.remove(label) # remove single column head
table.labels.remove(label) # remove single column head
else:
for i in xrange(info['len']):
table.labels.remove('%i_%s'%(i+1,label)) # remove multidimensional column head
table.labels.remove('%i_%s'%(i+1,label)) # remove multidimensional column head
columns.sort(key=lambda x:x[0],reverse=True) # sort from highest column to delete backwards
columns.sort(key=lambda x:x[0],reverse=True) # sort from highest column to delete backwards
# ------------------------------------------ assemble header ---------------------------------------
table.head_write()
# ------------------------------------------ process data ---------------------------------------
# ------------------------------------------ process data ------------------------------------------
outputAlive = True
while outputAlive and table.data_read(): # read next data line of ASCII table
for col,len in columns: # loop over removal candidates
del table.data[col:col+len] # remove each associated entry
outputAlive = table.data_write() # output processed line
while table.data_read(): # read next data line of ASCII table
for col,len in columns: # loop over removal candidates
del table.data[col:col+len] # remove each associated entry
table.data_write() # output processed line
# ------------------------------------------ output result -----------------------------------------
outputAlive and table.output_flush() # just in case of buffered ASCII table
# ------------------------------------------ output result ---------------------------------------
table.output_flush() # just in case of buffered ASCII table
file['input'].close() # close input ASCII table
file['input'].close() # close input ASCII table (works for stdin)
file['output'].close() # close output ASCII table (works for stdout)
if file['name'] != 'STDIN':
file['output'].close # close output ASCII table
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new
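
Sorting the removal candidates from highest column downwards (as the sort above arranges) keeps the start indices of the remaining candidates valid while deleting. A minimal sketch:

data = ['a','b','c','d','e']
columns = [[1,2],[4,1]]                        # [start,extent] of columns to remove
columns.sort(key=lambda x:x[0],reverse=True)   # delete back to front
for col,extent in columns:
    del data[col:col+extent]                   # leaves ['a','d']; front-to-back would shift indices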

View File

@@ -1,75 +1,54 @@
#!/usr/bin/env python
# -*- coding: UTF-8 no BOM -*-
import os,re,sys,math,string,damask
from optparse import OptionParser, Option
# -----------------------------
class extendableOption(Option):
# -----------------------------
# used for definition of new option parser action 'extend', which enables to take multiple option arguments
# taken from online tutorial http://docs.python.org/library/optparse.html
ACTIONS = Option.ACTIONS + ("extend",)
STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)
def take_action(self, action, dest, opt, value, values, parser):
if action == "extend":
lvalue = value.split(",")
values.ensure_value(dest, []).extend(lvalue)
else:
Option.take_action(self, action, dest, opt, value, values, parser)
import os,sys,string
from optparse import OptionParser
import damask
scriptID = string.replace('$Id$','\n','\\n')
scriptName = scriptID.split()[1][:-3]
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=extendableOption, usage='%prog [file[s]]', description = """
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
Remove info lines from given ASCIItable(s).
""" + string.replace('$Id$','\n','\\n')
)
""", version = scriptID)
(options,filenames) = parser.parse_args()
# ------------------------------------------ setup file handles ---------------------------------------
# ------------------------------------------ setup file handles ------------------------------------
files = []
if filenames == []:
files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout})
files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
else:
for name in filenames:
if os.path.exists(name):
files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w')})
# ------------------------------------------ loop over input files ---------------------------------------
files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
#--- loop over input files -------------------------------------------------------------------------
for file in files:
if file['name'] != 'STDIN': print file['name']
if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
table = damask.ASCIItable(file['input'],file['output'],False) # make unbuffered ASCII_table
table.head_read() # read ASCII header info
table.info_clear()
table = damask.ASCIItable(file['input'],file['output'],False) # make unbuffered ASCII_table
table.head_read()
# ------------------------------------------ assemble header ---------------------------------------
table.info_clear()
table.head_write()
# ------------------------------------------ process data ---------------------------------------
# ------------------------------------------ process data ------------------------------------------
outputAlive = True
while outputAlive and table.data_read(): # read next data line of ASCII table
outputAlive = table.data_write() # output processed line
while outputAlive and table.data_read(): # read next data line of ASCII table
outputAlive = table.data_write() # output processed line
# ------------------------------------------ output result ---------------------------------------
outputAlive and table.output_flush() # just in case of buffered ASCII table
# ------------------------------------------ output result -----------------------------------------
outputAlive and table.output_flush() # just in case of buffered ASCII table
file['input'].close() # close input ASCII table (works for stdin)
file['output'].close() # close output ASCII table (works for stdout)
if file['name'] != 'STDIN':
file['input'].close() # close input ASCII table
file['output'].close() # close output ASCII table
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new

View File

@@ -1,52 +1,32 @@
#!/usr/bin/env python
# -*- coding: UTF-8 no BOM -*-
import os,re,sys,fnmatch,math,string,damask
from optparse import OptionParser, Option
import os,re,sys,string,fnmatch
from optparse import OptionParser
import damask
scriptID = '$Id$'
scriptName = scriptID.split()[1]
# -----------------------------
class extendableOption(Option):
# -----------------------------
# used for definition of new option parser action 'extend', which enables to take multiple option arguments
# taken from online tutorial http://docs.python.org/library/optparse.html
ACTIONS = Option.ACTIONS + ("extend",)
STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)
def take_action(self, action, dest, opt, value, values, parser):
if action == "extend":
lvalue = value.split(",")
values.ensure_value(dest, []).extend(lvalue)
else:
Option.take_action(self, action, dest, opt, value, values, parser)
scriptName = scriptID.split()[1][:-3]
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=extendableOption, usage='%prog options [file[s]]', description = """
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
Filter rows according to condition and columns by either white or black listing.
Examples:
Every odd row if x coordinate is positive -- " #ip.x# >= 0.0 and #_row_#%2 == 1 ).
All rows where label 'foo' equals 'bar' -- " #foo# == \"bar\" "
""" + string.replace(scriptID,'\n','\\n')
)
""", version = scriptID)
parser.add_option('-w','--white', dest='whitelist', action='extend', type='string', \
help='white list of column labels (a,b,c,...)', metavar='<LIST>')
parser.add_option('-b','--black', dest='blacklist', action='extend', type='string', \
help='black list of column labels (a,b,c,...)', metavar='<LIST>')
parser.add_option('-c','--condition', dest='condition', type='string', \
help='condition to filter rows', metavar='<EXPR>')
parser.add_option('-w','--white', dest='whitelist', action='extend', type='string', metavar='<string LIST>',
help='white list of column labels (a,b,c,...)')
parser.add_option('-b','--black', dest='blacklist', action='extend', type='string', metavar='<string LIST>',
help='black list of column labels (a,b,c,...)')
parser.add_option('-c','--condition', dest='condition', type='string', metavar='string',
help='condition to filter rows')
parser.set_defaults(whitelist = [])
parser.set_defaults(blacklist = [])
@@ -54,9 +34,7 @@ parser.set_defaults(condition = '')
(options,filenames) = parser.parse_args()
# ------------------------------------------ setup file handles ---------------------------------------
# ------------------------------------------ setup file handles ------------------------------------
files = []
if filenames == []:
files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
@@ -65,35 +43,32 @@ else:
if os.path.exists(name):
files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
# ------------------------------------------ loop over input files ---------------------------------------
#--- loop over input files -------------------------------------------------------------------------
for file in files:
if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
table = damask.ASCIItable(file['input'],file['output'],False) # make unbuffered ASCII_table
table.head_read() # read ASCII header info
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
specials = { \
'_row_': 0,
}
table = damask.ASCIItable(file['input'],file['output'],buffered = False) # make unbuffered ASCII_table
table.head_read() # read ASCII header info
table.info_append(string.replace(scriptID,'\n','\\n') + \
'\t' + ' '.join(sys.argv[1:]))
labels = []
positions = []
for position,label in enumerate(table.labels):
if (options.whitelist == [] or any([fnmatch.fnmatch(label,needle) for needle in options.whitelist])) \
and (options.blacklist == [] or not any([fnmatch.fnmatch(label,needle) for needle in options.blacklist])): # a label to keep?
labels.append(label) # remember name...
positions.append(position) # ...and position
and (options.blacklist == [] or not any([fnmatch.fnmatch(label,needle) for needle in options.blacklist])): # a label to keep?
labels.append(label) # remember name...
positions.append(position) # ...and position
interpolator = []
for position,operand in enumerate(set(re.findall(r'#(([s]#)?(.+?))#',options.condition))): # find three groups
for position,operand in enumerate(set(re.findall(r'#(([s]#)?(.+?))#',options.condition))): # find three groups
options.condition = options.condition.replace('#'+operand[0]+'#',
{ '': '{%i}'%position,
's#':'"{%i}"'%position}[operand[1]])
if operand[2] in specials: # special label ?
if operand[2] in specials: # special label ?
interpolator += ['specials["%s"]'%operand[2]]
else:
try:
@@ -106,26 +81,21 @@ for file in files:
evaluator = "'" + options.condition + "'.format(" + ','.join(interpolator) + ")"
# ------------------------------------------ assemble header ---------------------------------------
table.labels = labels # update with new label set
table.labels = labels # update with new label set
table.head_write()
# ------------------------------------------ process data ---------------------------------------
# ------------------------------------------ process data ------------------------------------------
outputAlive = True
while outputAlive and table.data_read(): # read next data line of ASCII table
while outputAlive and table.data_read(): # read next data line of ASCII table
specials['_row_'] += 1 # count row
if options.condition == '' or eval(eval(evaluator)): # valid row ?
table.data = [table.data[position] for position in positions] # retain filtered columns
outputAlive = table.data_write() # output processed line
specials['_row_'] += 1 # count row
# ------------------------------------------ output result -----------------------------------------
outputAlive and table.output_flush() # just in case of buffered ASCII table
if options.condition == '' or eval(eval(evaluator)): # valid row ?
table.data = [table.data[position] for position in positions] # retain filtered columns
outputAlive = table.data_write() # output processed line
# ------------------------------------------ output result ---------------------------------------
outputAlive and table.output_flush() # just in case of buffered ASCII table
file['input'].close() # close input ASCII table
file['input'].close() # close input ASCII table (works for stdin)
file['output'].close() # close output ASCII table (works for stdout)
if file['name'] != 'STDIN':
file['output'].close() # close output ASCII table
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new
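
The double eval above first builds the row-specific condition string, then evaluates it. A worked sketch, with a hypothetical column and row:

condition = '{0} >= 0.0'                   # '#ip.x# >= 0.0' after the '#...#' replacement
interpolator = ['float(row[1])']           # interpolator for that column (hypothetical index)
row = ['42','0.5']                         # current data line
evaluator = "'" + condition + "'.format(" + ','.join(interpolator) + ")"
eval(evaluator)                            # -> '0.5 >= 0.0'
eval(eval(evaluator))                      # -> True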