updated first 9 post-processing scripts to latest ASCII table handling style

Martin Diehl 2014-07-21 19:55:05 +00:00
parent c8cd775747
commit e8d2e787bc
9 changed files with 248 additions and 385 deletions
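
All nine scripts converge on one per-file streaming skeleton. A minimal sketch of that skeleton, assembled from the calls visible in the diffs below (the damask Python module and an existing ASCII table file are assumed; scriptName is hypothetical, the real scripts derive it from scriptID):

    import os,sys,string
    import damask                                     # DAMASK's Python module, assumed importable

    scriptID   = '$Id$'
    scriptName = 'exampleFilter'                      # hypothetical; real scripts use scriptID.split()[1]

    files = []
    for name in sys.argv[1:]:                         # stand-in for the OptionParser filenames
      if os.path.exists(name):
        files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})

    for file in files:
      file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')

      table = damask.ASCIItable(file['input'],file['output'],False)  # unbuffered ASCII table
      table.head_read()                                              # read ASCII header info
      table.info_append(string.replace(scriptID,'\n','\\n') + '\t' + ' '.join(sys.argv[1:]))
      table.labels_append('myColumn')                                # extend header with new column(s)
      table.head_write()

      outputAlive = True
      while outputAlive and table.data_read():                       # stream one data line at a time
        table.data_append(42.0)                                      # derived value(s) for the new column(s)
        outputAlive = table.data_write()                             # write the processed line

      outputAlive and table.output_flush()                           # just in case of buffered ASCII table
      file['input'].close()
      file['output'].close()
      os.rename(file['name']+'_tmp',file['name'])                    # replace original with processed copy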

View File

@@ -87,28 +87,29 @@ for file in files:
     if label not in brokenFormula:
       evaluator[label] = "'" + formula + "'%(" + ','.join(interpolator) + ")"
 
-# ------------------------------------------ calculate one result to get length of labels ---------
-  table.data_read()
-  labelLen = {}
-  for label in options.labels:
-    labelLen[label] = np.size(eval(eval(evaluator[label])))
-
-# ------------------------------------------ assemble header ---------------------------------------
-  for label,formula in zip(options.labels,options.formulas):
-    if labelLen[label] == 0:
-      brokenFormula[label] = True
-    if label not in brokenFormula:
-      if labelLen[label] == 1:
-        table.labels_append(label)
-      else:
-        table.labels_append(['%i_%s'%(i+1,label) for i in xrange(labelLen[label])])
-  table.head_write()
-
-# ------------------------------------------ process data ---------------------------------------
+# ------------------------------------------ process data ------------------------------------------
+  firstLine = True
   outputAlive = True
-  table.data_rewind()
   while outputAlive and table.data_read():                  # read next data line of ASCII table
     specials['_row_'] += 1                                  # count row
+
+# ------------------------------------------ calculate one result to get length of labels ---------
+    if firstLine:
+      labelLen = {}
+      for label in options.labels:
+        labelLen[label] = np.size(eval(eval(evaluator[label])))
+
+# ------------------------------------------ assemble header ---------------------------------------
+      for label,formula in zip(options.labels,options.formulas):
+        if labelLen[label] == 0:
+          brokenFormula[label] = True
+        if label not in brokenFormula:
+          if labelLen[label] == 1:
+            table.labels_append(label)
+          else:
+            table.labels_append(['%i_%s'%(i+1,label) for i in xrange(labelLen[label])])
+      table.head_write()
+      firstLine = False
+
     for label in options.labels: table.data_append(unravel(eval(eval(evaluator[label]))))
     outputAlive = table.data_write()                        # output processed line
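
The hunk above replaces a separate "probe one row, then rewind" pass with a firstLine flag: the first data row is evaluated to learn how many columns each formula yields, the header is written at that point, and the probing row then flows through the normal output path, so data_rewind() is no longer needed. A tiny runnable sketch of the pattern on a stand-in table:

    rows = [[1.0,2.0],[3.0,4.0]]                      # stand-in for successive table.data_read() results
    firstLine = True
    for row in rows:
      if firstLine:                                   # header is sized from the first row ...
        print '\t'.join('%i_value'%(i+1) for i in range(len(row)))
        firstLine = False
      print '\t'.join(map(str,row))                   # ... which is then processed normally, not discarded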

View File

@@ -30,19 +30,13 @@ parser.set_defaults(stress = 'p')
 (options,filenames) = parser.parse_args()
 
-if options.defgrad == None or options.stress == None:
-  parser.error('missing data column...')
-
 datainfo = {                                                # list of requested labels per datatype
-             'defgrad':    {'mandatory': True,
-                            'len':9,
+             'defgrad':    {'len':9,
                             'label':[]},
-             'stress':     {'mandatory': True,
-                            'len':9,
+             'stress':     {'len':9,
                             'label':[]},
            }
 
 datainfo['defgrad']['label'].append(options.defgrad)
 datainfo['stress']['label'].append(options.stress)
@@ -74,21 +68,20 @@ for file in files:
                    False:'%s'   }[info['len']>1]%label
     if key not in table.labels:
       file['croak'].write('column %s not found...\n'%key)
-      missingColumns |= info['mandatory']                   # break if label is mandatory
+      missingColumns = True                                 # break if label not found
     else:
       active[datatype].append(label)
       column[datatype][label] = table.labels.index(key)     # remember columns of requested data
 
   if missingColumns:
     continue
 
-# ------------------------------------------ assemble header ---------------------------------------
-  table.labels_append(['%i_Cauchy'%(i+1)
-                       for i in xrange(datainfo['stress']['len'])])  # extend ASCII header with new labels
+# ------------------------------------------ assemble header ------------------------------------
+  table.labels_append(['%i_Cauchy'%(i+1) for i in xrange(datainfo['stress']['len'])])  # extend ASCII header with new labels
   table.head_write()
 
-# ------------------------------------------ process data ---------------------------------------
+# ------------------------------------------ process data ----------------------------------------
   outputAlive = True
-  table.data_rewind()
   while outputAlive and table.data_read():                  # read next data line of ASCII table
     F = np.array(map(float,table.data[column['defgrad'][active['defgrad'][0]]:
                                       column['defgrad'][active['defgrad'][0]]+datainfo['defgrad']['len']]),'d').reshape(3,3)
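
The loop body continues past the hunk; given the defaults (stress label 'p', i.e. first Piola-Kirchhoff, alongside deformation gradient F), the remainder presumably applies the standard push-forward sigma = det(F)^-1 P F^T. A minimal numpy sketch of that relation, not the script's exact code:

    import numpy as np

    def cauchy(F,P):
      return np.dot(P,F.T)/np.linalg.det(F)           # sigma = det(F)^-1 * P * F^T

    F = np.eye(3); F[0,1] = 0.1                       # simple shear
    P = np.outer([1.,0.,0.],[1.,0.,0.])               # uniaxial first Piola-Kirchhoff stress
    print cauchy(F,P)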

View File

@@ -3,7 +3,6 @@
 import os,re,sys,math,string
 import numpy as np
-from collections import defaultdict
 from optparse import OptionParser
 import damask
 
@@ -45,48 +44,25 @@ datainfo['defgrad']['label'].append(options.defgrad)
 # ------------------------------------------ setup file handles -------------------------------------
 files = []
-if filenames == []:
-  files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
-else:
-  for name in filenames:
-    if os.path.exists(name):
-      files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
+for name in filenames:
+  if os.path.exists(name):
+    files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
 
 #--- loop over input files ------------------------------------------------------------------------
 for file in files:
-  if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
-  else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
+  file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
 
   table = damask.ASCIItable(file['input'],file['output'],False)  # make unbuffered ASCII_table
   table.head_read()                                         # read ASCII header info
   table.info_append(string.replace(scriptID,'\n','\\n') + '\t' + ' '.join(sys.argv[1:]))
 
-# --------------- figure out dimension and resolution --------------------------------------------------
+# --------------- figure out dimension and resolution ----------------------------------------------
   try:
    locationCol = table.labels.index('%s.x'%options.coords)  # columns containing location data
   except ValueError:
     file['croak'].write('no coordinate data found...\n'%key)
     continue
 
-  active = defaultdict(list)
-  column = defaultdict(dict)
-  missingColumns = False
-
-  for datatype,info in datainfo.items():
-    for label in info['label']:
-      key = '1_%s'%label
-      if key not in table.labels:
-        file['croak'].write('column %s not found...\n'%key)
-        missingColumns = True
-      else:
-        active[datatype].append(label)
-        column[datatype][label] = table.labels.index(key)   # remember columns of requested data
-        column = table.labels.index(key)
-
-  if missingColumns:
-    continue
-
-# --------------- figure out dimension and resolution ---------------------------------------------
   grid = [{},{},{}]
   while table.data_read():                                  # read next data line of ASCII table
     for j in xrange(3):
@@ -103,6 +79,21 @@ for file in files:
     geomdim[2] = min(geomdim[:2]/res[:2])
   N = res.prod()
 
+# --------------- figure out columns to process ---------------------------------------------------
+  missingColumns = False
+  for datatype,info in datainfo.items():
+    for label in info['label']:
+      key = '1_%s'%label
+      if key not in table.labels:
+        file['croak'].write('column %s not found...\n'%key)
+        missingColumns = True
+      else:
+        column = table.labels.index(key)                    # remember columns of requested data
+
+  if missingColumns:
+    continue
+
 # ------------------------------------------ assemble header ---------------------------------------
   if not options.noShape:  table.labels_append(['shapeMismatch(%s)' %options.defgrad])
   if not options.noVolume: table.labels_append(['volMismatch(%s)'%options.defgrad])
@@ -112,7 +103,8 @@ for file in files:
   table.data_rewind()
   F = np.array([0.0 for i in xrange(N*9)]).reshape([3,3]+list(res))
   idx = 0
-  (x,y,z) = damask.gridLocation(idx,res)                    # figure out (x,y,z) position from line count
+  while table.data_read():
+    (x,y,z) = damask.util.gridLocation(idx,res)             # figure out (x,y,z) position from line count
     idx += 1
     F[0:3,0:3,x,y,z] = np.array(map(float,table.data[column:column+9]),'d').reshape(3,3)
@@ -125,20 +117,18 @@ for file in files:
 # ------------------------------------------ process data ---------------------------------------
   table.data_rewind()
-  outputAlive = True
   idx = 0
-  while outputAlive and table.data_read():                  # read next data line of ASCII table
-    (x,y,z) = damask.gridLocation(idx,res )                 # figure out (x,y,z) position from line count
+  outputAlive = True
+  while outputAlive and table.data_read():                  # read next data line of ASCII table
+    (x,y,z) = damask.util.gridLocation(idx,res)             # figure out (x,y,z) position from line count
     idx += 1
     if not options.noShape:  table.data_append( shapeMismatch[x,y,z])
     if not options.noVolume: table.data_append(volumeMismatch[x,y,z])
     outputAlive = table.data_write()                        # output processed line
 
 # ------------------------------------------ output result ---------------------------------------
   outputAlive and table.output_flush()                      # just in case of buffered ASCII table
 
   file['input'].close()                                     # close input ASCII table (works for stdin)
   file['output'].close()                                    # close output ASCII table (works for stdout)
-  if file['name'] != 'STDIN': os.rename(file['name']+'_tmp',file['name'])  # overwrite old one with tmp new
+  os.rename(file['name']+'_tmp',file['name'])               # overwrite old one with tmp new
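
Both data loops rely on damask.util.gridLocation(idx,res) to turn a running line count into grid indices. The helper an earlier version of these scripts carried locally (see the removed location() in the deformed-configuration script further down) amounts to:

    def gridLocation(idx,res):                        # line count -> (x,y,z) on a res[0] x res[1] x res[2] grid
      return ( idx                      % res[0], \
             ( idx // res[0])           % res[1], \
             ( idx // res[0] // res[1]) % res[2] )

    print gridLocation(5,(2,2,2))                     # (1, 0, 1): x varies fastest, z slowest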

View File

@@ -89,14 +89,13 @@ for file in files:
   column = defaultdict(dict)
   values = defaultdict(dict)
   curl   = defaultdict(dict)
-  missingColumns = False
 
   for datatype,info in datainfo.items():
     for label in info['label']:
       key = {True :'1_%s',
              False:'%s'   }[info['len']>1]%label
       if key not in table.labels:
-        sys.stderr.write('column %s not found...\n'%key)
+        file['croak'].write('column %s not found...\n'%key)
       else:
         active[datatype].append(label)
         column[datatype][label] = table.labels.index(key)   # remember columns of requested data
@@ -104,15 +103,12 @@ for file in files:
                                     reshape(list(resolution)+[datainfo[datatype]['len']//3,3])
         curl[datatype][label] = np.array([0.0 for i in xrange(N*datainfo[datatype]['len'])]).\
                                     reshape(list(resolution)+[datainfo[datatype]['len']//3,3])
 
-  if missingColumns:
-    continue
-
 # ------------------------------------------ assemble header ---------------------------------------
-  for datatype,info in datainfo.items():
-    for label in info['label']:
+  for datatype,labels in active.items():                    # loop over vector,tensor
+    for label in labels:
       table.labels_append(['%i_curlFFT(%s)'%(i+1,label)
                            for i in xrange(datainfo[datatype]['len'])])  # extend ASCII header with new labels
   table.head_write()
 
 # ------------------------------------------ read value field --------------------------------------
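
The header loop now iterates over active (only the columns actually found) rather than datainfo (everything requested), which is why the missingColumns bail-out can disappear here. A runnable sketch of the bookkeeping on a stand-in table (names hypothetical):

    from collections import defaultdict

    class Table(object):                              # minimal stand-in for damask.ASCIItable's header side
      labels = ['1_f','2_f','3_f','x']                # hypothetical header: tensor 'f' (truncated) plus scalar 'x'

    table    = Table()
    datainfo = {'tensor': {'len':9, 'label':['f','notThere']}}

    active = defaultdict(list)                        # datatype -> labels actually found
    column = defaultdict(dict)                        # datatype -> {label: starting column}

    for datatype,info in datainfo.items():
      for label in info['label']:
        key = {True:'1_%s', False:'%s'}[info['len']>1]%label
        if key not in table.labels:
          print 'column %s not found...'%key          # '1_notThere' ends up here
        else:
          active[datatype].append(label)
          column[datatype][label] = table.labels.index(key)

    for datatype,labels in active.items():            # header assembly sees only found labels
      print datatype, labels, column[datatype]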

View File

@@ -1,166 +1,130 @@
 #!/usr/bin/env python
 # -*- coding: UTF-8 no BOM -*-
 
-import os,re,sys,math,string,numpy,damask
-from optparse import OptionParser, Option
-
-# -----------------------------
-class extendableOption(Option):
-# -----------------------------
-# used for definition of new option parser action 'extend', which enables to take multiple option arguments
-# taken from online tutorial http://docs.python.org/library/optparse.html
-
-  ACTIONS = Option.ACTIONS + ("extend",)
-  STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
-  TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
-  ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)
-
-  def take_action(self, action, dest, opt, value, values, parser):
-    if action == "extend":
-      lvalue = value.split(",")
-      values.ensure_value(dest, []).extend(lvalue)
-    else:
-      Option.take_action(self, action, dest, opt, value, values, parser)
-
-def location(idx,res):
-  return ( idx                      % res[0], \
-         ( idx // res[0])           % res[1], \
-         ( idx // res[0] // res[1]) % res[2] )
-
-def index(location,res):
-  return ( location[0]              % res[0]          + \
-         ( location[1]              % res[1]) * res[0] + \
-         ( location[2]              % res[2]) * res[1] * res[0] )
+import os,re,sys,math,string
+import numpy as np
+from collections import defaultdict
+from optparse import OptionParser
+import damask
+
+scriptID   = '$Id$'
+scriptName = scriptID.split()[1]
 
 # --------------------------------------------------------------------
 #                                MAIN
 # --------------------------------------------------------------------
 
-parser = OptionParser(option_class=extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options file[s]', description = """
 Add column(s) containing deformed configuration of requested column(s).
 Operates on periodic ordered three-dimensional data sets.
 
-""" + string.replace('$Id$','\n','\\n')
+""", version = string.replace(scriptID,'\n','\\n')
 )
 
-parser.add_option('-c','--coordinates', dest='coords', type='string',\
+parser.add_option('-c','--coordinates', dest='coords', type='string', metavar='string', \
                   help='column heading for coordinates [%default]')
-parser.add_option('-d','--defgrad',     dest='defgrad', type='string', \
+parser.add_option('-d','--defgrad',     dest='defgrad', type='string', metavar='string', \
                   help='heading of columns containing tensor field values')
 parser.add_option('-l', '--linear',     dest='linearreconstruction', action='store_true',\
                   help='use linear reconstruction of geometry [%default]')
 
 parser.set_defaults(coords  = 'ip')
 parser.set_defaults(defgrad = 'f' )
 parser.set_defaults(linearreconstruction = False)
 
 (options,filenames) = parser.parse_args()
 
-# ------------------------------------------ setup file handles ---------------------------------------
+datainfo = {                                                # list of requested labels per datatype
+             'defgrad':    {'len':9,
+                            'label':[]},
+           }
+
+datainfo['defgrad']['label'].append(options.defgrad)
+
+# ------------------------------------------ setup file handles -------------------------------------
 files = []
-if filenames == []:
-  files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout})
-else:
-  for name in filenames:
-    if os.path.exists(name):
-      files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w')})
+for name in filenames:
+  if os.path.exists(name):
+    files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
 
-# ------------------------------------------ loop over input files ---------------------------------------
+#--- loop over input files ------------------------------------------------------------------------
 for file in files:
-  if file['name'] != 'STDIN': print file['name'],
+  file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
 
   table = damask.ASCIItable(file['input'],file['output'],False)  # make unbuffered ASCII_table
   table.head_read()                                         # read ASCII header info
-  table.info_append(string.replace('$Id$','\n','\\n') + \
-                    '\t' + ' '.join(sys.argv[1:]))
+  table.info_append(string.replace(scriptID,'\n','\\n') + '\t' + ' '.join(sys.argv[1:]))
 
-# --------------- figure out dimension and resolution
+# --------------- figure out dimension and resolution ----------------------------------------------
   try:
     locationCol = table.labels.index('%s.x'%options.coords) # columns containing location data
   except ValueError:
-    print 'no coordinate data found...'
+    file['croak'].write('no coordinate data found...\n'%key)
     continue
 
   grid = [{},{},{}]
   while table.data_read():                                  # read next data line of ASCII table
-    if str(table.data[locationCol+1]) in grid[1] and len(grid[1])>1:  # geomdim[1] and res[1] already figured out, skip layers
-      table.data_skipLines(len(grid[1])*len(grid[0])-1)
-    else:
-      if str(table.data[locationCol]) in grid[0]:           # geomdim[0] and res[0] already figured out, skip lines
-        table.data_skipLines(len(grid[0])-1)
     for j in xrange(3):
       grid[j][str(table.data[locationCol+j])] = True        # remember coordinate along x,y,z
 
-  res = numpy.array([len(grid[0]),\
-                     len(grid[1]),\
-                     len(grid[2]),],'i')                    # resolution is number of distinct coordinates found
-  geomdim = res/numpy.maximum(numpy.ones(3,'d'),res-1.0)* \
-            numpy.array([max(map(float,grid[0].keys()))-min(map(float,grid[0].keys())),\
-                         max(map(float,grid[1].keys()))-min(map(float,grid[1].keys())),\
-                         max(map(float,grid[2].keys()))-min(map(float,grid[2].keys())),\
-                        ],'d')                              # dimension from bounding box, corrected for cell-centeredness
+  res = np.array([len(grid[0]),\
+                  len(grid[1]),\
+                  len(grid[2]),],'i')                       # resolution is number of distinct coordinates found
+  geomdim = res/np.maximum(np.ones(3,'d'),res-1.0)* \
+            np.array([max(map(float,grid[0].keys()))-min(map(float,grid[0].keys())),\
+                      max(map(float,grid[1].keys()))-min(map(float,grid[1].keys())),\
+                      max(map(float,grid[2].keys()))-min(map(float,grid[2].keys())),\
+                     ],'d')                                 # dimension from bounding box, corrected for cell-centeredness
   if res[2] == 1:
     geomdim[2] = min(geomdim[:2]/res[:2])
   N = res.prod()
-  print '\t%s @ %s'%(geomdim,res)
 
-# --------------- figure out columns to process
-  key = '1_%s' %options.defgrad
-  if key not in table.labels:
-    sys.stderr.write('column %s not found...\n'%key)
-  else:
-    F = numpy.array([0.0 for i in xrange(N*9)]).reshape([3,3]+list(res))
-    table.labels_append(['%s_coordsMod'%(coord+1) for coord in xrange(3)])  # extend ASCII header with new labels
-    column = table.labels.index(key)
+# --------------- figure out columns to process ---------------------------------------------------
+  missingColumns = False
+
+  for datatype,info in datainfo.items():
+    for label in info['label']:
+      key = '1_%s'%label
+      if key not in table.labels:
+        file['croak'].write('column %s not found...\n'%key)
+        missingColumns = True
+      else:
+        column = table.labels.index(key)                    # remember columns of requested data
+
+  if missingColumns:
+    continue
 
 # ------------------------------------------ assemble header ---------------------------------------
+  table.labels_append(['%s_coords'%(coord+1) for coord in xrange(3)])  # extend ASCII header with new labels
   table.head_write()
 
-# ------------------------------------------ read value field ---------------------------------------
+# ------------------------------------------ read deformation gradient field -----------------------
   table.data_rewind()
+  F = np.array([0.0 for i in xrange(N*9)]).reshape([3,3]+list(res))
   idx = 0
-  while table.data_read():                                  # read next data line of ASCII table
-    (x,y,z) = location(idx,res)                             # figure out (x,y,z) position from line count
+  while table.data_read():
+    (x,y,z) = damask.util.gridLocation(idx,res)             # figure out (x,y,z) position from line count
     idx += 1
-    F[:,:,x,y,z] = numpy.array(map(float,table.data[column:column+9]),'d').reshape(3,3)
+    F[0:3,0:3,x,y,z] = np.array(map(float,table.data[column:column+9]),'d').reshape(3,3)
 
-# ------------------------------------------ process value field ----------------------------
+# ------------------------------------------ calculate coordinates ---------------------------------
   Favg = damask.core.math.tensorAvg(F)
   if options.linearreconstruction:
     centroids = damask.core.mesh.deformedCoordsLin(geomdim,F,Favg)
   else:
     centroids = damask.core.mesh.deformedCoordsFFT(geomdim,F,Favg)
 
-# ------------------------------------------ process data ---------------------------------------
+# ------------------------------------------ process data ------------------------------------------
   table.data_rewind()
   idx = 0
-  while table.data_read():                                  # read next data line of ASCII table
-    (x,y,z) = location(idx,res)                             # figure out (x,y,z) position from line count
+  outputAlive = True
+  while outputAlive and table.data_read():                  # read next data line of ASCII table
+    (x,y,z) = damask.util.gridLocation(idx,res)             # figure out (x,y,z) position from line count
     idx += 1
     table.data_append(list(centroids[:,x,y,z]))
-    table.data_write()                                      # output processed line
+    outputAlive = table.data_write()                        # output processed line
 
 # ------------------------------------------ output result ---------------------------------------
-  table.output_flush()                                      # just in case of buffered ASCII table
+  outputAlive and table.output_flush()                      # just in case of buffered ASCII table
 
-  file['input'].close()                                     # close input ASCII table
-  if file['name'] != 'STDIN':
-    file['output'].close                                    # close output ASCII table
-    os.rename(file['name']+'_tmp',file['name'])             # overwrite old one with tmp new
+  file['input'].close()                                     # close input ASCII table (works for stdin)
+  file['output'].close()                                    # close output ASCII table (works for stdout)
+  os.rename(file['name']+'_tmp',file['name'])               # overwrite old one with tmp new
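
The removed extendableOption class (now provided as damask.extendableOption, as the new parser lines imply) only adds an 'extend' action, so an option can take a comma-separated list and accumulate across repeats. A self-contained usage sketch with the class copied verbatim from the block removed above:

    from optparse import OptionParser, Option

    class extendableOption(Option):                   # verbatim from the removed block
      ACTIONS = Option.ACTIONS + ("extend",)
      STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
      TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
      ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)

      def take_action(self, action, dest, opt, value, values, parser):
        if action == "extend":
          values.ensure_value(dest, []).extend(value.split(","))
        else:
          Option.take_action(self, action, dest, opt, value, values, parser)

    parser = OptionParser(option_class=extendableOption)
    parser.add_option('-t','--tensor', dest='tensor', action='extend', type='string')
    parser.set_defaults(tensor = [])
    (options,args) = parser.parse_args(['-t','f,p','-t','Cauchy'])
    print options.tensor                              # ['f', 'p', 'Cauchy']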

View File

@@ -1,53 +1,34 @@
 #!/usr/bin/env python
 # -*- coding: UTF-8 no BOM -*-
 
-import os,re,sys,math,string,damask
-from optparse import OptionParser, Option
-
-# -----------------------------
-class extendableOption(Option):
-# -----------------------------
-# used for definition of new option parser action 'extend', which enables to take multiple option arguments
-# taken from online tutorial http://docs.python.org/library/optparse.html
-
-  ACTIONS = Option.ACTIONS + ("extend",)
-  STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
-  TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
-  ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)
-
-  def take_action(self, action, dest, opt, value, values, parser):
-    if action == "extend":
-      lvalue = value.split(",")
-      values.ensure_value(dest, []).extend(lvalue)
-    else:
-      Option.take_action(self, action, dest, opt, value, values, parser)
+import os,re,sys,math,string
+from collections import defaultdict
+from optparse import OptionParser
+import damask
+
+scriptID   = '$Id$'
+scriptName = scriptID.split()[1]
 
 def determinant(m):
   return +m[0]*m[4]*m[8] \
          +m[1]*m[5]*m[6] \
          +m[2]*m[3]*m[7] \
          -m[2]*m[4]*m[6] \
          -m[1]*m[3]*m[8] \
-         -m[0]*m[5]*m[7] \
+         -m[0]*m[5]*m[7]
 
 # --------------------------------------------------------------------
 #                                MAIN
 # --------------------------------------------------------------------
 
-parser = OptionParser(option_class=extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
 Add column(s) containing determinant of requested tensor column(s).
 
-""" + string.replace('$Id$','\n','\\n')
+""", version=string.replace(scriptID,'\n','\\n')
 )
 
-parser.add_option('-t','--tensor',      dest='tensor', action='extend', type='string', \
+parser.add_option('-t','--tensor',      dest='tensor', action='extend', type='string', metavar='<string LIST>', \
                   help='heading of columns containing tensor field values')
 
 parser.set_defaults(tensor = [])
 
 (options,filenames) = parser.parse_args()
@@ -55,73 +36,64 @@ parser.set_defaults(tensor = [])
 if len(options.tensor) == 0:
   parser.error('no data column specified...')
 
 datainfo = {                                                # list of requested labels per datatype
              'tensor':     {'len':9,
                             'label':[]},
            }
 
-if options.tensor != None:    datainfo['tensor']['label'] += options.tensor
+datainfo['tensor']['label'] += options.tensor
 
-# ------------------------------------------ setup file handles ---------------------------------------
+# ------------------------------------------ setup file handles -----------------------------------
 files = []
 if filenames == []:
-  files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout})
+  files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
 else:
   for name in filenames:
     if os.path.exists(name):
-      files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w')})
+      files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
 
-# ------------------------------------------ loop over input files ---------------------------------------
+#--- loop over input files ------------------------------------------------------------------------
 for file in files:
-  if file['name'] != 'STDIN': print file['name']
+  if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
+  else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
 
   table = damask.ASCIItable(file['input'],file['output'],False)  # make unbuffered ASCII_table
   table.head_read()                                         # read ASCII header info
-  table.info_append(string.replace('$Id$','\n','\\n') + \
-                    '\t' + ' '.join(sys.argv[1:]))
+  table.info_append(string.replace(scriptID,'\n','\\n') + '\t' + ' '.join(sys.argv[1:]))
 
-  active = {}
-  column = {}
-  head = []
+  active = defaultdict(list)
+  column = defaultdict(dict)
 
   for datatype,info in datainfo.items():
     for label in info['label']:
       key = {True :'1_%s',
              False:'%s'   }[info['len']>1]%label
       if key not in table.labels:
-        sys.stderr.write('column %s not found...\n'%key)
+        file['croak'].write('column %s not found...\n'%key)
       else:
-        if datatype not in active: active[datatype] = []
-        if datatype not in column: column[datatype] = {}
         active[datatype].append(label)
        column[datatype][label] = table.labels.index(key)    # remember columns of requested data
-        table.labels_append('det(%s)'%label)                # extend ASCII header with new labels
 
 # ------------------------------------------ assemble header ---------------------------------------
+  for datatype,labels in active.items():                    # loop over vector,tensor
+    for label in labels:                                    # loop over all requested determinants
+      table.labels_append('det(%s)'%label)                  # extend ASCII header with new labels
   table.head_write()
 
 # ------------------------------------------ process data ---------------------------------------
-  while table.data_read():                                  # read next data line of ASCII table
-    for datatype,labels in active.items():                  # loop over vector,tensor
-      for label in labels:                                  # loop over all requested norms
+  outputAlive = True
+  while outputAlive and table.data_read():                  # read next data line of ASCII table
+    for datatype,labels in active.items():                  # loop over vector,tensor
+      for label in labels:                                  # loop over all requested determinantes
        table.data_append(determinant(map(float,table.data[column[datatype][label]:
                                                            column[datatype][label]+datainfo[datatype]['len']])))
-    table.data_write()                                      # output processed line
+    outputAlive = table.data_write()                        # output processed line
 
 # ------------------------------------------ output result ---------------------------------------
-  table.output_flush()                                      # just in case of buffered ASCII table
+  outputAlive and table.output_flush()                      # just in case of buffered ASCII table
 
-  file['input'].close()                                     # close input ASCII table
+  file['input'].close()                                     # close input ASCII table (works for stdin)
+  file['output'].close()                                    # close output ASCII table (works for stdout)
   if file['name'] != 'STDIN':
-    file['output'].close                                    # close output ASCII table
     os.rename(file['name']+'_tmp',file['name'])             # overwrite old one with tmp new
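
The hand-rolled determinant(m) above expands the 3x3 determinant over a flat, row-major 9-element list; the commit also drops the stray line continuation after the last term. A quick cross-check against numpy:

    import numpy as np

    def determinant(m):                               # as defined in the script above
      return +m[0]*m[4]*m[8] \
             +m[1]*m[5]*m[6] \
             +m[2]*m[3]*m[7] \
             -m[2]*m[4]*m[6] \
             -m[1]*m[3]*m[8] \
             -m[0]*m[5]*m[7]

    m = list(np.random.rand(9))
    print abs(determinant(m) - np.linalg.det(np.array(m).reshape(3,3))) < 1e-12  # True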

View File

@@ -1,29 +1,15 @@
 #!/usr/bin/env python
 # -*- coding: UTF-8 no BOM -*-
 
-import os,re,sys,math,string,damask
-from optparse import OptionParser, Option
+import os,re,sys,math,string
+from collections import defaultdict
+from optparse import OptionParser
+import damask
+
+scriptID   = '$Id$'
+scriptName = scriptID.split()[1]
 
 oneThird = 1.0/3.0
 
-# -----------------------------
-class extendableOption(Option):
-# -----------------------------
-# used for definition of new option parser action 'extend', which enables to take multiple option arguments
-# taken from online tutorial http://docs.python.org/library/optparse.html
-
-  ACTIONS = Option.ACTIONS + ("extend",)
-  STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
-  TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
-  ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)
-
-  def take_action(self, action, dest, opt, value, values, parser):
-    if action == "extend":
-      lvalue = value.split(",")
-      values.ensure_value(dest, []).extend(lvalue)
-    else:
-      Option.take_action(self, action, dest, opt, value, values, parser)
 
 def deviator(m):
   sph = oneThird*(m[0]+m[4]+m[8])
@@ -32,19 +18,17 @@ def deviator(m):
   m[8] = m[8] - sph
   return m
 
 # --------------------------------------------------------------------
 #                                MAIN
 # --------------------------------------------------------------------
 
-parser = OptionParser(option_class=extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
 Add column(s) containing deviator of requested tensor column(s).
 
-""" + string.replace('$Id$','\n','\\n')
+""", version = string.replace(scriptID,'\n','\\n')
 )
 
-parser.add_option('-t','--tensor',      dest='tensor', action='extend', type='string', \
+parser.add_option('-t','--tensor',      dest='tensor', action='extend', type='string', metavar='<string LIST>', \
                   help='heading of columns containing tensor field values')
 parser.add_option('-s','--spherical',   dest='hydrostatic', action='store_true',\
                   help='also add sperical part of tensor (hydrostatic component, pressure)')
@@ -56,76 +40,67 @@ parser.set_defaults(tensor = [])
 if len(options.tensor) == 0:
   parser.error('no data column specified...')
 
 datainfo = {                                                # list of requested labels per datatype
              'tensor':     {'len':9,
                             'label':[]},
            }
 
-if options.tensor != None:    datainfo['tensor']['label'] += options.tensor
+datainfo['tensor']['label'] += options.tensor
 
-# ------------------------------------------ setup file handles ---------------------------------------
+# ------------------------------------------ setup file handles -----------------------------------
 files = []
 if filenames == []:
-  files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout})
+  files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
 else:
   for name in filenames:
    if os.path.exists(name):
-      files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w')})
+      files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
 
 # ------------------------------------------ loop over input files ---------------------------------------
 for file in files:
-  if file['name'] != 'STDIN': print file['name']
+  if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
+  else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
 
   table = damask.ASCIItable(file['input'],file['output'],False)  # make unbuffered ASCII_table
   table.head_read()                                         # read ASCII header info
-  table.info_append(string.replace('$Id$','\n','\\n') + \
-                    '\t' + ' '.join(sys.argv[1:]))
+  table.info_append(string.replace(scriptID,'\n','\\n') + '\t' + ' '.join(sys.argv[1:]))
 
-  active = {}
-  column = {}
-  head = []
+  active = defaultdict(list)
+  column = defaultdict(dict)
 
   for datatype,info in datainfo.items():
    for label in info['label']:
      key = {True :'1_%s',
             False:'%s'   }[info['len']>1]%label
      if key not in table.labels:
-        sys.stderr.write('column %s not found...\n'%key)
+        file['croak'].write('column %s not found...\n'%key)
      else:
-        if datatype not in active: active[datatype] = []
-        if datatype not in column: column[datatype] = {}
        active[datatype].append(label)
        column[datatype][label] = table.labels.index(key)    # remember columns of requested data
-        table.labels_append(['%i_dev(%s)'%(i+1,label) for i in xrange(9)])  # extend ASCII header with new labels
-        if(options.hydrostatic): table.labels_append('sph(%s)'%label)
 
 # ------------------------------------------ assemble header ---------------------------------------
+  for datatype,labels in active.items():                    # loop over vector,tensor
+    for label in labels:                                    # loop over all requested determinants
+      table.labels_append(['%i_dev(%s)'%(i+1,label) for i in xrange(9)])  # extend ASCII header with new labels
+      if(options.hydrostatic): table.labels_append('sph(%s)'%label)
   table.head_write()
 
 # ------------------------------------------ process data ---------------------------------------
-  while table.data_read():                                  # read next data line of ASCII table
-    for datatype,labels in active.items():                  # loop over vector,tensor
-      for label in labels:                                  # loop over all deviators
+  outputAlive = True
+  while outputAlive and table.data_read():                  # read next data line of ASCII table
+    for datatype,labels in active.items():                  # loop over vector,tensor
+      for label in labels:                                  # loop over all deviators
        myTensor = map(float,table.data[column[datatype][label]:
                                        column[datatype][label]+datainfo[datatype]['len']])
        table.data_append(deviator(myTensor))
        if(options.hydrostatic): table.data_append(oneThird*(myTensor[0]+myTensor[4]+myTensor[8]))
-    table.data_write()                                      # output processed line
+    outputAlive = table.data_write()                        # output processed line
 
 # ------------------------------------------ output result ---------------------------------------
-  table.output_flush()                                      # just in case of buffered ASCII table
+  outputAlive and table.output_flush()                      # just in case of buffered ASCII table
 
-  file['input'].close()                                     # close input ASCII table
+  file['input'].close()                                     # close input ASCII table (works for stdin)
+  file['output'].close()                                    # close output ASCII table (works for stdout)
  if file['name'] != 'STDIN':
-    file['output'].close                                    # close output ASCII table
    os.rename(file['name']+'_tmp',file['name'])              # overwrite old one with tmp new
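
deviator(m) subtracts the spherical part sph = tr(m)/3 from the diagonal and mutates its argument in place, so any spherical value must be taken before calling it. The hunks show only the head and tail of the function; the elided middle presumably treats m[0] and m[4] like m[8]. A cross-check under that assumption:

    oneThird = 1.0/3.0

    def deviator(m):                                  # head and tail as shown above
      sph  = oneThird*(m[0]+m[4]+m[8])                # spherical (hydrostatic) part
      m[0] = m[0] - sph                               # assumed: same treatment as the visible m[8] line
      m[4] = m[4] - sph
      m[8] = m[8] - sph
      return m

    d = deviator([1.,2.,3.,4.,5.,6.,7.,8.,9.])
    print abs(d[0]+d[4]+d[8]) < 1e-12                 # True: the deviator is traceless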

View File

@@ -103,14 +103,13 @@ for file in files:
   column     = defaultdict(dict)
   values     = defaultdict(dict)
   divergence = defaultdict(dict)
-  missingColumns = False
 
   for datatype,info in datainfo.items():
     for label in info['label']:
       key = {True :'1_%s',
              False:'%s'   }[info['len']>1]%label
       if key not in table.labels:
-        sys.stderr.write('column %s not found...\n'%key)
+        file['croak'].write('column %s not found...\n'%key)
       else:
         active[datatype].append(label)
         column[datatype][label] = table.labels.index(key)   # remember columns of requested data
@@ -120,13 +119,10 @@ for file in files:
         for accuracy in options.accuracy:
           divergence[datatype][label][accuracy] = np.array([0.0 for i in xrange(N*datainfo[datatype]['len']//3)]).\
                                                       reshape(list(resolution)+[datainfo[datatype]['len']//3])
 
-  if missingColumns:
-    continue
-
 # ------------------------------------------ assemble header ---------------------------------------
-  for datatype,info in datainfo.items():
-    for label in info['label']:
+  for datatype,labels in active.items():                    # loop over vector,tensor
+    for label in labels:
       for accuracy in options.accuracy:
         if datatype == 'vector':                            # extend ASCII header with new labels
           table.labels_append(['div%s(%s)'%(accuracy,label)])

View File

@@ -1,31 +1,17 @@
 #!/usr/bin/env python
 # -*- coding: UTF-8 no BOM -*-
 
-import os,re,sys,math,numpy,string,damask
-from optparse import OptionParser, Option
-
-# -----------------------------
-class extendableOption(Option):
-# -----------------------------
-# used for definition of new option parser action 'extend', which enables to take multiple option arguments
-# taken from online tutorial http://docs.python.org/library/optparse.html
-
-  ACTIONS = Option.ACTIONS + ("extend",)
-  STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
-  TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
-  ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)
-
-  def take_action(self, action, dest, opt, value, values, parser):
-    if action == "extend":
-      lvalue = value.split(",")
-      values.ensure_value(dest, []).extend(lvalue)
-    else:
-      Option.take_action(self, action, dest, opt, value, values, parser)
+import os,re,sys,math,string
+import numpy as np
+from collections import defaultdict
+from optparse import OptionParser
+import damask
+
+scriptID   = '$Id$'
+scriptName = scriptID.split()[1]
 
 def normalize(vec):
-  return vec/numpy.sqrt(numpy.inner(vec,vec))
+  return vec/np.sqrt(np.inner(vec,vec))
 
 def E_hkl(stiffness,vec):                                   # stiffness = (c11,c12,c44)
   v = normalize(vec)
@@ -36,7 +22,7 @@ def E_hkl(stiffness,vec): # stiffness = (c11,c12,c44)
   invE = S11-(S11-S12-0.5*S44)* (1.0 - \
          (v[0]**4+v[1]**4+v[2]**4) \
          /#------------------------------------
-         numpy.inner(v,v)**2 \
+         np.inner(v,v)**2 \
         )
 
   return 1.0/invE
@@ -45,20 +31,18 @@ def E_hkl(stiffness,vec): # stiffness = (c11,c12,c44)
 #                                MAIN
 # --------------------------------------------------------------------
 
-parser = OptionParser(option_class=extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
 Add column(s) containing directional stiffness
 based on given cubic stiffness values C11, C12, and C44 in consecutive columns.
 
-""" + string.replace('$Id$','\n','\\n')
+""", version = string.replace(scriptID,'\n','\\n')
 )
 
-parser.add_option('-c','--stiffness',   dest='vector', action='extend', type='string', \
-                  help='heading of column containing C11 (followed by C12, C44) field values', \
-                  metavar='<label>')
+parser.add_option('-c','--stiffness',   dest='vector', action='extend', type='string', metavar='<string LIST>', \
+                  help='heading of column containing C11 (followed by C12, C44) field values')
 parser.add_option('-d','--direction', \
-                  '--hkl',              dest='hkl', action='store', type='int', nargs=3, \
+                  '--hkl',              dest='hkl', action='store', type='int', nargs=3, metavar='int int int', \
                   help='direction of elastic modulus %default')
 
 parser.set_defaults(vector = [])
 parser.set_defaults(hkl = [1,1,1])
@@ -72,9 +56,7 @@ datainfo = {
                             'label':[]},
            }
 
-if options.vector != None:    datainfo['vector']['label'] += options.vector
+datainfo['vector']['label'] += options.vector
 
 # ------------------------------------------ setup file handles ---------------------------------------
@@ -87,19 +69,16 @@ else:
       files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
 
 # ------------------------------------------ loop over input files ---------------------------------------
 for file in files:
-  if file['name'] != 'STDIN': file['croak'].write(file['name']+'\n')
+  if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
+  else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
 
   table = damask.ASCIItable(file['input'],file['output'],False)  # make unbuffered ASCII_table
   table.head_read()                                         # read ASCII header info
-  table.info_append(string.replace('$Id$','\n','\\n') + \
-                    '\t' + ' '.join(sys.argv[1:]))
+  table.info_append(string.replace(scriptID,'\n','\\n') + '\t' + ' '.join(sys.argv[1:]))
 
-# --------------- figure out columns to process
-  active = {}
-  column = {}
-  head = []
+  active = defaultdict(list)
+  column = defaultdict(dict)
 
   for datatype,info in datainfo.items():
     for label in info['label']:
@@ -107,36 +86,33 @@ for file in files:
       for key in ['1_'+label,label]:
         if key in table.labels:
           foundIt = True
-          if datatype not in active: active[datatype] = []
-          if datatype not in column: column[datatype] = {}
           active[datatype].append(label)
           column[datatype][label] = table.labels.index(key) # remember columns of requested data
-          table.labels_append('E%i%i%i(%s)'%(options.hkl[0],
-                                             options.hkl[1],
-                                             options.hkl[2],label))  # extend ASCII header with new labels
       if not foundIt:
         file['croak'].write('column %s not found...\n'%label)
 
 # ------------------------------------------ assemble header ---------------------------------------
+  for datatype,labels in active.items():                    # loop over vector,tensor
+    for label in labels:                                    # loop over all requested stiffnesses
+      table.labels_append('E%i%i%i(%s)'%(options.hkl[0],
+                                         options.hkl[1],
+                                         options.hkl[2],label))  # extend ASCII header with new labels
   table.head_write()
 
-# ------------------------------------------ process data ---------------------------------------
-  while table.data_read():                                  # read next data line of ASCII table
-    for datatype,labels in active.items():                  # loop over vector,tensor
-      for label in labels:                                  # loop over all requested stiffnesses
+# ------------------------------------------ process data ----------------------------------------
+  outputAlive = True
+  while outputAlive and table.data_read():                  # read next data line of ASCII table
+    for datatype,labels in active.items():                  # loop over vector,tensor
+      for label in labels:                                  # loop over all requested stiffnesses
         table.data_append(E_hkl(map(float,table.data[column[datatype][label]:\
                                                      column[datatype][label]+datainfo[datatype]['len']]),options.hkl))
-    table.data_write()                                      # output processed line
+    outputAlive = table.data_write()                        # output processed line
 
 # ------------------------------------------ output result ---------------------------------------
-  table.output_flush()                                      # just in case of buffered ASCII table
+  outputAlive and table.output_flush()                      # just in case of buffered ASCII table
 
+  file['input'].close()                                     # close input ASCII table (works for stdin)
+  file['output'].close()                                    # close output ASCII table (works for stdout)
   if file['name'] != 'STDIN':
-    file['input'].close()                                   # close input ASCII table
-    file['output'].close()                                  # close output ASCII table
     os.rename(file['name']+'_tmp',file['name'])             # overwrite old one with tmp new
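
E_hkl evaluates the directional Young's modulus of a cubic crystal along [hkl]; the step converting stiffness (C11,C12,C44) to compliance (S11,S12,S44) falls between the hunks shown, but presumably uses the standard cubic inversion. A self-contained sketch of the same formula (aluminium constants are approximate, in GPa):

    import numpy as np

    def E_hkl(C11,C12,C44,hkl):                       # directional Young's modulus, cubic symmetry
      S11 = (C11+C12)/((C11-C12)*(C11+2.0*C12))       # textbook cubic stiffness -> compliance
      S12 =     -C12 /((C11-C12)*(C11+2.0*C12))
      S44 = 1.0/C44
      v = np.array(hkl,'d')/np.sqrt(np.inner(hkl,hkl))  # normalized direction, so inner(v,v) == 1
      invE = S11 - (S11-S12-0.5*S44)*(1.0-(v[0]**4+v[1]**4+v[2]**4))
      return 1.0/invE

    print E_hkl(107.,61.,28.,[1,0,0])                 # ~63 GPa for aluminium
    print E_hkl(107.,61.,28.,[1,1,1])                 # ~75 GPa: <111> is the stiff direction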