Moved useful ASCII post-processing scripts from my personal Code repository into the DAMASK post-processing directory.

This commit is contained in:
Philip Eisenlohr 2011-06-21 16:25:48 +00:00
parent daa45306f2
commit b252b467e8
8 changed files with 1276 additions and 0 deletions

171
processing/post/addCauchy Executable file
View File

@ -0,0 +1,171 @@
#!/usr/bin/python
import os,re,sys,math,numpy,string
from optparse import OptionParser, Option
# -----------------------------
class extendableOption(Option):
# -----------------------------
  """optparse Option subclass understanding the extra action 'extend'.

  'extend' splits a comma-separated option argument and accumulates the
  pieces into a list, so an option may be given repeatedly and/or with
  comma-separated values.  Based on the recipe in
  http://docs.python.org/library/optparse.html
  """

  # advertise the new action to optparse's bookkeeping tuples
  ACTIONS             = Option.ACTIONS + ("extend",)
  STORE_ACTIONS       = Option.STORE_ACTIONS + ("extend",)
  TYPED_ACTIONS       = Option.TYPED_ACTIONS + ("extend",)
  ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)

  def take_action(self, action, dest, opt, value, values, parser):
    if action != "extend":                                # fall through to stock behavior
      Option.take_action(self, action, dest, opt, value, values, parser)
      return
    values.ensure_value(dest, []).extend(value.split(",")) # accumulate comma-separated pieces
def prefixMultiply(what, count):
  """Return column labels for a datum of given component count.

  A multi-component datum 'x' with count 3 yields ['1_x','2_x','3_x'];
  a scalar (count == 1) yields just ['x'].

  BUGFIX: parameter was named 'len', shadowing the builtin.
  """
  if count > 1:
    return ['%i_%s'%(i+1, what) for i in range(count)]
  return [what]
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------

parser = OptionParser(option_class=extendableOption, usage='%prog options [file[s]]', description = """
Add column(s) containing Cauchy stress based on given column(s) of
deformation gradient and first Piola--Kirchhoff stress.
$Id: addCauchy 263 2011-05-25 17:42:50Z MPIE\p.eisenlohr $
""")

parser.add_option('-m','--memory', dest='memory', action='store_true', \
                  help='load complete file into memory [%default]')
parser.add_option('-f','--defgrad', dest='defgrad', type='string', \
                  help='heading of columns containing deformation gradient [%default]')
parser.add_option('-p','--stress', dest='stress', type='string', \
                  help='heading of columns containing first Piola--Kirchhoff stress [%default]')

parser.set_defaults(memory = False)
parser.set_defaults(defgrad = 'f')
parser.set_defaults(stress = 'p')

(options,filenames) = parser.parse_args()

if options.defgrad is None or options.stress is None:     # was '== None'; identity test is the correct idiom
  parser.error('missing data column...')

datainfo = {                                              # list of requested labels per datatype
             'defgrad':    {'len':9,
                            'label':[]},
             'stress':     {'len':9,
                            'label':[]},
           }

datainfo['defgrad']['label'].append(options.defgrad)
datainfo['stress']['label'].append(options.stress)

# ------------------------------------------ setup file handles ---------------------------------------

files = []
if filenames == []:
  # BUGFIX: entry used key 'handle', but the processing loop reads file['input'] /
  # file['output'] -- STDIN mode crashed with KeyError.  Aligned with sibling scripts.
  files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout})
else:
  for name in filenames:
    if os.path.exists(name):
      files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w')})
# ------------------------------------------ loop over input files ---------------------------------------
for file in files:
print file['name']
# get labels by either read the first row, or - if keyword header is present - the last line of the header
firstline = file['input'].readline()
m = re.search('(\d+)\s*head', firstline.lower())
if m:
headerlines = int(m.group(1))
passOn = [file['input'].readline() for i in range(1,headerlines)]
headers = file['input'].readline().split()
else:
headerlines = 1
passOn = []
headers = firstline.split()
if options.memory:
data = file['input'].readlines()
else:
data = []
for i,l in enumerate(headers):
if l.startswith('1_'):
if re.match('\d+_',l[2:]) or i == len(headers)-1 or not headers[i+1].endswith(l[2:]):
headers[i] = l[2:]
active = {}
column = {}
head = []
for datatype,info in datainfo.items():
for label in info['label']:
key = {True :'1_%s',
False:'%s' }[info['len']>1]%label
if key not in headers:
sys.stderr.write('column %s not found...\n'%key)
else:
if datatype not in active: active[datatype] = []
if datatype not in column: column[datatype] = {}
active[datatype].append(label)
column[datatype][label] = headers.index(key)
head += prefixMultiply('Cauchy',datainfo[datatype]['len'])
# ------------------------------------------ assemble header ---------------------------------------
output = '%i\theader'%(headerlines+1) + '\n' + \
''.join(passOn) + \
string.replace('$Id: addCauchy 263 2011-05-25 17:42:50Z MPIE\p.eisenlohr $','\n','\\n')+ '\t' + \
' '.join(sys.argv[1:]) + '\n' + \
'\t'.join(headers + head) + '\n' # build extended header
if not options.memory:
file['output'].write(output)
output = ''
# ------------------------------------------ read file ---------------------------------------
for line in {True : data,
False : file['input']}[options.memory]:
items = line.split()[:len(headers)]
if len(items) < len(headers):
continue
output += '\t'.join(items)
F = numpy.array(map(float,items[column['defgrad'][active['defgrad'][0]]:
column['defgrad'][active['defgrad'][0]]+datainfo['defgrad']['len']]),'d').reshape(3,3)
P = numpy.array(map(float,items[column['stress'][active['stress'][0]]:
column['stress'][active['stress'][0]]+datainfo['stress']['len']]),'d').reshape(3,3)
output += '\t'+'\t'.join(map(str,1.0/numpy.linalg.det(F)*numpy.dot(P,F.T).reshape(9))) # [Cauchy] = (1/det(F)) * [P].[F_transpose]
output += '\n'
if not options.memory:
file['output'].write(output)
output = ''
file['input'].close()
# ------------------------------------------ output result ---------------------------------------
if options.memory:
file['output'].write(output)
if file['name'] != 'STDIN':
file['output'].close
os.rename(file['name']+'_tmp',file['name'])

172
processing/post/addDeterminant Executable file
View File

@ -0,0 +1,172 @@
#!/usr/bin/python
import os,re,sys,math,string
from optparse import OptionParser, Option
# -----------------------------
class extendableOption(Option):
# -----------------------------
  """optparse Option subclass providing an extra action 'extend'.

  'extend' splits a comma-separated option argument and accumulates the
  pieces into a list, so an option may be given repeatedly.
  Taken from the online tutorial http://docs.python.org/library/optparse.html
  """

  # advertise the new action to optparse's bookkeeping tuples
  ACTIONS = Option.ACTIONS + ("extend",)
  STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
  TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
  ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)

  def take_action(self, action, dest, opt, value, values, parser):
    if action == "extend":
      lvalue = value.split(",")                     # split 'a,b,c' into pieces
      values.ensure_value(dest, []).extend(lvalue)  # accumulate into (possibly fresh) list
    else:
      Option.take_action(self, action, dest, opt, value, values, parser)
def determinant(m):
  """Determinant of a 3x3 matrix given as a flat sequence of 9 components (row major).

  Expansion by the rule of Sarrus.

  BUGFIX: removed the dangling trailing backslash after the last term, which
  continued the expression into the following (comment) line.
  """
  return + m[0]*m[4]*m[8] \
         + m[1]*m[5]*m[6] \
         + m[2]*m[3]*m[7] \
         - m[2]*m[4]*m[6] \
         - m[1]*m[3]*m[8] \
         - m[0]*m[5]*m[7]
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------

parser = OptionParser(option_class=extendableOption, usage='%prog options [file[s]]', description = """
Add column(s) containing determinant of requested tensor column(s).
$Id: addDeterminant 278 2011-06-15 17:57:42Z MPIE\p.eisenlohr $
""")

parser.add_option('-m','--memory', dest='memory', action='store_true', \
                  help='load complete file into memory [%default]')
parser.add_option('-t','--tensor', dest='tensor', action='extend', type='string', \
                  help='heading of columns containing tensor field values')

parser.set_defaults(memory = False)
parser.set_defaults(vector = [])                  # NOTE(review): no '-v/--vector' option is defined; default kept so the checks below work
parser.set_defaults(tensor = [])

(options,filenames) = parser.parse_args()

if len(options.vector) + len(options.tensor) == 0:
  parser.error('no data column specified...')

datainfo = {                                      # list of requested labels per datatype
             'vector':     {'len':3,
                            'label':[]},
             'tensor':     {'len':9,
                            'label':[]},
           }

if options.vector is not None: datainfo['vector']['label'] += options.vector  # was '!= None'; identity test is the correct idiom
if options.tensor is not None: datainfo['tensor']['label'] += options.tensor

# ------------------------------------------ setup file handles ---------------------------------------

files = []
if filenames == []:
  files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout})
else:
  for name in filenames:
    if os.path.exists(name):
      files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w')})
# ------------------------------------------ loop over input files ---------------------------------------
for file in files:
print file['name']
# get labels by either read the first row, or - if keyword header is present - the last line of the header
firstline = file['input'].readline()
m = re.search('(\d+)\s*head', firstline.lower())
if m:
headerlines = int(m.group(1))
passOn = [file['input'].readline() for i in range(1,headerlines)]
headers = file['input'].readline().split()
else:
headerlines = 1
passOn = []
headers = firstline.split()
if options.memory:
data = file['input'].readlines()
else:
data = []
for i,l in enumerate(headers):
if l.startswith('1_'):
if re.match('\d+_',l[2:]) or i == len(headers)-1 or not headers[i+1].endswith(l[2:]):
headers[i] = l[2:]
active = {}
column = {}
head = []
for datatype,info in datainfo.items():
for label in info['label']:
key = {True :'1_%s',
False:'%s' }[info['len']>1]%label
if key not in headers:
sys.stderr.write('column %s not found...\n'%key)
else:
if datatype not in active: active[datatype] = []
if datatype not in column: column[datatype] = {}
active[datatype].append(label)
column[datatype][label] = headers.index(key)
head.append('det(%s)'%label)
# ------------------------------------------ assemble header ---------------------------------------
output = '%i\theader'%(headerlines+1) + '\n' + \
''.join(passOn) + \
string.replace('$Id: addDeterminant 278 2011-06-15 17:57:42Z MPIE\p.eisenlohr $','\n','\\n')+ '\t' + \
' '.join(sys.argv[1:]) + '\n' + \
'\t'.join(headers + head) + '\n' # build extended header
if not options.memory:
file['output'].write(output)
output = ''
# ------------------------------------------ read file ---------------------------------------
for line in {True : data,
False : file['input']}[options.memory]:
items = line.split()[:len(headers)]
if len(items) < len(headers):
continue
output += '\t'.join(items)
for datatype,labels in active.items():
for label in labels:
theValue = determinant(map(float,items[column[datatype][label]:
column[datatype][label]+datainfo[datatype]['len']]))
output += '\t%f'%theValue
output += '\n'
if not options.memory:
file['output'].write(output)
output = ''
file['input'].close()
# ------------------------------------------ output result ---------------------------------------
if options.memory:
file['output'].write(output)
if file['name'] != 'STDIN':
file['output'].close
os.rename(file['name']+'_tmp',file['name'])

221
processing/post/addDivergence Executable file
View File

@ -0,0 +1,221 @@
#!/usr/bin/python
import os,re,sys,math,string
from optparse import OptionParser, Option
# -----------------------------
class extendableOption(Option):
# -----------------------------
  """optparse Option subclass providing an extra action 'extend'.

  'extend' splits a comma-separated option argument and accumulates the
  pieces into a list, so an option may be given repeatedly.
  Taken from the online tutorial http://docs.python.org/library/optparse.html
  """

  # advertise the new action to optparse's bookkeeping tuples
  ACTIONS = Option.ACTIONS + ("extend",)
  STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
  TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
  ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)

  def take_action(self, action, dest, opt, value, values, parser):
    if action == "extend":
      lvalue = value.split(",")                     # split 'a,b,c' into pieces
      values.ensure_value(dest, []).extend(lvalue)  # accumulate into (possibly fresh) list
    else:
      Option.take_action(self, action, dest, opt, value, values, parser)
def location(idx, res):
  """Map a linear grid index to periodic (x, y, z) coordinates, x running fastest."""
  x = idx % res[0]
  y = (idx // res[0]) % res[1]
  z = (idx // res[0] // res[1]) % res[2]
  return (x, y, z)
def index(location, res):
  """Map periodic (x, y, z) coordinates to the linear grid index, x running fastest.

  Coordinates are wrapped modulo the resolution, so out-of-range (e.g. negative)
  values address the periodic image.
  """
  x = location[0] % res[0]
  y = location[1] % res[1]
  z = location[2] % res[2]
  return x + res[0]*(y + res[1]*z)
def prefixMultiply(what, count):
  """Return column labels for a datum of given component count.

  A multi-component datum 'x' with count 3 yields ['1_x','2_x','3_x'];
  a scalar (count == 1) yields just ['x'].

  BUGFIX: parameter was named 'len', shadowing the builtin.
  """
  if count > 1:
    return ['%i_%s'%(i+1, what) for i in range(count)]
  return [what]
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------

# central finite-difference coefficients for accuracy orders 2, 4, 6, 8
FDcoefficients = [ \
   [1.0/2.0, 0.0, 0.0, 0.0],\
   [2.0/3.0,-1.0/12.0, 0.0, 0.0],\
   [3.0/4.0,-3.0/20.0,1.0/ 60.0, 0.0],\
   [4.0/5.0,-1.0/ 5.0,4.0/105.0,-1.0/280.0],\
   ]

parser = OptionParser(option_class=extendableOption, usage='%prog options [file[s]]', description = """
Add column(s) containing divergence of requested column(s).
Operates on periodic ordered three-dimensional data sets.
Deals with both vector- and tensor-valued fields.
$Id: addDivergence 264 2011-05-25 17:43:45Z MPIE\p.eisenlohr $
""")

parser.add_option('-a','--accuracy', dest='accuracy', type='int', \
                  help='degree of central difference accuracy [%default]')
parser.add_option('-v','--vector', dest='vector', action='extend', type='string', \
                  help='heading of columns containing vector field values')
parser.add_option('-t','--tensor', dest='tensor', action='extend', type='string', \
                  help='heading of columns containing tensor field values')
parser.add_option('-d','--dimension', dest='dim', type='float', nargs=3, \
                  help='physical dimension of data set in x (fast) y z (slow) [%default]')
parser.add_option('-r','--resolution', dest='res', type='int', nargs=3, \
                  help='resolution of data set in x (fast) y z (slow)')

parser.set_defaults(accuracy = 8)
parser.set_defaults(vector = [])
parser.set_defaults(tensor = [])
parser.set_defaults(dim = [1.0,1.0,1.0])

accuracyChoices = [2,4,6,8]

(options,filenames) = parser.parse_args()

if len(options.vector) + len(options.tensor) == 0:
  parser.error('no data column specified...')
if len(options.dim) < 3:
  parser.error('improper dimension specification...')
if not options.res or len(options.res) < 3:
  parser.error('improper resolution specification...')
if options.accuracy not in accuracyChoices:
  # BUGFIX: joining the int list directly raised TypeError; convert to str first
  parser.error('accuracy must be chosen from %s...'%(', '.join(map(str,accuracyChoices))))

accuracy = options.accuracy//2-1                  # row index into FDcoefficients

datainfo = {                                      # list of requested labels per datatype
             'vector':     {'len':3,
                            'label':[]},
             'tensor':     {'len':9,
                            'label':[]},
           }

if options.vector is not None: datainfo['vector']['label'] += options.vector  # was '!= None'; identity test is the correct idiom
if options.tensor is not None: datainfo['tensor']['label'] += options.tensor

# ------------------------------------------ setup file handles ---------------------------------------

files = []
if filenames == []:
  files.append({'name':'STDIN', 'handle':sys.stdin})
else:
  for name in filenames:
    if os.path.exists(name):
      files.append({'name':name, 'handle':open(name)})
# ------------------------------------------ loop over input files ---------------------------------------

for file in files:
  print file['name']

  content = file['handle'].readlines()            # whole file is read at once (needed for the two passes below)
  file['handle'].close()

  # get labels by either read the first row, or - if keyword header is present - the last line of the header
  headerlines = 1
  m = re.search('(\d+)\s*head', content[0].lower())
  if m:
    headerlines = int(m.group(1))
  passOn  = content[1:headerlines]
  headers = content[headerlines].split()
  data    = content[headerlines+1:]

  # reduce '1_i_label' of multi-component data to 'i_label'
  regexp = re.compile('1_\d+_')
  for i,l in enumerate(headers):
    if regexp.match(l):
      headers[i] = l[2:]

  active = {}                                     # labels found in this file, per datatype
  column = {}                                     # column index of each found label
  values = {}                                     # full field of each found label
  head = []                                       # additional header entries

  for datatype,info in datainfo.items():
    for label in info['label']:
      key = {True :'1_%s',
             False:'%s' }[info['len']>1]%label
      if key not in headers:
        print 'column %s not found...'%key
      else:
        if datatype not in active: active[datatype] = []
        if datatype not in column: column[datatype] = {}
        if datatype not in values: values[datatype] = {}
        active[datatype].append(label)
        column[datatype][label] = headers.index(key)
        # pre-allocate the full (res_x * res_y * res_z) field for this datum
        values[datatype][label] = [[0.0 for i in range(datainfo[datatype]['len'])] \
                                        for j in range(options.res[0]*options.res[1]*options.res[2])]
        # one divergence column per 3 components (relies on Python 2 integer division)
        head += prefixMultiply('div%i(%s)'%(options.accuracy,label),datainfo[datatype]['len']/3)

# ------------------------------------------ assemble header ---------------------------------------

  output = '%i\theader'%(headerlines+1) + '\n' + \
           ''.join(passOn) + \
           string.replace('$Id: addDivergence 264 2011-05-25 17:43:45Z MPIE\p.eisenlohr $','\n','\\n')+ '\t' + \
           ' '.join(sys.argv[1:]) + '\n' + \
           '\t'.join(headers + head) + '\n'                                   # build extended header

# ------------------------------------------ read value field ---------------------------------------

  # first pass: store the whole field, since each stencil needs neighboring points
  idx = 0
  for line in data:
    items = line.split()[:len(headers)]
    if len(items) < len(headers):                 # skip too-short (e.g. empty) lines
      continue

    for datatype,labels in active.items():
      for label in labels:
        values[datatype][label][idx] = map(float,items[column[datatype][label]:
                                                       column[datatype][label]+datainfo[datatype]['len']])
    idx += 1

# ------------------------------------------ read file ---------------------------------------

  # second pass: evaluate the central-difference divergence at every grid point
  idx = 0
  for line in data:
    items = line.split()[:len(headers)]
    if len(items) < len(headers):
      continue

    output += '\t'.join(items)

    (x,y,z) = location(idx,options.res)
    for datatype,labels in active.items():
      for label in labels:
        for k in range(datainfo[datatype]['len']/3):                          # formulas from Mikhail Itskov: Tensor Algebra and Tensor Analysis for Engineers, Springer 2009, p 52
          theDiv = 0.0
          # widening central-difference stencil; index() wraps periodically at the box edges
          for a in range(1+accuracy):
            theDiv += FDcoefficients[accuracy][a] * \
                      ( \
                      (values[datatype][label][index([x+1+a,y,z],options.res)][k*3+0] - \
                       values[datatype][label][index([x-1-a,y,z],options.res)][k*3+0]) * options.res[0] / options.dim[0] + \
                      (values[datatype][label][index([x,y+1+a,z],options.res)][k*3+1] - \
                       values[datatype][label][index([x,y-1-a,z],options.res)][k*3+1]) * options.res[1] / options.dim[1] + \
                      (values[datatype][label][index([x,y,z+1+a],options.res)][k*3+2] - \
                       values[datatype][label][index([x,y,z-1-a],options.res)][k*3+2]) * options.res[2] / options.dim[2] \
                      )
          output += '\t%f'%theDiv
    output += '\n'

    idx += 1

# ------------------------------------------ output result ---------------------------------------

  if file['name'] == 'STDIN':
    print output
  else:
    file['handle'] = open(file['name']+'_tmp','w')
    try:
      file['handle'].write(output)
      file['handle'].close()
      os.rename(file['name']+'_tmp',file['name'])
    except:                                       # NOTE(review): bare except hides the real error -- consider catching IOError/OSError only
      print 'error during writing',file['name']+'_tmp'

175
processing/post/addMises Executable file
View File

@ -0,0 +1,175 @@
#!/usr/bin/python
import os,re,sys,math,numpy,string
from optparse import OptionParser, Option
# -----------------------------
class extendableOption(Option):
# -----------------------------
  """optparse Option subclass providing an extra action 'extend'.

  'extend' splits a comma-separated option argument and accumulates the
  pieces into a list, so an option may be given repeatedly.
  Taken from the online tutorial http://docs.python.org/library/optparse.html
  """

  # advertise the new action to optparse's bookkeeping tuples
  ACTIONS = Option.ACTIONS + ("extend",)
  STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
  TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
  ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)

  def take_action(self, action, dest, opt, value, values, parser):
    if action == "extend":
      lvalue = value.split(",")                     # split 'a,b,c' into pieces
      values.ensure_value(dest, []).extend(lvalue)  # accumulate into (possibly fresh) list
    else:
      Option.take_action(self, action, dest, opt, value, values, parser)
def Mises(what, tensor):
  """von Mises equivalent of the symmetric deviatoric part of a 3x3 tensor.

  'what' selects the normalization factor: 3/2 for 'stress', 2/3 for 'strain'
  (any other value raises KeyError).
  """
  factor = {
             'stress': 3.0/2.0,
             'strain': 2.0/3.0,
           }[what]
  deviator = tensor - numpy.eye(3)*numpy.trace(tensor)/3.0
  sym = (deviator + deviator.T)*0.5
  return math.sqrt(factor*numpy.sum(sym*sym.T))
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------

parser = OptionParser(option_class=extendableOption, usage='%prog options [file[s]]', description = """
Add vonMises equivalent values for symmetric part of requested strains and/or stresses.
$Id: addMises 263 2011-05-25 17:42:50Z MPIE\p.eisenlohr $
""")

parser.add_option('-m','--memory', dest='memory', action='store_true', \
                  help='load complete file into memory [%default]')
parser.add_option('-e','--strain', dest='strain', action='extend', type='string', \
                  help='heading(s) of columns containing strain tensors')
parser.add_option('-s','--stress', dest='stress', action='extend', type='string', \
                  help='heading(s) of columns containing stress tensors')

parser.set_defaults(memory = False)
parser.set_defaults(strain = [])
parser.set_defaults(stress = [])

(options,filenames) = parser.parse_args()

if len(options.strain) + len(options.stress) == 0:
  parser.error('no data column specified...')

datainfo = {                                      # list of requested labels per datatype
             'strain':     {'len':9,
                            'label':[]},
             'stress':     {'len':9,
                            'label':[]},
           }

if options.strain is not None: datainfo['strain']['label'] += options.strain  # was '!= None'; identity test is the correct idiom
if options.stress is not None: datainfo['stress']['label'] += options.stress

# ------------------------------------------ setup file handles ---------------------------------------

files = []
if filenames == []:
  files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout})
else:
  for name in filenames:
    if os.path.exists(name):
      files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w')})
# ------------------------------------------ loop over input files ---------------------------------------
for file in files:
print file['name']
# get labels by either read the first row, or - if keyword header is present - the last line of the header
firstline = file['input'].readline()
m = re.search('(\d+)\s*head', firstline.lower())
if m:
headerlines = int(m.group(1))
passOn = [file['input'].readline() for i in range(1,headerlines)]
headers = file['input'].readline().split()
else:
headerlines = 1
passOn = []
headers = firstline.split()
if options.memory:
data = file['input'].readlines()
else:
data = []
for i,l in enumerate(headers):
if l.startswith('1_'):
if re.match('\d+_',l[2:]) or i == len(headers)-1 or not headers[i+1].endswith(l[2:]):
headers[i] = l[2:]
active = {}
column = {}
head = []
for datatype,info in datainfo.items():
for label in info['label']:
key = {True :'1_%s',
False:'%s' }[info['len']>1]%label
if key not in headers:
sys.stderr.write('column %s not found...\n'%key)
else:
if datatype not in active: active[datatype] = []
if datatype not in column: column[datatype] = {}
active[datatype].append(label)
column[datatype][label] = headers.index(key)
head.append('Mises(%s)'%label)
# ------------------------------------------ assemble header ---------------------------------------
output = '%i\theader'%(headerlines+1) + '\n' + \
''.join(passOn) + \
string.replace('$Id: addMises 263 2011-05-25 17:42:50Z MPIE\p.eisenlohr $','\n','\\n')+ '\t' + \
' '.join(sys.argv[1:]) + '\n' + \
'\t'.join(headers + head) + '\n' # build extended header
if not options.memory:
file['output'].write(output)
output = ''
# ------------------------------------------ read file ---------------------------------------
for line in {True : data,
False : file['input']}[options.memory]:
items = line.split()[:len(headers)]
if len(items) < len(headers):
continue
output += '\t'.join(items)
for datatype,labels in active.items():
for label in labels:
theMises = Mises(datatype,
numpy.array(map(float,items[column[datatype][label]:
column[datatype][label]+datainfo[datatype]['len']]),'d').reshape(3,3))
output += '\t%f'%theMises
output += '\n'
if not options.memory:
file['output'].write(output)
output = ''
file['input'].close()
# ------------------------------------------ output result ---------------------------------------
if options.memory:
file['output'].write(output)
if file['name'] != 'STDIN':
file['output'].close
os.rename(file['name']+'_tmp',file['name'])

168
processing/post/addNorm Executable file
View File

@ -0,0 +1,168 @@
#!/usr/bin/python
import os,re,sys,math,string
from optparse import OptionParser, Option
# -----------------------------
class extendableOption(Option):
# -----------------------------
  """optparse Option subclass providing an extra action 'extend'.

  'extend' splits a comma-separated option argument and accumulates the
  pieces into a list, so an option may be given repeatedly.
  Taken from the online tutorial http://docs.python.org/library/optparse.html
  """

  # advertise the new action to optparse's bookkeeping tuples
  ACTIONS = Option.ACTIONS + ("extend",)
  STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
  TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
  ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)

  def take_action(self, action, dest, opt, value, values, parser):
    if action == "extend":
      lvalue = value.split(",")                     # split 'a,b,c' into pieces
      values.ensure_value(dest, []).extend(lvalue)  # accumulate into (possibly fresh) list
    else:
      Option.take_action(self, action, dest, opt, value, values, parser)
def L2(values):
  """Euclidean (L2) norm of a flat sequence of numbers.

  BUGFIX: parameter was named 'object', shadowing the builtin.
  """
  return math.sqrt(sum([x*x for x in values]))
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------

parser = OptionParser(option_class=extendableOption, usage='%prog options [file[s]]', description = """
Add column(s) containing norm of requested column(s) being either vectors or tensors.
$Id: addNorm 263 2011-05-25 17:42:50Z MPIE\p.eisenlohr $
""")

parser.add_option('-m','--memory', dest='memory', action='store_true', \
                  help='load complete file into memory [%default]')
parser.add_option('-v','--vector', dest='vector', action='extend', type='string', \
                  help='heading of columns containing vector field values')
parser.add_option('-t','--tensor', dest='tensor', action='extend', type='string', \
                  help='heading of columns containing tensor field values')

parser.set_defaults(memory = False)
parser.set_defaults(vector = [])
parser.set_defaults(tensor = [])

(options,filenames) = parser.parse_args()

if len(options.vector) + len(options.tensor) == 0:
  parser.error('no data column specified...')

datainfo = {                                      # list of requested labels per datatype
             'vector':     {'len':3,
                            'label':[]},
             'tensor':     {'len':9,
                            'label':[]},
           }

if options.vector is not None: datainfo['vector']['label'] += options.vector  # was '!= None'; identity test is the correct idiom
if options.tensor is not None: datainfo['tensor']['label'] += options.tensor

# ------------------------------------------ setup file handles ---------------------------------------

files = []
if filenames == []:
  files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout})
else:
  for name in filenames:
    if os.path.exists(name):
      files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w')})
# ------------------------------------------ loop over input files ---------------------------------------
for file in files:
print file['name']
# get labels by either read the first row, or - if keyword header is present - the last line of the header
firstline = file['input'].readline()
m = re.search('(\d+)\s*head', firstline.lower())
if m:
headerlines = int(m.group(1))
passOn = [file['input'].readline() for i in range(1,headerlines)]
headers = file['input'].readline().split()
else:
headerlines = 1
passOn = []
headers = firstline.split()
if options.memory:
data = file['input'].readlines()
else:
data = []
for i,l in enumerate(headers):
if l.startswith('1_'):
if re.match('\d+_',l[2:]) or i == len(headers)-1 or not headers[i+1].endswith(l[2:]):
headers[i] = l[2:]
active = {}
column = {}
head = []
for datatype,info in datainfo.items():
for label in info['label']:
key = {True :'1_%s',
False:'%s' }[info['len']>1]%label
if key not in headers:
sys.stderr.write('column %s not found...\n'%key)
else:
if datatype not in active: active[datatype] = []
if datatype not in column: column[datatype] = {}
active[datatype].append(label)
column[datatype][label] = headers.index(key)
head.append('norm(%s)'%label)
# ------------------------------------------ assemble header ---------------------------------------
output = '%i\theader'%(headerlines+1) + '\n' + \
''.join(passOn) + \
string.replace('$Id: addNorm 263 2011-05-25 17:42:50Z MPIE\p.eisenlohr $','\n','\\n')+ '\t' + \
' '.join(sys.argv[1:]) + '\n' + \
'\t'.join(headers + head) + '\n' # build extended header
if not options.memory:
file['output'].write(output)
output = ''
# ------------------------------------------ read file ---------------------------------------
for line in {True : data,
False : file['input']}[options.memory]:
items = line.split()[:len(headers)]
if len(items) < len(headers):
continue
output += '\t'.join(items)
for datatype,labels in active.items():
for label in labels:
theNorm = L2(map(float,items[column[datatype][label]:
column[datatype][label]+datainfo[datatype]['len']]))
output += '\t%f'%theNorm
output += '\n'
if not options.memory:
file['output'].write(output)
output = ''
file['input'].close()
# ------------------------------------------ output result ---------------------------------------
if options.memory:
file['output'].write(output)
if file['name'] != 'STDIN':
file['output'].close
os.rename(file['name']+'_tmp',file['name'])

215
processing/post/addStrainTensors Executable file
View File

@ -0,0 +1,215 @@
#!/usr/bin/python
import os,re,sys,math,numpy,string
from optparse import OptionParser, Option
def operator(how, vector):
  """Map (elementwise) principal-stretch values to the requested strain measure.

  Unknown 'how' raises KeyError, matching the original dict lookup.
  NOTE(review): 'Biot' and 'Green' omit the customary '-1' shift;
  presumably applied by the caller -- confirm downstream.
  """
  measures = {
               'ln':    lambda v: numpy.log(v)*1.0,
               'Biot':  lambda v: v*1.0,
               'Green': lambda v: v*v*0.5,
             }
  return measures[how](vector)
# -----------------------------
class extendableOption(Option):
# -----------------------------
  """optparse Option subclass providing an extra action 'extend'.

  'extend' splits a comma-separated option argument and accumulates the
  pieces into a list, so an option may be given repeatedly.
  Taken from the online tutorial http://docs.python.org/library/optparse.html
  """

  # advertise the new action to optparse's bookkeeping tuples
  ACTIONS = Option.ACTIONS + ("extend",)
  STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
  TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
  ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)

  def take_action(self, action, dest, opt, value, values, parser):
    if action == "extend":
      lvalue = value.split(",")                     # split 'a,b,c' into pieces
      values.ensure_value(dest, []).extend(lvalue)  # accumulate into (possibly fresh) list
    else:
      Option.take_action(self, action, dest, opt, value, values, parser)
def prefixMultiply(what,len):
  """Return column labels for a len-component quantity.

  For len > 1 the label is repeated with a 1-based component prefix
  ('1_what', '2_what', ...); a scalar quantity keeps its plain label.
  """
  if len > 1:
    return ['%i_%s'%(component+1,what) for component in range(len)]
  return [what]
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=extendableOption, usage='%prog options [file[s]]', description = """
Add column(s) containing given strains based on given stretches of requested deformation gradient column(s).
$Id: addStrainTensors 274 2011-06-14 12:59:39Z MPIE\o.guevenc $
""")
parser.add_option('-m','--memory', dest='memory', action='store_true', \
help='load complete file into memory [%default]')
parser.add_option('-u','--right', action='store_true', dest='right', \
help='calculate strains based on right Cauchy--Green deformation, i.e., C and U')
parser.add_option('-v','--left', action='store_true', dest='left', \
help='calculate strains based on left Cauchy--Green deformation, i.e., B and V')
parser.add_option('-l','--logarithmic', action='store_true', dest='logarithmic', \
help='calculate logarithmic strain tensor')
parser.add_option('-b','--biot', action='store_true', dest='biot', \
help='calculate biot strain tensor')
parser.add_option('-g','--green', action='store_true', dest='green', \
help='calculate green strain tensor')
parser.add_option('-f','--deformation', dest='defgrad', action='extend', type='string', \
help='heading(s) of columns containing deformation tensor values %default')
parser.set_defaults(memory = False)
parser.set_defaults(right = False)
parser.set_defaults(left = False)
parser.set_defaults(logarithmic = False)
parser.set_defaults(biot = False)
parser.set_defaults(green = False)
parser.set_defaults(defgrad = ['f'])
(options,filenames) = parser.parse_args()
stretches = []                                      # stretch measures to compute ('U' right, 'V' left)
stretch = {}                                        # per-row storage for the computed stretch tensors
strains = []                                        # strain formulations applied to each stretch

if options.right:       stretches.append('U')
if options.left:        stretches.append('V')
if options.logarithmic: strains.append('ln')
if options.biot:        strains.append('Biot')
if options.green:       strains.append('Green')

datainfo = {                                        # list of requested labels per datatype
             'defgrad': {'len':9,                   # deformation gradient carries 9 components
                         'label':[]},
           }

if options.defgrad is not None:                     # identity comparison with None (was '!= None')
  datainfo['defgrad']['label'] += options.defgrad
# ------------------------------------------ setup file handles ---------------------------------------
# With no filenames given, filter STDIN to STDOUT; otherwise each existing file
# is paired with a '<name>_tmp' output that later replaces the original.
files = []
if filenames == []:
  files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout})
else:
  for name in filenames:
    if os.path.exists(name):                                               # NOTE(review): nonexistent names are skipped silently
      files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w')})
# ------------------------------------------ loop over input files ---------------------------------------
# NOTE: Python 2 script (print statement, list-returning map); indentation
# reconstructed — the committed rendering flattened it.
for file in files:
  print file['name']

# get labels by either read the first row, or - if keyword header is present - the last line of the header
  firstline = file['input'].readline()
  m = re.search('(\d+)\s*head', firstline.lower())                         # e.g. '4 header' announces 4 header lines
  if m:
    headerlines = int(m.group(1))
    passOn = [file['input'].readline() for i in range(1,headerlines)]      # header lines to copy through unchanged
    headers = file['input'].readline().split()                             # last header line holds the column labels
  else:
    headerlines = 1
    passOn = []
    headers = firstline.split()                                            # headerless convention: first line is labels

  if options.memory:
    data = file['input'].readlines()                                       # slurp whole table when -m given
  else:
    data = []

  # Collapse '1_foo' to 'foo' when it is the only component of that label
  # (i.e. not followed by a matching '2_foo' neighbor).
  for i,l in enumerate(headers):
    if l.startswith('1_'):
      if re.match('\d+_',l[2:]) or i == len(headers)-1 or not headers[i+1].endswith(l[2:]):
        headers[i] = l[2:]

  active = {}                                                              # datatype -> list of labels actually found
  column = {}                                                              # datatype -> {label: first column index}
  head = []                                                                # new column headings to append

  for datatype,info in datainfo.items():
    for label in info['label']:
      key = {True :'1_%s',
             False:'%s' }[info['len']>1]%label                             # multi-component labels start at '1_<label>'
      if key not in headers:
        sys.stderr.write('column %s not found...\n'%key)
      else:
        if datatype not in active: active[datatype] = []
        if datatype not in column: column[datatype] = {}
        active[datatype].append(label)
        column[datatype][label] = headers.index(key)
        # one 9-component block per requested strain(stretch) combination
        # (assumes this nesting level — TODO confirm against upstream source)
        for theStretch in stretches:
          for theStrain in strains:
            head += prefixMultiply('%s(%s)'%(theStrain,theStretch), 9)

# ------------------------------------------ assemble header ---------------------------------------
  output = '%i\theader'%(headerlines+1) + '\n' + \
           ''.join(passOn) + \
           string.replace('$Id: addStrainTensors 274 2011-06-14 12:59:39Z MPIE\o.guevenc $','\n','\\n')+ '\t' + \
           ' '.join(sys.argv[1:]) + '\n' + \
           '\t'.join(headers + head) + '\n'                                # build extended header

  if not options.memory:
    file['output'].write(output)                                           # streaming mode: flush header immediately
    output = ''

# ------------------------------------------ read file ---------------------------------------
  for line in {True : data,
               False : file['input']}[options.memory]:
    items = line.split()[:len(headers)]
    if len(items) < len(headers):                                          # skip short (incomplete) rows
      continue

    output += '\t'.join(items)

    for datatype,labels in active.items():
      for label in labels:
        # 9 consecutive columns -> 3x3 deformation gradient F
        defgrad = numpy.array(map(float,items[column[datatype][label]:
                                              column[datatype][label]+datainfo[datatype]['len']]),'d').reshape(3,3)
        (U,S,Vh) = numpy.linalg.svd(defgrad)
        R = numpy.dot(U,Vh)                                                # rotation from polar decomposition F = R U = V R
        stretch['U'] = numpy.dot(numpy.linalg.inv(R),defgrad)              # right stretch U = R^-1 F
        stretch['V'] = numpy.dot(defgrad,numpy.linalg.inv(R))              # left stretch  V = F R^-1
        for theStretch in stretches:
          for i in range(9):
            # NOTE(review): signed comparison zeroes any component below 1e-15
            # including sizeable NEGATIVE entries — likely meant abs(...) < 1e-15; confirm
            if stretch[theStretch][i%3,i//3] < 1e-15:
              stretch[theStretch][i%3,i//3] = 0.0
          (D,V) = numpy.linalg.eig(stretch[theStretch])                    # eigen decomposition (of symmetric matrix)
          for i,eigval in enumerate(D):
            if eigval < 0.0:                                               # flip negative eigenvalues
              D[i] = -D[i]
              V[:,i] = -V[:,i]
            if numpy.dot(V[:,i],V[:,(i+1)%3]) != 0.0:                      # check each vector for orthogonality
              V[:,(i+1)%3] = numpy.cross(V[:,(i+2)%3],V[:,i])              # correct next vector
              V[:,(i+1)%3] /= numpy.sqrt(numpy.dot(V[:,(i+1)%3],V[:,(i+1)%3].conj()))  # and renormalize (hyperphobic?)
          for theStrain in strains:
            d = operator(theStrain,D)                                      # operate on eigenvalues of U or V
            I = operator(theStrain,numpy.ones(3))                          # operate on eigenvalues of I (i.e. [1,1,1])
            eps = (numpy.dot(V,numpy.dot(numpy.diag(d),V.T)).real-numpy.diag(I)).reshape(9)  # build tensor back from eigenvalue/vector basis
            output += '\t'+'\t'.join(map(str,eps))

    output += '\n'

    if not options.memory:
      file['output'].write(output)                                         # streaming mode: one row at a time
      output = ''

  file['input'].close()

# ------------------------------------------ output result ---------------------------------------
  if options.memory:
    file['output'].write(output)                                           # memory mode: everything at once

  if file['name'] != 'STDIN':
    file['output'].close                                                   # NOTE(review): missing '()' — this does NOT close the file
    os.rename(file['name']+'_tmp',file['name'])                            # replace original with augmented table

148
processing/post/averageDown Executable file
View File

@ -0,0 +1,148 @@
#!/usr/bin/python
import os,re,sys,math,string,numpy
from optparse import OptionParser, Option
# -----------------------------
class extendableOption(Option):
# -----------------------------
  """optparse Option subclass adding an 'extend' action that splits a
  comma-separated argument and accumulates the pieces into the destination list.
  """
# used for definition of new option parser action 'extend', which enables to take multiple option arguments
# taken from online tutorial http://docs.python.org/library/optparse.html

  ACTIONS = Option.ACTIONS + ("extend",)
  STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
  TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
  ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)

  def take_action(self, action, dest, opt, value, values, parser):
    # 'extend': append comma-separated pieces; anything else: stock optparse behavior
    if action == "extend":
      lvalue = value.split(",")
      values.ensure_value(dest, []).extend(lvalue)
    else:
      Option.take_action(self, action, dest, opt, value, values, parser)
def location(idx,res):
  """Map a flat cell index onto (fast, medium, slow) grid coordinates for
  resolution res, fast dimension varying quickest."""
  fast   =  idx % res[0]
  medium = (idx // res[0]) % res[1]
  slow   = (idx // res[0] // res[1]) % res[2]
  return numpy.array([fast, medium, slow])
def index(location,res):
return ( location[0] % res[0] + \
(location[1] % res[1]) * res[0] + \
(location[2] % res[2]) * res[0] * res[1] )
# --------------------------------------------------------------------
#                               MAIN
# --------------------------------------------------------------------

parser = OptionParser(option_class=extendableOption, usage='%prog [options] [file[s]]', description = """
Average each data block of size 'packing' into single values thus reducing the former resolution
to resolution/packing. (Requires numpy.)

$Id: averageDown 266 2011-05-29 19:38:42Z MPIE\p.eisenlohr $
""")

parser.add_option('-m','--memory', dest='memory', action='store_true', \
                  help='load complete file into memory [%default]')
parser.add_option('-r','--resolution', dest='res', type='int', nargs=3, \
                  help='resolution in fast, medium, and slow dimension [%default]')
parser.add_option('-p','--packing', dest='packing', type='int', nargs=3, \
                  help='number of data points to average down in each dimension [%default]')

parser.set_defaults(memory = False)
# BUGFIX: default and validation must use dest 'res' (as declared above) —
# storing it under 'resolution' left options.res == None whenever -r was
# omitted, crashing later at the numpy.zeros sizing of averagedDown.
parser.set_defaults(res = [32,32,32])
parser.set_defaults(packing = [2,2,2])

(options,filenames) = parser.parse_args()

if len(options.res) < 3:
  parser.error('resolution needs three parameters...')
if len(options.packing) < 3:
  parser.error('packing needs three parameters...')

options.packing = numpy.array(options.packing)                             # array form allows elementwise // later
# ------------------------------------------ setup file handles ---------------------------------------
# With no filenames given, filter STDIN to STDOUT; otherwise each existing file
# gets a sibling 'avgDown_<name>' output (the original is kept untouched).
files = []
if filenames == []:
  files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout})
else:
  for name in filenames:
    if os.path.exists(name):                                               # NOTE(review): nonexistent names are skipped silently
      (head,tail) = os.path.split(name)
      files.append({'name':name, 'input':open(name), 'output':open(os.path.join(head,'avgDown_%s'%tail),'w')})
# ------------------------------------------ loop over input files ---------------------------------------
# NOTE: Python 2 script (print statement, integer '/' division on grid sizes);
# indentation reconstructed — the committed rendering flattened it.
for file in files:
  print file['name']

# get labels by either read the first row, or - if keyword header is present - the last line of the header
  firstline = file['input'].readline()
  m = re.search('(\d+)\s*head', firstline.lower())                         # e.g. '4 header' announces 4 header lines
  if m:
    headerlines = int(m.group(1))
    passOn = [file['input'].readline() for i in range(1,headerlines)]      # header lines copied through unchanged
    headers = file['input'].readline().split()                             # last header line holds the column labels
  else:
    headerlines = 1
    passOn = []
    headers = firstline.split()

  if options.memory:
    data = file['input'].readlines()                                       # slurp whole table when -m given
  else:
    data = []

# ------------------------------------------ assemble header ---------------------------------------
  output = '%i\theader'%(headerlines+1) + '\n' + \
           ''.join(passOn) + \
           string.replace('$Id: averageDown 266 2011-05-29 19:38:42Z MPIE\p.eisenlohr $','\n','\\n')+ '\t' + \
           ' '.join(sys.argv[1:]) + '\n' + \
           '\t'.join(headers) + '\n'                                       # build extended header

  if not options.memory:
    file['output'].write(output)                                           # streaming mode: flush header immediately
    output = ''

# ------------------------------------------ read file ---------------------------------------
  # Accumulator indexed [slow][medium][fast][column]; grid sizes use Python 2
  # integer division res/packing.
  # NOTE(review): options.res is only populated when -r is given (the default
  # is stored under dest 'resolution') — confirm before relying on defaults.
  averagedDown = numpy.zeros([options.res[2]/options.packing[2],
                              options.res[1]/options.packing[1],
                              options.res[0]/options.packing[0],
                              len(headers)])
  idx = 0                                                                  # running cell index; assumes rows ordered fast->slow
  for line in {True : data,
               False : file['input']}[options.memory]:
    items = numpy.array(map(float,line.split()[:len(headers)]))
    if len(items) < len(headers):                                          # skip short (incomplete) rows
      continue
    loc = location(idx,options.res)//options.packing                       # coarse-grid cell this row belongs to
    averagedDown[loc[2],loc[1],loc[0],:] += items
    idx += 1

  averagedDown /= options.packing[0]*options.packing[1]*options.packing[2] # sum -> mean over each packing block

  # Emit the coarse grid in the same fast-medium-slow ordering as the input.
  for z in range(options.res[2]/options.packing[2]):
    for y in range(options.res[1]/options.packing[1]):
      for x in range(options.res[0]/options.packing[0]):
        output += '\t'.join(map(str,averagedDown[z,y,x,:])) + '\n'

  file['input'].close()

# ------------------------------------------ output result ---------------------------------------
  file['output'].write(output)
  if file['name'] != 'STDIN':
    file['output'].close                                                   # NOTE(review): missing '()' — this does NOT close the file

View File

@ -21,6 +21,12 @@ bin_link = { \
],
'post' : [
'3Dvisualize',
'addCauchy',
'addDeterminant',
'addDivergence',
'addMises',
'addNorm',
'addStrainTensors',
'mentat_colorMap',
'postResults',
'spectral_iterationCount',