adopted ASCII_TABLE class
This commit is contained in:
parent 42c3074821
commit 9d3f7b8d3d
@@ -1,16 +1,8 @@
 #!/usr/bin/python
 
-import os,re,sys,math,numpy,string
+import os,re,sys,math,numpy,string,damask_tools
 from optparse import OptionParser, Option
 
-def operator(how,vector):
-  return { \
-           'ln': numpy.log(vector)*1.0,\
-           'Biot': vector *1.0,\
-           'Green': vector*vector *0.5,\
-         }[how]
-
-
 # -----------------------------
 class extendableOption(Option):
 # -----------------------------
@@ -30,10 +22,14 @@ class extendableOption(Option):
     Option.take_action(self, action, dest, opt, value, values, parser)
 
 
-def prefixMultiply(what,len):
-
-  return {True: ['%i_%s'%(i+1,what) for i in range(len)],
-          False:[what]}[len>1]
+
+def operator(how,vector):
+  return { \
+           'ln': numpy.log(vector)*1.0,\
+           'Biot': vector *1.0,\
+           'Green': vector*vector *0.5,\
+         }[how]
+
 
 
 # --------------------------------------------------------------------
@@ -47,8 +43,6 @@ Add column(s) containing given strains based on given stretches of requested def
 )
 
 
-parser.add_option('-m','--memory', dest='memory', action='store_true', \
-                  help='load complete file into memory [%default]')
 parser.add_option('-u','--right', action='store_true', dest='right', \
                   help='calculate strains based on right Cauchy--Green deformation, i.e., C and U')
 parser.add_option('-v','--left', action='store_true', dest='left', \
@@ -62,7 +56,6 @@ parser.add_option('-g','--green', action='store_true', dest='green', \
 parser.add_option('-f','--deformation', dest='defgrad', action='extend', type='string', \
                   help='heading(s) of columns containing deformation tensor values %default')
 
-parser.set_defaults(memory = False)
 parser.set_defaults(right = False)
 parser.set_defaults(left = False)
 parser.set_defaults(logarithmic = False)
@@ -104,30 +97,12 @@ else:
 # ------------------------------------------ loop over input files ---------------------------------------
 
 for file in files:
-  print file['name']
+  if file['name'] != 'STDIN': print file['name']
 
-# get labels by either read the first row, or - if keyword header is present - the last line of the header
-  firstline = file['input'].readline()
-  m = re.search('(\d+)\s*head', firstline.lower())
-  if m:
-    headerlines = int(m.group(1))
-    passOn = [file['input'].readline() for i in range(1,headerlines)]
-    headers = file['input'].readline().split()
-  else:
-    headerlines = 1
-    passOn = []
-    headers = firstline.split()
-
-  if options.memory:
-    data = file['input'].readlines()
-  else:
-    data = []
-
-  for i,l in enumerate(headers):
-    if l.startswith('1_'):
-      if re.match('\d+_',l[2:]) or i == len(headers)-1 or not headers[i+1].endswith(l[2:]):
-        headers[i] = l[2:]
-
+  table = damask_tools.ASCII_TABLE(file['input'],file['output'],False) # make unbuffered ASCII_table
+  table.head_read() # read ASCII header info
+  table.info_append(string.replace('$Id$','\n','\\n') + \
+                    '\t' + ' '.join(sys.argv[1:]))
 
   active = {}
   column = {}
@@ -137,50 +112,37 @@ for file in files:
     for label in info['label']:
       key = {True :'1_%s',
              False:'%s' }[info['len']>1]%label
-      if key not in headers:
+      if key not in table.labels:
         sys.stderr.write('column %s not found...\n'%key)
       else:
         if datatype not in active: active[datatype] = []
         if datatype not in column: column[datatype] = {}
         active[datatype].append(label)
-        column[datatype][label] = headers.index(key)
+        column[datatype][label] = table.labels.index(key)
         for theStretch in stretches:
           for theStrain in strains:
-            head += prefixMultiply('%s(%s)'%(theStrain,theStretch), 9)
+            table.labels_append(['%i_%s(%s)'%(i+1,theStrain,theStretch)
+                                 for i in xrange(datainfo['defgrad']['len'])]) # extend ASCII header with new labels
 
 # ------------------------------------------ assemble header ---------------------------------------
 
-  output = '%i\theader'%(headerlines+1) + '\n' + \
-           ''.join(passOn) + \
-           string.replace('$Id$','\n','\\n')+ '\t' + \
-           ' '.join(sys.argv[1:]) + '\n' + \
-           '\t'.join(headers + head) + '\n' # build extended header
+  table.head_write()
 
-  if not options.memory:
-    file['output'].write(output)
-    output = ''
+# ------------------------------------------ process data ---------------------------------------
 
-# ------------------------------------------ read file ---------------------------------------
+  while table.data_read(): # read next data line of ASCII table
 
-  for line in {True : data,
-               False : file['input']}[options.memory]:
-    items = line.split()[:len(headers)]
-    if len(items) < len(headers):
-      continue
-
-    output += '\t'.join(items)
-
-    for datatype,labels in active.items():
-      for label in labels:
-        defgrad = numpy.array(map(float,items[column[datatype][label]:
-                                              column[datatype][label]+datainfo[datatype]['len']]),'d').reshape(3,3)
-        (U,S,Vh) = numpy.linalg.svd(defgrad)
+    for datatype,labels in active.items(): # loop over vector,tensor
+      for label in labels: # loop over all requested norms
+        F = numpy.array(map(float,table.data[column['defgrad'][active['defgrad'][0]]:
+                                             column['defgrad'][active['defgrad'][0]]+datainfo['defgrad']['len']]),'d').reshape(3,3)
+        (U,S,Vh) = numpy.linalg.svd(F)
         R = numpy.dot(U,Vh)
-        stretch['U'] = numpy.dot(numpy.linalg.inv(R),defgrad)
-        stretch['V'] = numpy.dot(defgrad,numpy.linalg.inv(R))
+        stretch['U'] = numpy.dot(numpy.linalg.inv(R),F)
+        stretch['V'] = numpy.dot(F,numpy.linalg.inv(R))
         for theStretch in stretches:
           for i in range(9):
-            if stretch[theStretch][i%3,i//3] < 1e-15:
+            if abs(stretch[theStretch][i%3,i//3]) < 1e-15: # kill nasty noisy data
              stretch[theStretch][i%3,i//3] = 0.0
           (D,V) = numpy.linalg.eig(stretch[theStretch]) # eigen decomposition (of symmetric matrix)
           for i,eigval in enumerate(D):
@@ -194,22 +156,16 @@ for file in files:
             d = operator(theStrain,D) # operate on eigenvalues of U or V
             I = operator(theStrain,numpy.ones(3)) # operate on eigenvalues of I (i.e. [1,1,1])
             eps = (numpy.dot(V,numpy.dot(numpy.diag(d),V.T)).real-numpy.diag(I)).reshape(9) # build tensor back from eigenvalue/vector basis
-            output += '\t'+'\t'.join(map(str,eps))
 
+            table.data_append(list(eps))
 
-    output += '\n'
+    table.data_write() # output processed line
 
-    if not options.memory:
-      file['output'].write(output)
-      output = ''
-
-  file['input'].close()
-
 # ------------------------------------------ output result ---------------------------------------
 
-  if options.memory:
-    file['output'].write(output)
+  table.output_flush() # just in case of buffered ASCII table
 
+  file['input'].close() # close input ASCII table
   if file['name'] != 'STDIN':
-    file['output'].close
-    os.rename(file['name']+'_tmp',file['name'])
+    file['output'].close # close output ASCII table
+    os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new
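For orientation, the commit drops the hand-rolled header parsing and the --memory buffering path and routes all I/O through damask_tools.ASCII_TABLE. Below is a minimal, hypothetical sketch (not part of the commit) of the read/process/write cycle the new code follows; it assumes damask_tools is importable and that the ASCII_TABLE methods behave as the added lines above show. The pass-through payload and the '1_copy' label are made up for illustration.

#!/usr/bin/python
# Hypothetical sketch of the ASCII_TABLE cycle adopted by this commit (Python 2, as in the script).

import sys, damask_tools

table = damask_tools.ASCII_TABLE(sys.stdin, sys.stdout, False)  # unbuffered table on stdin/stdout

table.head_read()                             # read the '<N> header' block and column labels
table.info_append('demo: copy first column')  # extend the header info lines
table.labels_append(['1_copy'])               # announce the new output column(s)
table.head_write()                            # write the extended header

while table.data_read():                      # loop over data lines
  table.data_append([table.data[0]])          # append new column value(s) to the current row
  table.data_write()                          # output the processed row

table.output_flush()                          # just in case the table buffers output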