#!/usr/bin/env python
import os, sys, math, re, threading, time, struct, string
from optparse import OptionParser, OptionGroup, Option, SUPPRESS_HELP
releases = {'2010':   ['linux64',''],
            '2008r1': [''],
            '2007r1': [''],
            '2005r3': [''],
           }
# -----------------------------
class vector: # mimic py_post node object
# -----------------------------
x,y,z = [None,None,None]
def __init__(self,coords):
self.x = coords[0]
self.y = coords[1]
self.z = coords[2]
# -----------------------------
class element: # mimic py_post element object
# -----------------------------
items = []
type = None
def __init__(self,nodes,type):
self.items = nodes
self.type = type
# -----------------------------
class elemental_scalar: # mimic py_post element_scalar object
# -----------------------------
id = None
value = None
def __init__(self,node,value):
self.id = node
self.value = value
# -----------------------------
class MPIEspectral_result: # mimic py_post result object
# -----------------------------
file = None
dataOffset = 0
N_elemental_scalars = 0
resolution = [0,0,0]
dimension = [0.0,0.0,0.0]
theTitle = ''
wd = ''
geometry = ''
extrapolate = ''
N_increments = 0
increment = 0
  time = 0.0                # dummy value; mapping an increment to its time would require parsing the load file
N_nodes = 0
N_node_scalars = 0
N_elements = 0
N_element_scalars = 0
N_element_tensors = 0
def __init__(self,filename):
self.file = open(filename, 'rb')
self.theTitle = self._keyedString('load')
self.wd = self._keyedString('workingdir')
self.geometry = self._keyedString('geometry')
self.N_increments = self._keyedInt('increments')
self.N_element_scalars = self._keyedInt('materialpoint_sizeResults')
self.resolution = self._keyedPackedArray('resolution',3,'i')
self.N_nodes = (self.resolution[0]+1)*(self.resolution[1]+1)*(self.resolution[2]+1)
self.N_elements = self.resolution[0]*self.resolution[1]*self.resolution[2]
self.dimension = self._keyedPackedArray('dimension',3,'d')
    self.file.seek(0)
    self.dataOffset = self.file.read(2048).find('eoh')+7                    # data starts past the 'eoh' key (3 chars) plus what appears to be a 4-byte marker
def __str__(self):
return '\n'.join([
'title: %s'%self.theTitle,
'workdir: %s'%self.wd,
'geometry: %s'%self.geometry,
'extrapolation: %s'%self.extrapolate,
'increments: %i'%self.N_increments,
'increment: %i'%self.increment,
'nodes: %i'%self.N_nodes,
'resolution: %s'%(','.join(map(str,self.resolution))),
'dimension: %s'%(','.join(map(str,self.dimension))),
'elements: %i'%self.N_elements,
'nodal_scalars: %i'%self.N_node_scalars,
'elemental scalars: %i'%self.N_element_scalars,
'elemental tensors: %i'%self.N_element_tensors,
]
)
def _keyedPackedArray(self,identifier,length = 3,type = 'd'):
    match = {'d': 8,'i': 4}                                                 # bytes per packed value
self.file.seek(0)
m = re.search('%s%s'%(identifier,'(.{%i})'%(match[type])*length),self.file.read(2048),re.DOTALL)
values = []
if m:
for i in m.groups():
values.append(struct.unpack(type,i)[0])
return values
def _keyedInt(self,identifier):
value = None
self.file.seek(0)
m = re.search('%s%s'%(identifier,'(.{4})'),self.file.read(2048),re.DOTALL)
if m:
value = struct.unpack('i',m.group(1))[0]
return value
def _keyedString(self,identifier):
value = None
self.file.seek(0)
m = re.search(r'(.{4})%s(.*?)\1'%identifier,self.file.read(2048),re.DOTALL)
if m:
value = m.group(2)
return value
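
  # The _keyed* helpers above scan the first 2048 bytes for "<key><packed data>"
  # pairs. A minimal sketch of a header they would parse (an assumption read off
  # the regular expressions, not a documented file format):
  #   'increments' + struct.pack('i',10) + 'resolution' + struct.pack('3i',32,32,32)
  # is recovered by _keyedInt('increments') and _keyedPackedArray('resolution',3,'i').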
def title(self):
return self.theTitle
def moveto(self,inc):
self.increment = inc
def extrapolation(self,value):
self.extrapolate = value
def node_sequence(self,n):
return n-1
def node_id(self,n):
return n+1
2011-02-01 23:55:40 +05:30
def node(self,n):
a = self.resolution[0]+1
b = self.resolution[1]+1
c = self.resolution[2]+1
return vector([self.dimension[0] * (n%a) / self.resolution[0],
self.dimension[1] * ((n/a)%b) / self.resolution[1],
self.dimension[2] * ((n/a/b)%c) / self.resolution[2],
])
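
  # example: with resolution [2,2,2] the 27 nodes are numbered x fastest, so
  # node(4) sits at the center of the bottom face: (dimension[0]/2, dimension[1]/2, 0)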
def element_sequence(self,e):
return e-1
def element_id(self,e):
return e+1
def element(self,e):
a = self.resolution[0]+1
b = self.resolution[1]+1
c = self.resolution[2]+1
basenode = 1 + e+e/self.resolution[0] + e/self.resolution[0]/self.resolution[1]*a
basenode2 = basenode+a*b
return (element([basenode ,basenode +1,basenode +a+1,basenode +a,
basenode2,basenode2+1,basenode2+a+1,basenode2+a,
],117))
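
  # example: for resolution [2,2,2], element(0) connects nodes 1,2,5,4 on the
  # bottom plane and 10,11,14,13 above it (1-based IDs, type 117 = 8-node brick)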
def increments(self):
return self.N_increments
def nodes(self):
return self.N_nodes
def node_scalars(self):
return self.N_node_scalars
def elements(self):
return self.N_elements
def element_scalars(self):
return self.N_element_scalars
def element_scalar(self,e,idx):
    self.file.seek(self.dataOffset
                   + self.increment*(4 + self.N_elements*self.N_element_scalars*8 + 4)   # skip whole increments: payload of doubles framed by 4-byte markers
                   + 4 + (e*self.N_element_scalars + idx)*8)                # leading marker plus the doubles preceding the requested one
value = struct.unpack('d',self.file.read(8))[0]
return [elemental_scalar(node,value) for node in self.element(e).items]
  def element_scalar_label(self,idx):
return 'User Defined Variable %i'%(idx+1)
def element_tensors(self):
return self.N_element_tensors
# -----------------------------
class MyOption(Option):
# -----------------------------
# defines the new option parser action 'extend', which allows an option
# to accumulate multiple comma-separated arguments
# adapted from the optparse documentation: http://docs.python.org/library/optparse.html
ACTIONS = Option.ACTIONS + ("extend",)
STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)
def take_action(self, action, dest, opt, value, values, parser):
if action == "extend":
lvalue = value.split(",")
values.ensure_value(dest, []).extend(lvalue)
else:
Option.take_action(self, action, dest, opt, value, values, parser)
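
# example (hypothetical command line): '--sort elem --sort node,ip' accumulates
# to options.sort == ['elem','node','ip'] through the 'extend' action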
# -----------------------------
class backgroundMessage(threading.Thread):
# -----------------------------
def __init__(self):
threading.Thread.__init__(self)
self.message = ''
self.new_message = ''
self.counter = 0
    self.symbols = ['- ', '\\ ', '| ', '/ ']                                # spinner frames (backslash escaped for clarity)
self.waittime = 0.5
  def __quit__(self):
    length = len(self.message) + len(self.symbols[self.counter])
    sys.stderr.write(chr(8)*length + ' '*length + chr(8)*length)            # erase the last message
def run(self):
    while not threading.enumerate()[0]._Thread__stopped:                    # poll the (private) stopped flag of the main thread
time.sleep(self.waittime)
self.update_message()
self.__quit__()
def set_message(self, new_message):
self.new_message = new_message
self.print_message()
def print_message(self):
length = len(self.message) + len(self.symbols[self.counter])
sys.stderr.write(chr(8)*length + ' '*length + chr(8)*length) # delete former message
sys.stderr.write(self.symbols[self.counter] + self.new_message) # print new message
self.message = self.new_message
def update_message(self):
self.counter = (self.counter + 1)%len(self.symbols)
self.print_message()
# -----------------------------
def ipCoords(elemType, nodalCoordinates):
#
# returns IP coordinates for a given element
# -----------------------------
nodeWeightsPerNode = {
7: [ [27.0, 9.0, 3.0, 9.0, 9.0, 3.0, 1.0, 3.0],
[ 9.0, 27.0, 9.0, 3.0, 3.0, 9.0, 3.0, 1.0],
[ 3.0, 9.0, 27.0, 9.0, 1.0, 3.0, 9.0, 3.0],
[ 9.0, 3.0, 9.0, 27.0, 3.0, 1.0, 3.0, 9.0],
[ 9.0, 3.0, 1.0, 3.0, 27.0, 9.0, 3.0, 9.0],
[ 3.0, 9.0, 3.0, 1.0, 9.0, 27.0, 9.0, 3.0],
[ 1.0, 3.0, 9.0, 3.0, 3.0, 9.0, 27.0, 9.0],
[ 3.0, 1.0, 3.0, 9.0, 9.0, 3.0, 9.0, 27.0] ],
57: [ [27.0, 9.0, 3.0, 9.0, 9.0, 3.0, 1.0, 3.0],
[ 9.0, 27.0, 9.0, 3.0, 3.0, 9.0, 3.0, 1.0],
[ 3.0, 9.0, 27.0, 9.0, 1.0, 3.0, 9.0, 3.0],
[ 9.0, 3.0, 9.0, 27.0, 3.0, 1.0, 3.0, 9.0],
[ 9.0, 3.0, 1.0, 3.0, 27.0, 9.0, 3.0, 9.0],
[ 3.0, 9.0, 3.0, 1.0, 9.0, 27.0, 9.0, 3.0],
[ 1.0, 3.0, 9.0, 3.0, 3.0, 9.0, 27.0, 9.0],
[ 3.0, 1.0, 3.0, 9.0, 9.0, 3.0, 9.0, 27.0] ],
117: [ [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] ],
125: [ [ 3.0, 0.0, 0.0, 4.0, 1.0, 4.0],
[ 0.0, 3.0, 0.0, 4.0, 4.0, 1.0],
[ 0.0, 0.0, 3.0, 1.0, 4.0, 4.0],],
136: [ [42.0, 15.0, 15.0, 14.0, 5.0, 5.0],
[15.0, 42.0, 15.0, 5.0, 14.0, 5.0],
[15.0, 15.0, 42.0, 5.0, 5.0, 14.0],
[14.0, 5.0, 5.0, 42.0, 15.0, 15.0],
[ 5.0, 14.0, 5.0, 15.0, 42.0, 15.0],
[ 5.0, 5.0, 14.0, 15.0, 15.0, 42.0] ],
}
Nips = len(nodeWeightsPerNode[elemType])
ipCoordinates = [[0.0,0.0,0.0] for i in range(Nips)]
for ip in range(Nips):
for node in range(len(nodeWeightsPerNode[elemType][ip])):
for i in range(3):
ipCoordinates[ip][i] += nodeWeightsPerNode[elemType][ip][node] * nodalCoordinates[node][i]
for i in range(3):
ipCoordinates[ip][i] /= sum(nodeWeightsPerNode[elemType][ip])
return ipCoordinates
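
# note: for element type 7 each weight row is the outer product of (3/4,1/4)
# per direction (27:9:3:1, summing to 64), i.e. every IP lies midway between
# element center and its corner node; type 117 averages all eight nodes equally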
# -----------------------------
def ipIDs(elemType):
#
# returns IP numbers for given element type
# -----------------------------
ipPerNode = {
7: [ 1, 2, 4, 3, 5, 6, 8, 7 ],
57: [ 1, 2, 4, 3, 5, 6, 8, 7 ],
117: [ 1 ],
125: [ 1, 2, 3 ],
136: [ 1, 2, 3, 4, 5, 6 ],
}
return ipPerNode[elemType]
# -----------------------------
def substituteLocation(string, mesh, coords):
#
# do variable interpolation in group and filter strings
# -----------------------------
substitute = string
substitute = substitute.replace('elem', str(mesh[0]))
substitute = substitute.replace('node', str(mesh[1]))
substitute = substitute.replace('ip', str(mesh[2]))
substitute = substitute.replace('grain', str(mesh[3]))
substitute = substitute.replace('x', '%.6g'%coords[0])
substitute = substitute.replace('y', '%.6g'%coords[1])
substitute = substitute.replace('z', '%.6g'%coords[2])
return substitute
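
# example: substituteLocation('y < 0.0 and grain == 1', [1,2,3,1], [0.1,-0.2,0.0])
# yields '-0.2 < 0.0 and 1 == 1'; beware the plain str.replace: key words must
# not occur as substrings of other identifiers in the expression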
# -----------------------------
def heading(glue,parts):
#
# joins the pieces of each part with glue; the second-to-last entry of a part
# gives its multiplicity and is dropped when zero
# -----------------------------
header = []
for pieces in parts:
if pieces[-2] == 0:
del pieces[-2]
header.append(glue.join(map(str,pieces)))
return header
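
# example: scalar outputs drop their (zero) component index, vector outputs keep it:
#   heading('_',[[0,'f']])         -> ['f']
#   heading('_',[[1,'f'],[2,'f']]) -> ['1_f','2_f']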
# -----------------------------
def mapIncremental(label, mapping, N, base, new):
#
# applies the function defined by "mapping"
# ('min','max','avg','sum','abssum','unique', or a user-specified lambda)
# to a list of data
# -----------------------------
  theMap = { 'min':    lambda n,b,a: a if n == 0 else min(b,a),             # all standard mappings ignore the base value
             'max':    lambda n,b,a: a if n == 0 else max(b,a),             # on the first data point (n == 0)
             'avg':    lambda n,b,a: (n*b+a)/(n+1),
             'sum':    lambda n,b,a: a if n == 0 else b+a,
             'abssum': lambda n,b,a: abs(a) if n == 0 else b+abs(a),
             'unique': lambda n,b,a: {True:a,False:'n/a'}[n == 0 or b == a]
           }
if mapping in theMap:
mapped = map(theMap[mapping],[N]*len(base),base,new) # map one of the standard functions to data
if label.lower() == 'orientation': # orientation is special case:...
orientationNorm = math.sqrt(sum([q*q for q in mapped])) # ...calc norm of average quaternion
mapped = map(lambda x: x/orientationNorm, mapped) # ...renormalize quaternion
else:
try:
      mapped = eval('map(%s,[N]*len(base),base,new)'%mapping)               # map user-defined function to columns in chunks
except:
mapped = ['n/a']*len(base)
return mapped
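
# example: a running average, fed one data point at a time:
#   mapIncremental('','avg',0,[0.0],[3.0]) -> [3.0]
#   mapIncremental('','avg',1,[3.0],[5.0]) -> [4.0]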
# -----------------------------
def OpenPostfile(name,type):
#
# open postfile with extrapolation mode "translate"
# -----------------------------
  p = {'spectral': MPIEspectral_result,
       'marc':     post_open,
      }[type](name + {'marc':     '.t16',
                      'spectral': '.spectralOut',
                     }[type])
p.extrapolation('translate')
p.moveto(1)
return p
# -----------------------------
def ParseOutputFormat(filename,what,me):
#
# parse .output* files in order to get a list of outputs
# -----------------------------
format = {'outputs':{},'specials':{'brothers':[]}}
for prefix in ['']+map(str,range(1,17)):
if os.path.exists(prefix+filename+'.output'+what):
break
try:
file = open(prefix+filename+'.output'+what)
content = file.readlines()
file.close()
except:
return format
tag = ''
tagID = 0
for line in content:
if re.match("\s*$",line) or re.match("#",line): # skip blank lines and comments
continue
m = re.match("\[(.+)\]",line) # look for block indicator
if m: # next section
tag = m.group(1)
tagID += 1
format['specials']['brothers'].append(tag)
if tag == me or (me.isdigit() and tagID == int(me)):
format['specials']['_id'] = tagID
format['outputs'] = []
tag = me
else: # data from section
if tag == me:
(output,length) = line.split()
        output = output.lower()
if length.isdigit():
length = int(length)
if re.match("\((.+)\)",output): # special data, e.g. (Ngrains)
format['specials'][output] = length
elif length > 0:
format['outputs'].append([output,length])
return format
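
# a minimal .output<what> file as this parser expects it (illustrative sketch):
#   [mySection]
#   (Ngrains)    2
#   someOutput   3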
# -----------------------------
def ParsePostfile(p,filename, outputFormat):
#
# parse postfile in order to get position and labels of outputs
# needs "outputFormat" for mapping of output names to postfile output indices
# -----------------------------
# --- build statistics
  stat = {
    'IndexOfLabel': {},
    'Title': p.title(),
    'Extrapolation': p.extrapolate,
    'NumberOfIncrements': p.increments(),
    'NumberOfNodes': p.nodes(),
    'NumberOfNodalScalars': p.node_scalars(),
    'LabelOfNodalScalar': [None]*p.node_scalars(),
    'NumberOfElements': p.elements(),
    'NumberOfElementalScalars': p.element_scalars(),
    'LabelOfElementalScalar': [None]*p.element_scalars(),
    'NumberOfElementalTensors': p.element_tensors(),
    'LabelOfElementalTensor': [None]*p.element_tensors(),
  }
# --- find labels
for labelIndex in range(stat['NumberOfNodalScalars']):
label = p.node_scalar_label(labelIndex)
stat['IndexOfLabel'][label] = labelIndex
stat['LabelOfNodalScalar'][labelIndex] = label
for labelIndex in range(stat['NumberOfElementalScalars']):
label = p.element_scalar_label(labelIndex)
stat['IndexOfLabel'][label] = labelIndex
stat['LabelOfElementalScalar'][labelIndex] = label
for labelIndex in range(stat['NumberOfElementalTensors']):
label = p.element_tensor_label(labelIndex)
stat['IndexOfLabel'][label] = labelIndex
stat['LabelOfElementalTensor'][labelIndex] = label
if 'User Defined Variable 1' in stat['IndexOfLabel']:
stat['IndexOfLabel']['GrainCount'] = stat['IndexOfLabel']['User Defined Variable 1']
if 'GrainCount' in stat['IndexOfLabel']: # does the result file contain relevant user defined output at all?
startIndex = stat['IndexOfLabel']['GrainCount'] - 1
# We now have to find a mapping for each output label as defined in the .output* files to the output position in the post file
# Since we know where the user defined outputs start ("startIndex"), we can simply assign increasing indices to the labels
# given in the .output* file
offset = 2
stat['LabelOfElementalScalar'][startIndex + offset] = 'HomogenizationCount'
for var in outputFormat['Homogenization']['outputs']:
if var[1] > 1:
for i in range(var[1]):
stat['IndexOfLabel']['%i_%s'%(i+1,var[0])] = startIndex + offset + (i+1)
else:
stat['IndexOfLabel']['%s'%(var[0])] = startIndex + offset + 1
offset += var[1]
for grain in range(outputFormat['Homogenization']['specials']['(ngrains)']):
stat['IndexOfLabel']['%i_CrystalliteCount'%(grain+1)] = startIndex + offset + 1
offset += 1
for var in outputFormat['Crystallite']['outputs']:
if var[1] > 1:
for i in range(var[1]):
stat['IndexOfLabel']['%i_%i_%s'%(grain+1,i+1,var[0])] = startIndex + offset + (i+1)
else:
stat['IndexOfLabel']['%i_%s'%(grain+1,var[0])] = startIndex + offset + 1
offset += var[1]
stat['IndexOfLabel']['%i_ConstitutiveCount'%(grain+1)] = startIndex + offset + 1
offset += 1
for var in outputFormat['Constitutive']['outputs']:
if var[1] > 1:
for i in range(var[1]):
stat['IndexOfLabel']['%i_%i_%s'%(grain+1,i+1,var[0])] = startIndex + offset + (i+1)
else:
stat['IndexOfLabel']['%i_%s'%(grain+1,var[0])] = startIndex + offset + 1
offset += var[1]
return stat
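
# sketch of the resulting label scheme: a 3-component constitutive output
# 'resistance' of grain 2 is indexed under '2_1_resistance', '2_2_resistance',
# and '2_3_resistance' in stat['IndexOfLabel']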
# -----------------------------
def SummarizePostfile(stat,where=sys.stdout):
# -----------------------------
where.write('\n\n')
where.write('title:\t%s'%stat['Title'] + '\n\n')
  where.write('extrapolation:\t%s'%stat['Extrapolation'] + '\n\n')
where.write('increments:\t%i'%(stat['NumberOfIncrements']) + '\n\n')
where.write('nodes:\t%i'%stat['NumberOfNodes'] + '\n\n')
where.write('elements:\t%i'%stat['NumberOfElements'] + '\n\n')
where.write('nodal scalars:\t%i'%stat['NumberOfNodalScalars'] + '\n\n ' + '\n '.join(stat['LabelOfNodalScalar']) + '\n\n')
where.write('elemental scalars:\t%i'%stat['NumberOfElementalScalars'] + '\n\n ' + '\n '.join(stat['LabelOfElementalScalar']) + '\n\n')
where.write('elemental tensors:\t%i'%stat['NumberOfElementalTensors'] + '\n\n ' + '\n '.join(stat['LabelOfElementalTensor']) + '\n\n')
return True
# -----------------------------
# MAIN FUNCTION STARTS HERE
# -----------------------------
# --- input parsing
parser = OptionParser(option_class=MyOption, usage='%prog [options] resultfile', description = """
Extract data from a .t16 (MSC.Marc) or .spectralOut results file.
List of output variables is given by options '--ns','--es','--et','--ho','--cr','--co'.
Filters and separations use 'elem','node','ip','grain', and 'x','y','z' as key words.
Example:
1) get averaged results in slices perpendicular to x for all negative y coordinates
--filter 'y < 0.0' --separation x --map 'avg'
2) global sum of squared data falling into first quadrant arc between R1 and R2
--filter 'x >= 0.0 and y >= 0.0 and x*x + y*y >= R1*R1 and x*x + y*y <= R2*R2'
--map 'lambda n,b,a: n*b+a*a'
User mappings need to be formulated incrementally: for each new data point, a(dd),
they may use the current (incremental) result, b(ase), as well as the n(umber)
of data points already processed.
$Id$
""")
parser.add_option('-i','--info', action='store_true', dest='info', \
help='list contents of resultfile [%default]')
parser.add_option('-d','--dir', dest='directory', \
help='name of subdirectory to hold output [%default]')
parser.add_option('-s','--split', action='store_true', dest='separateFiles', \
help='split output per increment [%default]')
parser.add_option('-r','--range', dest='range', type='int', nargs=3, \
help='range of increments to output (start, end, step) [all]')
parser.add_option('--sloppy', action='store_true', dest='sloppy', \
help='do not pre-check validity of increment range')
parser.add_option('-m','--map', dest='func', type='string', \
help='data reduction mapping ["%default"] out of min, max, avg, sum, abssum or user-lambda')
parser.add_option('-p','--type', dest='filetype', type='string', \
help = 'type of result file [%default]')
group_material = OptionGroup(parser,'Material identifier')
group_special = OptionGroup(parser,'Special outputs')
group_general = OptionGroup(parser,'General outputs')
group_material.add_option('--homogenization', dest='homog', type='string', \
help='homogenization identifier (as string or integer [%default])')
group_material.add_option('--crystallite', dest='cryst', type='string', \
help='crystallite identifier (as string or integer [%default])')
group_material.add_option('--phase', dest='phase', type='string', \
help='phase identifier (as string or integer [%default])')
group_special.add_option('-t','--time', action='store_true', dest='time', \
help='output time of increment [%default]')
group_special.add_option('-f','--filter', dest='filter', type='string', \
help='condition(s) to filter results [%default]')
group_special.add_option('--separation', action='extend', dest='separation', type='string', \
help='properties to separate results [%default]')
group_special.add_option('--sort', action='extend', dest='sort', type='string', \
help='properties to sort results [%default]')
group_general.add_option('--ns', action='extend', dest='nodalScalar', type='string', \
help='list of nodal scalars to extract')
group_general.add_option('--es', action='extend', dest='elementalScalar', type='string', \
help='list of elemental scalars to extract')
group_general.add_option('--et', action='extend', dest='elementalTensor', type='string', \
help='list of elemental tensors to extract')
group_general.add_option('--ho', action='extend', dest='homogenizationResult', type='string', \
help='list of homogenization results to extract')
group_general.add_option('--cr', action='extend', dest='crystalliteResult', type='string', \
help='list of crystallite results to extract')
group_general.add_option('--co', action='extend', dest='constitutiveResult', type='string', \
help='list of constitutive results to extract')
parser.add_option_group(group_material)
parser.add_option_group(group_general)
parser.add_option_group(group_special)
parser.set_defaults(info = False)
parser.set_defaults(sloppy = False)
parser.set_defaults(directory = 'postProc')
parser.set_defaults(filetype = 'marc')
parser.set_defaults(func = 'avg')
parser.set_defaults(homog = '1')
parser.set_defaults(cryst = '1')
parser.set_defaults(phase = '1')
parser.set_defaults(filter = '')
parser.set_defaults(separation = [])
parser.set_defaults(sort = [])
parser.set_defaults(inc = False)
parser.set_defaults(time = False)
parser.set_defaults(separateFiles = False)
(options, files) = parser.parse_args()
options.filetype = options.filetype.lower()
if options.filetype == 'marc':
try:
file = open('%s/../MSCpath'%os.path.dirname(os.path.realpath(sys.argv[0])))
MSCpath = os.path.normpath(file.readline().strip())
file.close()
except:
MSCpath = '/msc'
for release,subdirs in sorted(releases.items(),reverse=True):
for subdir in subdirs:
libPath = '%s/mentat%s/shlib/%s'%(MSCpath,release,subdir)
if os.path.exists(libPath):
sys.path.append(libPath)
break
else:
continue
break
try:
from py_post import *
except:
print('error: no valid Mentat release found in %s'%MSCpath)
sys.exit(-1)
else:
def post_open():
return
# --- sanity checks
if files == []:
parser.print_help()
parser.error('no file specified...')
if not os.path.exists(files[0]):
parser.print_help()
parser.error('invalid file "%s" specified...'%files[0])
if options.filetype not in ['marc','spectral']:
parser.print_help()
parser.error('file type "%s" not supported...'%options.filetype)
if options.constitutiveResult and not options.phase:
parser.print_help()
parser.error('constitutive results require phase...')
if options.nodalScalar and ( options.elementalScalar or options.elementalTensor
or options.homogenizationResult or options.crystalliteResult or options.constitutiveResult ):
parser.print_help()
parser.error('not allowed to mix nodal with elemental results...')
if not options.nodalScalar: options.nodalScalar = []
if not options.elementalScalar: options.elementalScalar = []
if not options.elementalTensor: options.elementalTensor = []
if not options.homogenizationResult: options.homogenizationResult = []
if not options.crystalliteResult: options.crystalliteResult = []
if not options.constitutiveResult: options.constitutiveResult = []
options.sort.reverse()
options.separation.reverse()
# --- start background messaging
bg = backgroundMessage()
bg.start()
# --- parse .output and .t16 files
2010-08-17 02:17:27 +05:30
2011-03-11 22:02:01 +05:30
filename = os.path.splitext(files[0])[0]
dirname = os.path.abspath(os.path.dirname(filename))+os.sep+options.directory
if not os.path.isdir(dirname):
os.mkdir(dirname,0755)
outputFormat = {}
me = {
'Homogenization': options.homog,
'Crystallite': options.cryst,
'Constitutive': options.phase,
}
bg.set_message('parsing .output files...')
for what in me:
outputFormat[what] = ParseOutputFormat(filename, what, me[what])
  if '_id' not in outputFormat[what]['specials']:
print "'%s' not found in <%s>"%(me[what], what)
print '\n'.join(map(lambda x:' '+x, outputFormat[what]['specials']['brothers']))
sys.exit(1)
bg.set_message('opening result file...')
p = OpenPostfile(filename,options.filetype)
bg.set_message('parsing result file...')
stat = ParsePostfile(p, filename, outputFormat)
if options.filetype == 'marc':
stat['NumberOfIncrements'] -= 1 # t16 contains one "virtual" increment (at 0)
# --- sanity check for output variables
# for Mentat variables (nodalScalar,elementalScalar,elementalTensor) we simply check whether the label is present in the stat['IndexOfLabel'] dictionary
# for user-defined variables (homogenizationResult,crystalliteResult,constitutiveResult) we check the corresponding outputFormat, since the naming scheme in stat['IndexOfLabel'] is different
for opt in ['nodalScalar','elementalScalar','elementalTensor','homogenizationResult','crystalliteResult','constitutiveResult']:
  if getattr(options,opt):
    for label in getattr(options,opt):
      if (opt in ['nodalScalar','elementalScalar','elementalTensor'] and label not in stat['IndexOfLabel']) \
         or (opt in ['homogenizationResult','crystalliteResult','constitutiveResult'] \
             and (not outputFormat[opt[:-6].capitalize()]['outputs'] \
                  or label not in zip(*outputFormat[opt[:-6].capitalize()]['outputs'])[0])):
        parser.error('%s "%s" unknown...'%(opt,label))
# --- output info
if options.info:
if options.filetype == 'marc':
print '\n\nMentat release %s'%release
SummarizePostfile(stat,sys.stderr)
print '\nUser Defined Outputs'
for what in me:
print '\n ',what,':'
for output in outputFormat[what]['outputs']:
print ' ',output
sys.exit(0)
# --- get output data from .t16 file
increments = range(stat['NumberOfIncrements'])
if options.filetype == 'marc':
offset_inc = 1
else:
offset_inc = 0
if options.range:
options.range = list(options.range)
if options.sloppy:
increments = range(options.range[0],options.range[1]+1,options.range[2])
else:
increments = range( max(0,options.range[0]),
min(stat['NumberOfIncrements'],options.range[1]+1),
options.range[2])
# --------------------------- build group membership --------------------------------
p.moveto(increments[0]+offset_inc)
index = {}
groups = []
groupCount = 0
memberCount = 0
if options.nodalScalar:
for n in xrange(stat['NumberOfNodes']):
if n%1000 == 0:
bg.set_message('scan node %i...'%n)
myNodeID = p.node_id(n)
myNodeCoordinates = [p.node(n).x, p.node(n).y, p.node(n).z]
myElemID = 0
myIpID = 0
myGrainID = 0
# --- filter valid locations
filter = substituteLocation(options.filter, [myElemID,myNodeID,myIpID,myGrainID], myNodeCoordinates) # generates an expression that is only true for the locations specified by options.filter
if filter != '' and not eval(filter): # for all filter expressions that are not true:...
continue # ... ignore this data point and continue with next
# --- group data locations
grp = substituteLocation('#'.join(options.separation), [myElemID,myNodeID,myIpID,myGrainID], myNodeCoordinates) # generates a unique key for a group of separated data based on the separation criterium for the location
if grp not in index: # create a new group if not yet present
index[grp] = groupCount
      groups.append([[0,0,0,0,0.0,0.0,0.0]])                                # initialize with avg location
groupCount += 1
groups[index[grp]][0][:4] = mapIncremental('','unique',
len(groups[index[grp]])-1,
groups[index[grp]][0][:4],
[myElemID,myNodeID,myIpID,myGrainID]) # keep only if unique average location
groups[index[grp]][0][4:] = mapIncremental('','avg',
len(groups[index[grp]])-1,
groups[index[grp]][0][4:],
myNodeCoordinates) # incrementally update average location
groups[index[grp]].append([myElemID,myNodeID,myIpID,myGrainID,0]) # append a new list defining each group member
memberCount += 1
else:
for e in xrange(stat['NumberOfElements']):
if e%1000 == 0:
bg.set_message('scan elem %i...'%e)
myElemID = p.element_id(e)
myIpCoordinates = ipCoords(p.element(e).type, map(lambda node: [node.x, node.y, node.z], map(p.node, map(p.node_sequence, p.element(e).items))))
myIpIDs = ipIDs(p.element(e).type)
Nips = len(myIpIDs)
myNodeIDs = p.element(e).items[:Nips]
for n in range(Nips):
myIpID = myIpIDs[n]
myNodeID = myNodeIDs[n]
for g in range(('GrainCount' in stat['IndexOfLabel'] and int(p.element_scalar(e, stat['IndexOfLabel']['GrainCount'])[0].value))
or 1):
myGrainID = g + 1
# --- filter valid locations
filter = substituteLocation(options.filter, [myElemID,myNodeID,myIpID,myGrainID], myIpCoordinates[n]) # generates an expression that is only true for the locations specified by options.filter
if filter != '' and not eval(filter): # for all filter expressions that are not true:...
continue # ... ignore this data point and continue with next
# --- group data locations
grp = substituteLocation('#'.join(options.separation), [myElemID,myNodeID,myIpID,myGrainID], myIpCoordinates[n]) # generates a unique key for a group of separated data based on the separation criterium for the location
if grp not in index: # create a new group if not yet present
index[grp] = groupCount
groups.append([[0,0,0,0,0.0,0.0,0.0]]) # initialize with avg location
groupCount += 1
groups[index[grp]][0][:4] = mapIncremental('','unique',
len(groups[index[grp]])-1,
groups[index[grp]][0][:4],
[myElemID,myNodeID,myIpID,myGrainID]) # keep only if unique average location
groups[index[grp]][0][4:] = mapIncremental('','avg',
len(groups[index[grp]])-1,
groups[index[grp]][0][4:],
myIpCoordinates[n]) # incrementally update average location
groups[index[grp]].append([myElemID,myNodeID,myIpID,myGrainID,n]) # append a new list defining each group member
memberCount += 1
# --------------------------- sort groups --------------------------------
where = {
'elem': 0,
'node': 1,
'ip': 2,
'grain': 3,
'x': 4,
'y': 5,
'z': 6,
}
sortProperties = []
for item in options.separation:
if item not in options.sort:
sortProperties.append(item)
theKeys = []
for criterium in options.sort+sortProperties:
if criterium in where:
theKeys.append('x[0][%i]'%where[criterium])
sortKeys = eval('lambda x:(%s)'%(','.join(theKeys)))
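# e.g. if the (already reversed) criteria come out as ['z','y','x'], this builds
# sortKeys = lambda x:(x[0][6],x[0][5],x[0][4]): groups are ordered by their
# average location with 'z' as the primary (slowest-varying) key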
bg.set_message('sorting groups...')
groups.sort(key = sortKeys) # in-place sorting to save mem
fileOpen = False
assembleHeader = True
header = []
standard = ['inc'] + \
{True: ['time'],
False:[]}[options.time] + \
['elem','node','ip','grain'] + \
{True: ['node.x','node.y','node.z'],
False:['ip.x','ip.y','ip.z']}[options.nodalScalar != []]
# --------------------------- loop over increments --------------------------------
time_start = time.time()
for incCount,increment in enumerate(increments):
p.moveto(increment+offset_inc)
# --------------------------- file management --------------------------------
if options.separateFiles:
if fileOpen:
file.close()
fileOpen = False
    digits = int(math.log10(max(increments+[1])))+1                         # width needed to zero-pad the largest increment
    outFilename = '%s_inc%s.txt'%(dirname + os.sep + os.path.split(filename)[1],
                                  str(increment).zfill(digits))
else:
outFilename = '%s.txt'%(dirname + os.sep + os.path.split(filename)[1])
if not fileOpen:
file = open(outFilename,'w')
fileOpen = True
file.write('2\theader\n')
file.write(string.replace('$Id$','\n','\\n')+
'\t' + ' '.join(sys.argv[1:]) + '\n')
headerWritten = False
file.flush()
# --------------------------- read and map data per group --------------------------------
member = 0
for group in groups:
N = 0 # group member counter
for (e,n,i,g,n_local) in group[1:]: # loop over group members
member += 1
if member%1000 == 0:
time_delta = ((len(increments)*memberCount)/float(member+incCount*memberCount)-1.0)*(time.time()-time_start)
bg.set_message('(%02i:%02i:%02i) processing point %i of %i from increment %i...'%(time_delta//3600,time_delta%3600//60,time_delta%60,member,memberCount,increment))
newby = [] # current member's data
if options.elementalScalar:
for label in options.elementalScalar:
if assembleHeader:
header += [label.replace(' ','')]
newby.append({'label':label,
'len':1,
'content':[ p.element_scalar(p.element_sequence(e),stat['IndexOfLabel'][label])[n_local].value ]})
if options.elementalTensor:
for label in options.elementalTensor:
if assembleHeader:
header += heading('.',[[label.replace(' ',''),component] for component in ['intensity','t11','t22','t33','t12','t23','t13']])
myTensor = p.element_tensor(p.element_sequence(e),stat['IndexOfLabel'][label])[n_local]
newby.append({'label':label,
'len':7,
'content':[ myTensor.intensity,
myTensor.t11, myTensor.t22, myTensor.t33,
myTensor.t12, myTensor.t23, myTensor.t13,
]})
if options.homogenizationResult or \
options.crystalliteResult or \
options.constitutiveResult:
for (label,resultType) in zip(options.homogenizationResult +
options.crystalliteResult +
options.constitutiveResult,
['Homogenization']*len(options.homogenizationResult) +
['Crystallite']*len(options.crystalliteResult) +
['Constitutive']*len(options.constitutiveResult)
):
outputIndex = list(zip(*outputFormat[resultType]['outputs'])[0]).index(label) # find the position of this output in the outputFormat
length = int(outputFormat[resultType]['outputs'][outputIndex][1])
thisHead = heading('_',[[component,label] for component in range(int(length>1),length+int(length>1))])
if assembleHeader: header += thisHead
if resultType != 'Homogenization':
thisHead = heading('_',[[g,component,label] for component in range(int(length>1),length+int(length>1))])
newby.append({'label':label,
'len':length,
'content':[ p.element_scalar(p.element_sequence(e),stat['IndexOfLabel'][head])[n_local].value
for head in thisHead ]})
assembleHeader = False
if N == 0:
        mappedResult = [float(x) for x in xrange(len(header))]              # dummy values; every standard mapping overwrites them at N == 0
pos = 0
for chunk in newby:
mappedResult[pos:pos+chunk['len']] = mapIncremental(chunk['label'],options.func,
N,mappedResult[pos:pos+chunk['len']],chunk['content'])
pos += chunk['len']
N += 1
# --- write data row to file ---
if not headerWritten:
file.write('\t'.join(standard + header) + '\n')
headerWritten = True
file.write('\t'.join(map(str,[increment] + \
{True:[p.time],False:[]}[options.time] + \
group[0] + \
mappedResult)
) + '\n')
if fileOpen:
file.close()
# --------------------------- DONE --------------------------------