#!/usr/bin/env python

import pdb, os, sys, gc, math, re, threading, time, struct
from optparse import OptionParser, OptionGroup, Option, SUPPRESS_HELP

releases = {'2010':   ['linux64',''],
            '2008r1': [''],
            '2007r1': [''],
            '2005r3': [''],
           }
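# The releases dictionary above maps Marc/Mentat release names to the shlib
# subdirectories that are searched for the py_post module when reading .t16
# files (see the 'marc' branch in the main section below).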
# -----------------------------
class vector:   # mimic py_post node object
# -----------------------------
  x,y,z = [None,None,None]

  def __init__(self,coords):
    self.x = coords[0]
    self.y = coords[1]
    self.z = coords[2]
# -----------------------------
class element:   # mimic py_post element object
# -----------------------------
  items = []
  type = None

  def __init__(self,nodes,type):
    self.items = nodes
    self.type = type
# -----------------------------
class elemental_scalar:   # mimic py_post element_scalar object
# -----------------------------
  id = None
  value = None

  def __init__(self,node,value):
    self.id = node
    self.value = value
# -----------------------------
class MPIEspectral_result:   # mimic py_post result object
# -----------------------------
  file = None
  dataOffset = 0
  N_elemental_scalars = 0
  resolution = [0,0,0]
  dimension = [0.0,0.0,0.0]
  theTitle = ''
  wd = ''
  geometry = ''
  extrapolate = ''
  N_increments = 0
  increment = 0
  time = 0.0            # dummy value for now; the load file would have to be parsed to know the time a particular increment corresponds to
  N_nodes = 0
  N_node_scalars = 0
  N_elements = 0
  N_element_scalars = 0
  N_element_tensors = 0
  def __init__(self,filename):
    self.file = open(filename, 'rb')

    self.theTitle = self._keyedString('load')
    self.wd = self._keyedString('workingdir')
    self.geometry = self._keyedString('geometry')
    self.N_increments = self._keyedInt('increments')
    self.N_element_scalars = self._keyedInt('materialpoint_sizeResults')
    self.resolution = self._keyedPackedArray('resolution',3,'i')
    self.N_nodes = (self.resolution[0]+1)*(self.resolution[1]+1)*(self.resolution[2]+1)
    self.N_elements = self.resolution[0]*self.resolution[1]*self.resolution[2]
    self.dimension = self._keyedPackedArray('dimension',3,'d')
    self.file.seek(0)
    self.dataOffset = self.file.read(2048).find('eoh')+7      # data starts after 'eoh' (the extra 4 bytes presumably skip a record marker)
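  # A rough sketch of the .spectralOut header layout assumed by the _keyed*
  # readers below (inferred from their regular expressions, not from a spec):
  # the first 2048 bytes hold keyword/value pairs, e.g. 'resolution' followed
  # by three 4-byte integers and 'dimension' followed by three 8-byte doubles;
  # string values are bracketed by a pair of identical 4-byte markers; the
  # header is terminated by the string 'eoh'.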
  def __str__(self):
    return '\n'.join([
      'title: %s'%self.theTitle,
      'workdir: %s'%self.wd,
      'geometry: %s'%self.geometry,
      'extrapolation: %s'%self.extrapolate,
      'increments: %i'%self.N_increments,
      'increment: %i'%self.increment,
      'nodes: %i'%self.N_nodes,
      'resolution: %s'%(','.join(map(str,self.resolution))),
      'dimension: %s'%(','.join(map(str,self.dimension))),
      'elements: %i'%self.N_elements,
      'nodal_scalars: %i'%self.N_node_scalars,
      'elemental scalars: %i'%self.N_element_scalars,
      'elemental tensors: %i'%self.N_element_tensors,
      ])
  def _keyedPackedArray(self,identifier,length = 3,type = 'd'):
    bytecount = {'d': 8,'i': 4}                   # bytes per packed value
    self.file.seek(0)
    m = re.search('%s%s'%(identifier,'(.{%i})'%(bytecount[type])*length),self.file.read(2048),re.DOTALL)
    values = []
    if m:
      for i in m.groups():
        values.append(struct.unpack(type,i)[0])
    return values

  def _keyedInt(self,identifier):
    value = None
    self.file.seek(0)
    m = re.search('%s(.{4})'%identifier,self.file.read(2048),re.DOTALL)
    if m:
      value = struct.unpack('i',m.group(1))[0]
    return value

  def _keyedString(self,identifier):
    value = None
    self.file.seek(0)
    m = re.search(r'(.{4})%s(.*?)\1'%identifier,self.file.read(2048),re.DOTALL)
    if m:
      value = m.group(2)
    return value
  def title(self):
    return self.theTitle

  def moveto(self,inc):
    self.increment = inc

  def extrapolation(self,value):
    self.extrapolate = value

  def node_sequence(self,n):
    return n

  def node_id(self,n):
    return n+1

  def node(self,n):
    a = self.resolution[0]+1
    b = self.resolution[1]+1
    c = self.resolution[2]+1
    return vector([self.dimension[0] * (n%a)       / self.resolution[0],
                   self.dimension[1] * ((n/a)%b)   / self.resolution[1],
                   self.dimension[2] * ((n/a/b)%c) / self.resolution[2],
                  ])
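  # Node numbering runs fastest along x (the arithmetic above relies on
  # Python 2 integer division). Example: with resolution [2,2,2] and
  # dimension [1.0,1.0,1.0] there are 3x3x3 = 27 nodes; node 0 sits at
  # (0,0,0), node 1 at (0.5,0,0), node 3 at (0,0.5,0), node 9 at (0,0,0.5).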
  def element_id(self,e):
    return e+1

  def element(self,e):
    a = self.resolution[0]+1
    b = self.resolution[1]+1
    c = self.resolution[2]+1
    basenode  = e + e/self.resolution[0] + e/self.resolution[0]/self.resolution[1]*a
    basenode2 = basenode+a*b
    return (element([basenode ,basenode +1,basenode +a+1,basenode +a,
                     basenode2,basenode2+1,basenode2+a+1,basenode2+a,
                    ],117))
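  # Each grid cell is returned as an 8-node hexahedron: the first four nodes
  # trace the bottom face, the second four the top face. The hardcoded
  # element type 117 matches the all-equal-weights entry in ipCoords() below
  # (presumably the Marc type number for a reduced-integration brick).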
  def increments(self):
    return self.N_increments

  def nodes(self):
    return self.N_nodes

  def node_scalars(self):
    return self.N_node_scalars

  def elements(self):
    return self.N_elements

  def element_scalars(self):
    return self.N_element_scalars
  def element_scalar(self,e,idx):
    self.file.seek(self.dataOffset + (self.increment*(4 + self.N_elements*self.N_element_scalars*8 + 4) + 4 + (e*self.N_element_scalars + idx)*8))
    value = struct.unpack('d',self.file.read(8))[0]
    return [elemental_scalar(node,value) for node in self.element(e).items]
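  # Data layout assumed by the seek above (inferred from the offset formula,
  # not from a file format spec): each increment is one Fortran-style record,
  # i.e. a 4-byte marker, N_elements*N_element_scalars packed doubles, and a
  # trailing 4-byte marker; the inner '+ 4' skips the leading marker of the
  # current increment.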
  def element_scalar_label(self,idx):
    return 'User Defined Variable %i'%(idx+1)

  def element_tensors(self):
    return self.N_element_tensors
# -----------------------------
class MyOption(Option):
# -----------------------------
# used to define the new option parser action 'extend', which allows comma-separated lists of option arguments
# adapted from the optparse documentation: http://docs.python.org/library/optparse.html
  ACTIONS = Option.ACTIONS + ("extend",)
  STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
  TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
  ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)

  def take_action(self, action, dest, opt, value, values, parser):
    if action == "extend":
      lvalue = value.split(",")
      values.ensure_value(dest, []).extend(lvalue)
    else:
      Option.take_action(self, action, dest, opt, value, values, parser)
# -----------------------------
class backgroundMessage(threading.Thread):
# -----------------------------
  def __init__(self):
    threading.Thread.__init__(self)
    self.message = ''
    self.new_message = ''
    self.counter = 0
    self.symbols = ['- ', '\ ', '| ', '/ ',]
    self.waittime = 0.5

  def __quit__(self):
    length = len(self.message) + len(self.symbols[self.counter])
    sys.stderr.write(chr(8)*length + ' '*length + chr(8)*length)      # erase last message
    sys.stderr.write('')

  def run(self):
    while not threading.enumerate()[0]._Thread__stopped:              # poll the main thread's private 'stopped' flag (CPython 2 internal)
      time.sleep(self.waittime)
      self.update_message()
    self.__quit__()

  def set_message(self, new_message):
    self.new_message = new_message
    self.print_message()

  def print_message(self):
    length = len(self.message) + len(self.symbols[self.counter])
    sys.stderr.write(chr(8)*length + ' '*length + chr(8)*length)      # delete former message
    sys.stderr.write(self.symbols[self.counter] + self.new_message)   # print new message
    self.message = self.new_message

  def update_message(self):
    self.counter = (self.counter + 1)%len(self.symbols)
    self.print_message()
# -----------------------------
def ipCoords(elemType, nodalCoordinates):
#
# returns IP coordinates for a given element
# -----------------------------
nodeWeightsPerNode = {
7: [ [27.0, 9.0, 3.0, 9.0, 9.0, 3.0, 1.0, 3.0],
[ 9.0, 27.0, 9.0, 3.0, 3.0, 9.0, 3.0, 1.0],
[ 3.0, 9.0, 27.0, 9.0, 1.0, 3.0, 9.0, 3.0],
[ 9.0, 3.0, 9.0, 27.0, 3.0, 1.0, 3.0, 9.0],
[ 9.0, 3.0, 1.0, 3.0, 27.0, 9.0, 3.0, 9.0],
[ 3.0, 9.0, 3.0, 1.0, 9.0, 27.0, 9.0, 3.0],
[ 1.0, 3.0, 9.0, 3.0, 3.0, 9.0, 27.0, 9.0],
[ 3.0, 1.0, 3.0, 9.0, 9.0, 3.0, 9.0, 27.0] ],
117: [ [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] ],
125: [ [ 3.0, 0.0, 0.0, 4.0, 1.0, 4.0],
[ 0.0, 3.0, 0.0, 4.0, 4.0, 1.0],
[ 0.0, 0.0, 3.0, 1.0, 4.0, 4.0],],
136: [ [42.0, 15.0, 15.0, 14.0, 5.0, 5.0],
[15.0, 42.0, 15.0, 5.0, 14.0, 5.0],
[15.0, 15.0, 42.0, 5.0, 5.0, 14.0],
[14.0, 5.0, 5.0, 42.0, 15.0, 15.0],
[ 5.0, 14.0, 5.0, 15.0, 42.0, 15.0],
[ 5.0, 5.0, 14.0, 15.0, 15.0, 42.0] ],
}
ipCoordinates = [[0.0,0.0,0.0] for i in range(len(nodalCoordinates))]
for ip in range(len(nodeWeightsPerNode[elemType])):
for node in range(len(nodeWeightsPerNode[elemType][ip])):
for i in range(3):
ipCoordinates[ip][i] += nodeWeightsPerNode[elemType][ip][node] * nodalCoordinates[node][i]
for i in range(3):
ipCoordinates[ip][i] /= sum(nodeWeightsPerNode[elemType][ip])
return ipCoordinates
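# Each row of nodeWeightsPerNode holds the nodal weights for one integration
# point; the weighted nodal coordinates are summed and normalized by the row
# sum. Example: for element type 117 (all weights 1.0) every IP evaluates to
# the element centroid, i.e. the plain average of its eight node positions.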
# -----------------------------
def substituteLocation(string, mesh, coords):
#
# do variable interpolation in group and filter strings
# -----------------------------
  substitute = string
  substitute = substitute.replace('elem', str(mesh[0]))
  substitute = substitute.replace('node', str(mesh[1]))
  substitute = substitute.replace('grain', str(mesh[2]))
  substitute = substitute.replace('x', '%.6g'%coords[0])
  substitute = substitute.replace('y', '%.6g'%coords[1])
  substitute = substitute.replace('z', '%.6g'%coords[2])
  return substitute
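# Example: with mesh [12,3,1] and coords [0.5,0.0,0.25] the filter string
# 'y >= 0.0 and grain == 1' becomes '0 >= 0.0 and 1 == 1', which the caller
# then eval()s. Note that plain str.replace() also rewrites these tokens
# inside longer names (the 'x' in 'max', say), so filter and separation
# expressions must avoid identifiers containing the keywords.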
# -----------------------------
def heading(glue,parts):
#
# joins the pieces of each entry in parts with glue; a second-to-last entry
# of 0 marks a scalar (single-component) quantity whose index is dropped
# -----------------------------
  header = []
  for pieces in parts:
    if pieces[-2] == 0:
      del pieces[-2]
    header.append(glue.join(map(str,pieces)))
  return header
# -----------------------------
def illegalMap(map, label):
#
# answers whether map is illegal to be applied to data what
# -----------------------------
illegal = {
'eulerangles': ['min','max','avg','sum'],
'defgrad': ['min','max','avg','sum'],
'orientation': ['min','max', 'sum'],
}
return label.lower() in illegal and map in illegal[label.lower()]
# -----------------------------
def mapIncremental(label, mapping, N, base, new):
#
# applies the function defined by "mapping"
# (can be either 'min','max','avg', 'sum', or user specified)
# to a list of data
# -----------------------------
if illegalMap(mapping,label): # for illegal mappings:...
return ['n/a'*len(base)] # ...return 'n/a'
else:
if mapping in ['min','max','avg','sum']:
mapped = map(
{ 'min': lambda n,b,a: min(b,a),
'max': lambda n,b,a: max(b,a),
'avg': lambda n,b,a: (n*b+a)/(n+1),
'sum': lambda n,b,a: b+a,
}[mapping],[N]*len(base),base,new) # map one of the standard functions to data
if label.lower() == 'orientation': # orientation is special case:...
orientationNorm = math.sqrt(sum([q*q for q in mapped])) # ...calc norm of average quaternion
mapped = map(lambda x: x/orientationNorm, mapped) # ...renormalize quaternion
else:
try:
mapped = eval('map(%s,N*len(base),base,new)'%map) # map user defined function to colums in chunks
except:
mapped = ['n/a'*len(base)]
return mapped
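# 'avg' accumulates a running mean without storing all members: after N
# values with mean b, adding a gives (N*b+a)/(N+1). Example for one column:
#   mapIncremental('','avg',2,[4.0],[7.0]) -> [5.0]
# User lambdas receive the same three scalars (n,b,a) per column.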
# -----------------------------
def OpenPostfile(name,type):
#
# open postfile with extrapolation mode "translate"
# -----------------------------
  p = {'spectral': MPIEspectral_result,
       'marc':     post_open,
      }[type](name + {'marc':     '.t16',
                      'spectral': '.spectralOut',
                     }[type])
  p.extrapolation('translate')
  p.moveto(1)
  return p
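# For example, OpenPostfile('mytest','spectral') reads 'mytest.spectralOut'
# through the MPIEspectral_result shim above, while type 'marc' goes through
# py_post's post_open; both objects then answer the same query interface.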
# -----------------------------
def ParseOutputFormat(filename,what,me):
#
# parse .output* files in order to get a list of outputs
# -----------------------------
  format = {'outputs':{},'specials':{'brothers':[]}}
  for prefix in ['']+map(str,range(1,17)):              # try the un-prefixed and all job-numbered variants of the .output file
    if os.path.exists(prefix+filename+'.output'+what):
      break
  try:
    file = open(prefix+filename+'.output'+what)
    content = file.readlines()
    file.close()
  except:
    return format

  tag = ''
  tagID = 0
  for line in content:
    if re.match(r"\s*$",line) or re.match("#",line):    # skip blank lines and comments
      continue
    m = re.match(r"\[(.+)\]",line)                      # look for block indicator
    if m:                                               # next section
      tag = m.group(1)
      tagID += 1
      format['specials']['brothers'].append(tag)
      if tag == me or (me.isdigit() and tagID == int(me)):
        format['specials']['_id'] = tagID
        format['outputs'] = []
        tag = me
    else:                                               # data from section
      if tag == me:
        (output,length) = line.split()
        if length.isdigit():
          length = int(length)
        if re.match(r"\((.+)\)",output):                # special data, e.g. (Ngrains)
          format['specials'][output] = length
        elif length > 0:
          format['outputs'].append([output,length])
  return format
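# A .output<what> file is expected to look like this sketch (section and
# output names invented for illustration):
#   [myHomogenization]
#   (ngrains)    2
#   someOutput   1
# i.e. bracketed section headers followed by 'name length' pairs;
# parenthesized names such as (ngrains) land in format['specials'].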
# -----------------------------
def ParsePostfile(p,filename, outputFormat):
#
# parse postfile in order to get position and labels of outputs
# needs "outputFormat" for mapping of output names to postfile output indices
# -----------------------------

  # --- build statistics

  stat = {
    'IndexOfLabel': {},
    'Title': p.title(),
    'Extrapolation': p.extrapolate,
    'NumberOfIncrements': p.increments(),
    'NumberOfNodes': p.nodes(),
    'NumberOfNodalScalars': p.node_scalars(),
    'LabelOfNodalScalar': [None]*p.node_scalars(),
    'NumberOfElements': p.elements(),
    'NumberOfElementalScalars': p.element_scalars(),
    'LabelOfElementalScalar': [None]*p.element_scalars(),
    'NumberOfElementalTensors': p.element_tensors(),
    'LabelOfElementalTensor': [None]*p.element_tensors(),
    }

  # --- find labels

  for labelIndex in range(stat['NumberOfNodalScalars']):
    label = p.node_scalar_label(labelIndex)
    stat['IndexOfLabel'][label] = labelIndex
    stat['LabelOfNodalScalar'][labelIndex] = label

  for labelIndex in range(stat['NumberOfElementalScalars']):
    label = p.element_scalar_label(labelIndex)
    stat['IndexOfLabel'][label] = labelIndex
    stat['LabelOfElementalScalar'][labelIndex] = label

  for labelIndex in range(stat['NumberOfElementalTensors']):
    label = p.element_tensor_label(labelIndex)
    stat['IndexOfLabel'][label] = labelIndex
    stat['LabelOfElementalTensor'][labelIndex] = label

  if 'User Defined Variable 1' in stat['IndexOfLabel']:
    stat['IndexOfLabel']['GrainCount'] = stat['IndexOfLabel']['User Defined Variable 1']

  if 'GrainCount' in stat['IndexOfLabel']:              # does the result file contain relevant user defined output at all?
    startIndex = stat['IndexOfLabel']['GrainCount'] - 1

    # We now have to find a mapping for each output label as defined in the .output* files to the output position in the post file.
    # Since we know where the user defined outputs start ("startIndex"), we can simply assign increasing indices to the labels
    # given in the .output* file.

    offset = 2
    stat['LabelOfElementalScalar'][startIndex + offset] = 'HomogenizationCount'
    for var in outputFormat['Homogenization']['outputs']:
      if var[1] > 1:
        for i in range(var[1]):
          stat['IndexOfLabel']['%i_%s'%(i+1,var[0])] = startIndex + offset + (i+1)
      else:
        stat['IndexOfLabel']['%s'%(var[0])] = startIndex + offset + 1
      offset += var[1]

    for grain in range(outputFormat['Homogenization']['specials']['(ngrains)']):
      stat['IndexOfLabel']['%i_CrystalliteCount'%(grain+1)] = startIndex + offset + 1
      offset += 1
      for var in outputFormat['Crystallite']['outputs']:
        if var[1] > 1:
          for i in range(var[1]):
            stat['IndexOfLabel']['%i_%i_%s'%(grain+1,i+1,var[0])] = startIndex + offset + (i+1)
        else:
          stat['IndexOfLabel']['%i_%s'%(grain+1,var[0])] = startIndex + offset + 1
        offset += var[1]

      stat['IndexOfLabel']['%i_ConstitutiveCount'%(grain+1)] = startIndex + offset + 1
      offset += 1
      for var in outputFormat['Constitutive']['outputs']:
        if var[1] > 1:
          for i in range(var[1]):
            stat['IndexOfLabel']['%i_%i_%s'%(grain+1,i+1,var[0])] = startIndex + offset + (i+1)
        else:
          stat['IndexOfLabel']['%i_%s'%(grain+1,var[0])] = startIndex + offset + 1
        offset += var[1]

  return stat
# -----------------------------
def SummarizePostfile(stat,where=sys.stdout):
# -----------------------------
  where.write('\n\n')
  where.write('title:\t%s'%stat['Title'] + '\n\n')
  where.write('extrapolation:\t%s'%stat['Extrapolation'] + '\n\n')
  where.write('increments:\t%i'%(stat['NumberOfIncrements']) + '\n\n')
  where.write('nodes:\t%i'%stat['NumberOfNodes'] + '\n\n')
  where.write('elements:\t%i'%stat['NumberOfElements'] + '\n\n')
  where.write('nodal scalars:\t%i'%stat['NumberOfNodalScalars'] + '\n\n  ' + '\n  '.join(stat['LabelOfNodalScalar']) + '\n\n')
  where.write('elemental scalars:\t%i'%stat['NumberOfElementalScalars'] + '\n\n  ' + '\n  '.join(stat['LabelOfElementalScalar']) + '\n\n')
  where.write('elemental tensors:\t%i'%stat['NumberOfElementalTensors'] + '\n\n  ' + '\n  '.join(stat['LabelOfElementalTensor']) + '\n\n')
  return True
# -----------------------------
# MAIN FUNCTION STARTS HERE
# -----------------------------
# --- input parsing
parser = OptionParser(option_class=MyOption, usage='%prog [options] resultfile', description = """
Extract data from a .t16 (MSC.Marc) or .spectralOut results file.
The list of output variables is given by the options '--ns','--es','--et','--ho','--cr','--co'.
Filter and separation criteria use 'elem','node','grain', and 'x','y','z' as keywords.
Examples:
1) get averaged results in slices perpendicular to x for all positive y coordinates
   --filter 'y >= 0.0' --separation x --map 'avg'
2) global sum of squared data falling into the first quadrant arc between R1 and R2
   --filter 'x*x + y*y >= R1*R1 and x*x + y*y <= R2*R2' --map 'lambda n,b,a: b+a*a'

$Id$
""")
parser.add_option('-i','--info', action='store_true', dest='info',
                  help='list contents of resultfile [%default]')
parser.add_option('-d','--dir', dest='directory',
                  help='name of subdirectory to hold output [%default]')
parser.add_option('-s','--split', action='store_true', dest='separateFiles',
                  help='split output per increment [%default]')
parser.add_option('-r','--range', dest='range', type='int', nargs=3,
                  help='range of increments to output (start, end, step) [all]')
parser.add_option('-m','--map', dest='func', type='string',
                  help='data reduction mapping ["%default"] out of min, max, avg, sum or user-lambda')
parser.add_option('-p','--type', dest='filetype', type='string',
                  help='type of result file [%default]')

group_material = OptionGroup(parser,'Material identifier')
group_special  = OptionGroup(parser,'Special outputs')
group_general  = OptionGroup(parser,'General outputs')

group_material.add_option('--homogenization', dest='homog', type='string',
                          help='homogenization identifier (as string or integer [%default])')
group_material.add_option('--crystallite', dest='cryst', type='string',
                          help='crystallite identifier (as string or integer [%default])')
group_material.add_option('--phase', dest='phase', type='string',
                          help='phase identifier (as string or integer [%default])')

group_special.add_option('-t','--time', action='store_true', dest='time',
                         help='output time of increment [%default]')
group_special.add_option('-f','--filter', dest='filter', type='string',
                         help='condition(s) to filter results [%default]')
group_special.add_option('--separation', action='extend', dest='separation', type='string',
                         help='properties to separate results [%default]')
group_special.add_option('--sort', action='extend', dest='sort', type='string',
                         help='properties to sort results [%default]')

group_general.add_option('--ns', action='extend', dest='nodalScalar', type='string',
                         help='list of nodal scalars to extract')
group_general.add_option('--es', action='extend', dest='elementalScalar', type='string',
                         help='list of elemental scalars to extract')
group_general.add_option('--et', action='extend', dest='elementalTensor', type='string',
                         help='list of elemental tensors to extract')
group_general.add_option('--ho', action='extend', dest='homogenizationResult', type='string',
                         help='list of homogenization results to extract')
group_general.add_option('--cr', action='extend', dest='crystalliteResult', type='string',
                         help='list of crystallite results to extract')
group_general.add_option('--co', action='extend', dest='constitutiveResult', type='string',
                         help='list of constitutive results to extract')

parser.add_option_group(group_material)
parser.add_option_group(group_general)
parser.add_option_group(group_special)
parser.set_defaults(info = False)
parser.set_defaults(directory = 'postProc')
parser.set_defaults(filetype = 'marc')
parser.set_defaults(func = 'avg')
parser.set_defaults(homog = '1')
parser.set_defaults(cryst = '1')
parser.set_defaults(phase = '1')
parser.set_defaults(filter = '')
parser.set_defaults(separation = [])
parser.set_defaults(sort = [])
parser.set_defaults(inc = False)
parser.set_defaults(time = False)
parser.set_defaults(separateFiles = False)
(options, files) = parser.parse_args()
options.filetype = options.filetype.lower()
if options.filetype == 'marc':
  try:
    file = open('%s/../MSCpath'%os.path.dirname(os.path.realpath(sys.argv[0])))
    MSCpath = os.path.normpath(file.readline().strip())
    file.close()
  except:
    MSCpath = '/msc'

  for release,subdirs in sorted(releases.items(),reverse=True):
    for subdir in subdirs:
      libPath = '%s/mentat%s/shlib/%s'%(MSCpath,release,subdir)
      if os.path.exists(libPath):
        sys.path.append(libPath)
        break
    else:
      continue
    break

  try:
    from py_post import *
  except:
    print('error: no valid Mentat release found in %s'%MSCpath)
    sys.exit(-1)
else:
  def post_open():
    return
# --- sanity checks

if files == []:
  parser.print_help()
  parser.error('no file specified...')

if not os.path.exists(files[0]):
  parser.print_help()
  parser.error('invalid file "%s" specified...'%files[0])

if options.filetype not in ['marc','spectral']:
  parser.print_help()
  parser.error('file type "%s" not supported...'%options.filetype)

if options.constitutiveResult and not options.phase:
  parser.print_help()
  parser.error('constitutive results require a phase...')

if options.nodalScalar and (   options.elementalScalar or options.elementalTensor
                            or options.homogenizationResult or options.crystalliteResult or options.constitutiveResult ):
  parser.print_help()
  parser.error('not allowed to mix nodal with elemental results...')

if not options.nodalScalar:          options.nodalScalar = []
if not options.elementalScalar:      options.elementalScalar = []
if not options.elementalTensor:      options.elementalTensor = []
if not options.homogenizationResult: options.homogenizationResult = []
if not options.crystalliteResult:    options.crystalliteResult = []
if not options.constitutiveResult:   options.constitutiveResult = []
# --- start background messaging

bg = backgroundMessage()
bg.start()

# --- parse .output and .t16 files

filename = os.path.splitext(files[0])[0]
dirname = os.path.abspath(os.path.dirname(filename))+os.sep+options.directory
if not os.path.isdir(dirname):
  os.mkdir(dirname,0755)

outputFormat = {}
me = {
  'Homogenization': options.homog,
  'Crystallite':    options.cryst,
  'Constitutive':   options.phase,
  }

bg.set_message('parsing .output files...')

for what in me:
  outputFormat[what] = ParseOutputFormat(filename, what, me[what])
  if not '_id' in outputFormat[what]['specials']:
    print "'%s' not found in <%s>"%(me[what], what)
    print '\n'.join(map(lambda x:'  '+x, outputFormat[what]['specials']['brothers']))
    sys.exit(1)

bg.set_message('opening result file...')
p = OpenPostfile(filename,options.filetype)
bg.set_message('parsing result file...')
stat = ParsePostfile(p, filename, outputFormat)
if options.filetype == 'marc':
  stat['NumberOfIncrements'] -= 1               # t16 contains one "virtual" increment (at 0)
# --- sanity check for output variables
# for mentat variables (nodalScalar,elementalScalar,elementalTensor) we simply check
#   whether the label is found in the stat['IndexOfLabel'] dictionary
# for user defined variables (homogenizationResult,crystalliteResult,constitutiveResult) we check
#   the corresponding outputFormat, since the naming scheme in stat['IndexOfLabel'] is different

for opt in ['nodalScalar','elementalScalar','elementalTensor','homogenizationResult','crystalliteResult','constitutiveResult']:
  for label in getattr(options, opt):
    if (opt in ['nodalScalar','elementalScalar','elementalTensor'] and label not in stat['IndexOfLabel']) \
       or (opt in ['homogenizationResult','crystalliteResult','constitutiveResult'] \
           and (not outputFormat[opt[:-6].capitalize()]['outputs'] \
                or label not in zip(*outputFormat[opt[:-6].capitalize()]['outputs'])[0])):
      parser.error('%s "%s" unknown...'%(opt,label))
# --- output info

if options.info:
  if options.filetype == 'marc':
    print '\n\nMentat release %s'%release
  SummarizePostfile(stat,sys.stderr)

  print '\nUser Defined Outputs'
  for what in me:
    print '\n  ',what,':'
    for output in outputFormat[what]['outputs']:
      print '    ',output

  sys.exit(0)
# --- get output data from .t16 file

increments = range(stat['NumberOfIncrements'])
if options.filetype == 'marc':
  offset_inc = 1
else:
  offset_inc = 0
if options.range:
  options.range = list(options.range)
  increments = range( max(0,options.range[0]),
                      min(stat['NumberOfIncrements'],options.range[1]+1),
                      options.range[2])
# --------------------------- build group membership --------------------------------

p.moveto(increments[0]+offset_inc)
index = {}
groups = []
groupCount = 0
memberCount = 0

if options.nodalScalar:
  for n in xrange(stat['NumberOfNodes']):
    if n%1000 == 0:
      bg.set_message('scan node %i...'%n)
    myNodeID = p.node_id(n)
    myNodeCoordinates = [p.node(n).x, p.node(n).y, p.node(n).z]
    myElemID = 0
    myGrainID = 0

    # --- filter valid locations
    filter = substituteLocation(options.filter, [myElemID,myNodeID,myGrainID], myNodeCoordinates)   # generates an expression that is only true for the locations specified by options.filter
    if filter != '' and not eval(filter):               # for all filter expressions that are not true:...
      continue                                          # ...ignore this data point and continue with the next

    # --- group data locations
    grp = substituteLocation('#'.join(options.separation), [myElemID,myNodeID,myGrainID], myNodeCoordinates)  # generates a unique key for a group of separated data based on the separation criterion for the location
    if grp not in index:                                # create a new group if not yet present
      index[grp] = groupCount
      groups.append([[0,0,0,0.0,0.0,0.0]])              # initialize with avg location
      groupCount += 1
    groups[index[grp]][0] = mapIncremental('','avg',
                                           len(groups[index[grp]])-1,
                                           groups[index[grp]][0],
                                           [myElemID,myNodeID,myGrainID] + myNodeCoordinates)       # incrementally update average location
    groups[index[grp]].append([myElemID,myNodeID,myGrainID,0])    # append a new list defining each group member (dummy local index keeps the member format uniform)
    memberCount += 1
else:
  for e in xrange(stat['NumberOfElements']):
    if e%1000 == 0:
      bg.set_message('scan elem %i...'%e)
    myElemID = p.element_id(e)
    myIpCoordinates = ipCoords(p.element(e).type, map(lambda node: [node.x, node.y, node.z],
                                                      map(p.node, map(p.node_sequence, p.element(e).items))))
    for n,myNodeID in enumerate(p.element(e).items):
      for g in range(('GrainCount' in stat['IndexOfLabel'] and int(p.element_scalar(e, stat['IndexOfLabel']['GrainCount'])[0].value))
                     or 1):
        myGrainID = g + 1

        # --- filter valid locations
        filter = substituteLocation(options.filter, [myElemID,myNodeID,myGrainID], myIpCoordinates[n])  # generates an expression that is only true for the locations specified by options.filter
        if filter != '' and not eval(filter):           # for all filter expressions that are not true:...
          continue                                      # ...ignore this data point and continue with the next

        # --- group data locations
        grp = substituteLocation('#'.join(options.separation), [myElemID,myNodeID,myGrainID], myIpCoordinates[n])  # generates a unique key for a group of separated data based on the separation criterion for the location
        if grp not in index:                            # create a new group if not yet present
          index[grp] = groupCount
          groups.append([[0,0,0,0.0,0.0,0.0]])          # initialize with avg location
          groupCount += 1
        groups[index[grp]][0] = mapIncremental('','avg',
                                               len(groups[index[grp]])-1,
                                               groups[index[grp]][0],
                                               [myElemID,myNodeID,myGrainID] + myIpCoordinates[n])  # incrementally update average location
        groups[index[grp]].append([myElemID,myNodeID,myGrainID,n])  # append a new list defining each group member
        memberCount += 1
# --------------------------- prevent avg of e,n,g --------------------------------

for grp in xrange(len(groups)):
  if len(groups[grp]) > 2:                              # group contains more than one member (avg location plus at least two entries)?
    groups[grp][0][:3] = ['n/a','n/a','n/a']            # no meaningful average of elem, node, or grain id
# --------------------------- sort groups --------------------------------

where = {
  'elem':  0,
  'node':  1,
  'grain': 2,
  'x': 3,
  'y': 4,
  'z': 5,
  }

sortProperties = []
for item in options.sort:
  if item not in options.separation:
    sortProperties.append(item)

theKeys = []
for criterium in options.separation+sortProperties:
  if criterium in where:
    theKeys.append('x[0][%i]'%where[criterium])
sortKeys = eval('lambda x:(%s)'%(','.join(theKeys)))
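# e.g. '--separation x,y' builds the key function 'lambda x:(x[0][3],x[0][4])',
# which sorts groups by the average x, then y, coordinate stored in each
# group's leading [elem,node,grain,x,y,z] entry.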
bg.set_message('sorting groups...')
groups.sort(key = sortKeys)                             # in-place sorting to save memory

fileOpen = False
assembleHeader = True
header = []

standard = ['inc'] + \
           {True:  ['time'],
            False: []}[options.time] + \
           ['elem','node','grain'] + \
           {True:  ['node.x','node.y','node.z'],
            False: ['ip.x','ip.y','ip.z']}[options.nodalScalar != []]
# --------------------------- loop over increments --------------------------------

time_start = time.time()

for incCount,increment in enumerate(increments):
  p.moveto(increment+offset_inc)

  # --------------------------- file management --------------------------------

  if options.separateFiles:
    if fileOpen:
      file.close()
      fileOpen = False
    digits = int(math.log10(max(increments+[1])))+1     # pad increment number to constant width
    outFilename = '%s_inc%s.txt'%(dirname + os.sep + os.path.split(filename)[1],
                                  str(increment).zfill(digits))
  else:
    outFilename = '%s.txt'%(dirname + os.sep + os.path.split(filename)[1])

  if not fileOpen:
    file = open(outFilename,'w')
    fileOpen = True
    file.write('2\theader\n')
    file.write('$Id$\n')
    headerWritten = False

  file.flush()
  # --------------------------- read and map data per group --------------------------------

  member = 0
  for i,group in enumerate(groups):
    N = 0                                               # group member counter
    for (e,n,g,n_local) in group[1:]:                   # loop over group members
      member += 1
      if member%1000 == 0:
        time_delta = ((len(increments)*memberCount)/float(member+incCount*memberCount)-1.0)*(time.time()-time_start)
        bg.set_message('(%02i:%02i:%02i) processing point %i of %i from increment %i...'
                       %(time_delta//3600,time_delta%3600//60,time_delta%60,member,memberCount,increment))

      newby = []                                        # current member's data

      if options.elementalScalar:
        for label in options.elementalScalar:
          if assembleHeader:
            header += [label.replace(' ','')]
          newby.append({'label':label,
                        'len':1,
                        'content':[ p.element_scalar(e,stat['IndexOfLabel'][label])[n_local].value ]})

      if options.elementalTensor:
        for label in options.elementalTensor:
          if assembleHeader:
            header += heading('.',[[label.replace(' ',''),component] for component in ['intensity','t11','t22','t33','t12','t23','t13']])
          myTensor = p.element_tensor(e,stat['IndexOfLabel'][label])[n_local]
          newby.append({'label':label,
                        'len':7,                        # intensity plus six tensor components
                        'content':[ myTensor.intensity,
                                    myTensor.t11, myTensor.t22, myTensor.t33,
                                    myTensor.t12, myTensor.t23, myTensor.t13,
                                  ]})

      if options.homogenizationResult or \
         options.crystalliteResult or \
         options.constitutiveResult:
        for (label,resultType) in zip(options.homogenizationResult +
                                      options.crystalliteResult +
                                      options.constitutiveResult,
                                      ['Homogenization']*len(options.homogenizationResult) +
                                      ['Crystallite']*len(options.crystalliteResult) +
                                      ['Constitutive']*len(options.constitutiveResult)
                                     ):
          outputIndex = list(zip(*outputFormat[resultType]['outputs'])[0]).index(label)     # find the position of this output in the outputFormat
          length = int(outputFormat[resultType]['outputs'][outputIndex][1])
          if resultType == 'Homogenization':
            thisHead = heading('_',[[component,label] for component in range(int(length>1),length+int(length>1))])
          else:
            thisHead = heading('_',[[g,component,label] for component in range(int(length>1),length+int(length>1))])
          if assembleHeader: header += thisHead
          newby.append({'label':label,
                        'len':length,
                        'content':[ p.element_scalar(e,stat['IndexOfLabel'][head])[n_local].value
                                    for head in thisHead ]})

      assembleHeader = False

      if N == 0: mappedResult = [0.0]*len(header)

      pos = 0
      for chunk in newby:
        mappedResult[pos:pos+chunk['len']] = mapIncremental(chunk['label'],options.func,
                                                            N,mappedResult[pos:pos+chunk['len']],chunk['content'])
        pos += chunk['len']

      N += 1
    # --- write data row to file ---

    if not headerWritten:
      file.write('\t'.join(standard + header) + '\n')
      headerWritten = True

    file.write('\t'.join(map(str,[increment] + \
                             {True:[p.time],False:[]}[options.time] + \
                             group[0] + \
                             mappedResult)
                        ) + '\n')

if fileOpen:
  file.close()

# --------------------------- DONE --------------------------------