From 482be626e0f165c2803f3a8d118dcc5530a4ea1b Mon Sep 17 00:00:00 2001 From: Philip Eisenlohr Date: Tue, 12 Apr 2011 17:46:35 +0000 Subject: [PATCH] major recoding. now with tiny memory footprint and better guessing of remaining time. --- processing/post/postResults | 1098 +++++++++++++++++------------------ 1 file changed, 546 insertions(+), 552 deletions(-) diff --git a/processing/post/postResults b/processing/post/postResults index f1dc03492..74693792b 100755 --- a/processing/post/postResults +++ b/processing/post/postResults @@ -1,6 +1,6 @@ #!/usr/bin/env python -import os, sys, math, re, threading, time, struct +import pdb, os, sys, gc, math, re, threading, time, struct from optparse import OptionParser, OptionGroup, Option, SUPPRESS_HELP releases = {'2010':['linux64',''], @@ -11,7 +11,7 @@ releases = {'2010':['linux64',''], # ----------------------------- -class vector: # mimic py_post node object +class vector: # mimic py_post node object # ----------------------------- x,y,z = [None,None,None] @@ -31,7 +31,7 @@ class element: # mimic py_post element object self.type = type # ----------------------------- -class elemental_scalar: # mimic py_post element_scalar object +class elemental_scalar: # mimic py_post element_scalar object # ----------------------------- id = None value = None @@ -42,7 +42,7 @@ class elemental_scalar: # mimic py_post element_scalar object # ----------------------------- -class MPIEspectral_result: # mimic py_post result object +class MPIEspectral_result: # mimic py_post result object # ----------------------------- file = None @@ -56,6 +56,7 @@ class MPIEspectral_result: # mimic py_post result object extrapolate = '' N_increments = 0 increment = 0 + time = 0.0 # this is a dummy at the moment, we need to parse the load file and figure out what time a particular increment corresponds to N_nodes = 0 N_node_scalars = 0 N_elements = 0 @@ -65,7 +66,7 @@ class MPIEspectral_result: # mimic py_post result object def __init__(self,filename): self.file = open(filename, 'rb') - + self.theTitle = self._keyedString('load') self.wd = self._keyedString('workingdir') self.geometry = self._keyedString('geometry') @@ -75,7 +76,7 @@ class MPIEspectral_result: # mimic py_post result object self.N_nodes = (self.resolution[0]+1)*(self.resolution[1]+1)*(self.resolution[2]+1) self.N_elements = self.resolution[0]*self.resolution[1]*self.resolution[2] self.dimension = self._keyedPackedArray('dimension',3,'d') - + self.file.seek(0) self.dataOffset = self.file.read(2048).find('eoh')+7 @@ -145,18 +146,18 @@ class MPIEspectral_result: # mimic py_post result object return vector([self.dimension[0] * (n%a) / self.resolution[0], self.dimension[1] * ((n/a)%b) / self.resolution[1], self.dimension[2] * ((n/a/b)%c) / self.resolution[2], - ]) + ]) def element_id(self,e): return e+1 - + def element(self,e): a = self.resolution[0]+1 b = self.resolution[1]+1 c = self.resolution[2]+1 basenode = e+e/self.resolution[0] + e/self.resolution[0]/self.resolution[1]*a basenode2 = basenode+a*b - return (element([basenode ,basenode+1 ,basenode+a+1 ,basenode+a, + return (element([basenode ,basenode +1,basenode +a+1,basenode +a, basenode2,basenode2+1,basenode2+a+1,basenode2+a, ],117)) @@ -193,56 +194,56 @@ class MyOption(Option): # ----------------------------- # used for definition of new option parser action 'extend', which enables to take multiple option arguments # taken from online tutorial http://docs.python.org/library/optparse.html - - ACTIONS = Option.ACTIONS + ("extend",) - STORE_ACTIONS = 
Option.STORE_ACTIONS + ("extend",) - TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",) - ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",) + + ACTIONS = Option.ACTIONS + ("extend",) + STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",) + TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",) + ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",) - def take_action(self, action, dest, opt, value, values, parser): - if action == "extend": - lvalue = value.split(",") - values.ensure_value(dest, []).extend(lvalue) - else: - Option.take_action(self, action, dest, opt, value, values, parser) + def take_action(self, action, dest, opt, value, values, parser): + if action == "extend": + lvalue = value.split(",") + values.ensure_value(dest, []).extend(lvalue) + else: + Option.take_action(self, action, dest, opt, value, values, parser) - + # ----------------------------- class backgroundMessage(threading.Thread): # ----------------------------- - - def __init__(self): - threading.Thread.__init__(self) - self.message = '' - self.new_message = '' - self.counter = 0 - self.symbols = ['- ', '\ ', '| ', '/ ',] - self.waittime = 0.5 - - def __quit__(self): - length = len(self.message) + len(self.symbols[self.counter]) - sys.stderr.write(chr(8)*length + ' '*length + chr(8)*length) - sys.stderr.write('') - - def run(self): - while not threading.enumerate()[0]._Thread__stopped: - time.sleep(self.waittime) - self.update_message() - self.__quit__() + + def __init__(self): + threading.Thread.__init__(self) + self.message = '' + self.new_message = '' + self.counter = 0 + self.symbols = ['- ', '\ ', '| ', '/ ',] + self.waittime = 0.5 + + def __quit__(self): + length = len(self.message) + len(self.symbols[self.counter]) + sys.stderr.write(chr(8)*length + ' '*length + chr(8)*length) + sys.stderr.write('') + + def run(self): + while not threading.enumerate()[0]._Thread__stopped: + time.sleep(self.waittime) + self.update_message() + self.__quit__() - def set_message(self, new_message): - self.new_message = new_message - self.print_message() + def set_message(self, new_message): + self.new_message = new_message + self.print_message() + + def print_message(self): + length = len(self.message) + len(self.symbols[self.counter]) + sys.stderr.write(chr(8)*length + ' '*length + chr(8)*length) # delete former message + sys.stderr.write(self.symbols[self.counter] + self.new_message) # print new message + self.message = self.new_message - def print_message(self): - length = len(self.message) + len(self.symbols[self.counter]) - sys.stderr.write(chr(8)*length + ' '*length + chr(8)*length) # delete former message - sys.stderr.write(self.symbols[self.counter] + self.new_message) # print new message - self.message = self.new_message - - def update_message(self): - self.counter = (self.counter + 1)%len(self.symbols) - self.print_message() + def update_message(self): + self.counter = (self.counter + 1)%len(self.symbols) + self.print_message() # ----------------------------- @@ -252,32 +253,32 @@ def ipCoords(elemType, nodalCoordinates): # ----------------------------- nodeWeightsPerNode = { - 7: [ [27.0, 9.0, 3.0, 9.0, 9.0, 3.0, 1.0, 3.0], - [ 9.0, 27.0, 9.0, 3.0, 3.0, 9.0, 3.0, 1.0], - [ 3.0, 9.0, 27.0, 9.0, 1.0, 3.0, 9.0, 3.0], - [ 9.0, 3.0, 9.0, 27.0, 3.0, 1.0, 3.0, 9.0], - [ 9.0, 3.0, 1.0, 3.0, 27.0, 9.0, 3.0, 9.0], - [ 3.0, 9.0, 3.0, 1.0, 9.0, 27.0, 9.0, 3.0], - [ 1.0, 3.0, 9.0, 3.0, 3.0, 9.0, 27.0, 9.0], - [ 3.0, 1.0, 3.0, 9.0, 9.0, 3.0, 9.0, 27.0] ], - 117: [ [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], - [ 
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], - [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], - [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], - [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], - [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], - [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], - [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] ], - 125: [ [ 3.0, 0.0, 0.0, 4.0, 1.0, 4.0], - [ 0.0, 3.0, 0.0, 4.0, 4.0, 1.0], - [ 0.0, 0.0, 3.0, 1.0, 4.0, 4.0],], - 136: [ [42.0, 15.0, 15.0, 14.0, 5.0, 5.0], - [15.0, 42.0, 15.0, 5.0, 14.0, 5.0], - [15.0, 15.0, 42.0, 5.0, 5.0, 14.0], - [14.0, 5.0, 5.0, 42.0, 15.0, 15.0], - [ 5.0, 14.0, 5.0, 15.0, 42.0, 15.0], - [ 5.0, 5.0, 14.0, 15.0, 15.0, 42.0] ], - } + 7: [ [27.0, 9.0, 3.0, 9.0, 9.0, 3.0, 1.0, 3.0], + [ 9.0, 27.0, 9.0, 3.0, 3.0, 9.0, 3.0, 1.0], + [ 3.0, 9.0, 27.0, 9.0, 1.0, 3.0, 9.0, 3.0], + [ 9.0, 3.0, 9.0, 27.0, 3.0, 1.0, 3.0, 9.0], + [ 9.0, 3.0, 1.0, 3.0, 27.0, 9.0, 3.0, 9.0], + [ 3.0, 9.0, 3.0, 1.0, 9.0, 27.0, 9.0, 3.0], + [ 1.0, 3.0, 9.0, 3.0, 3.0, 9.0, 27.0, 9.0], + [ 3.0, 1.0, 3.0, 9.0, 9.0, 3.0, 9.0, 27.0] ], + 117: [ [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], + [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], + [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], + [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], + [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], + [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], + [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], + [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] ], + 125: [ [ 3.0, 0.0, 0.0, 4.0, 1.0, 4.0], + [ 0.0, 3.0, 0.0, 4.0, 4.0, 1.0], + [ 0.0, 0.0, 3.0, 1.0, 4.0, 4.0],], + 136: [ [42.0, 15.0, 15.0, 14.0, 5.0, 5.0], + [15.0, 42.0, 15.0, 5.0, 14.0, 5.0], + [15.0, 15.0, 42.0, 5.0, 5.0, 14.0], + [14.0, 5.0, 5.0, 42.0, 15.0, 15.0], + [ 5.0, 14.0, 5.0, 15.0, 42.0, 15.0], + [ 5.0, 5.0, 14.0, 15.0, 15.0, 42.0] ], + } ipCoordinates = [[0.0,0.0,0.0] for i in range(len(nodalCoordinates))] for ip in range(len(nodeWeightsPerNode[elemType])): @@ -290,27 +291,6 @@ def ipCoords(elemType, nodalCoordinates): return ipCoordinates -# ----------------------------- -def sortBySeparation(dataArray, criteria, offset): -# -# sorting of groupValue array according to list of criteria -# ----------------------------- - where = { - 'elem': 1, - 'node': 2, - 'grain': 3, - 'x': 4, - 'y': 5, - 'z': 6, - } - - theKeys = [] - for criterium in criteria: - if criterium in where: - theKeys.append('x[%i]'%(offset+where[criterium])) - exec('sortedArray = sorted(dataArray,key=lambda x:(%s))'%(','.join(theKeys))) - return sortedArray - # ----------------------------- def substituteLocation(string, mesh, coords): @@ -327,71 +307,89 @@ def substituteLocation(string, mesh, coords): return substitute + # ----------------------------- -def average(theList): +def heading(glue,parts): # -# calcs the average of a list of numbers +# joins pieces from parts by glue. 
second to last entry in pieces tells multiplicity # ----------------------------- - - return sum(map(float,theList))/len(theList) + + header = [] + for pieces in parts: + if pieces[-2] == 0: + del pieces[-2] + header.append(glue.join(map(str,pieces))) + + return header # ----------------------------- -def mapFunc(label, chunks, func): +def illegalMap(map, label): # -# applies the function defined by "func" -# (can be either 'min','max','avg', 'sum', or user specified) -# to a list of lists of data +# answers whether map is illegal to be applied to data what # ----------------------------- illegal = { 'eulerangles': ['min','max','avg','sum'], 'defgrad': ['min','max','avg','sum'], - 'orientation': ['min','max','sum'], + 'orientation': ['min','max', 'sum'], } - if label.lower() in illegal and func in illegal[label.lower()]: # for illegal mappings:... - return ['n/a' for i in range(len(chunks[0]))] # ...return 'n/a' + return label.lower() in illegal and map in illegal[label.lower()] + + +# ----------------------------- +def mapIncremental(label, mapping, N, base, new): +# +# applies the function defined by "mapping" +# (can be either 'min','max','avg', 'sum', or user specified) +# to a list of data +# ----------------------------- + + if illegalMap(mapping,label): # for illegal mappings:... + return ['n/a'*len(base)] # ...return 'n/a' else: - if func in ['min','max','avg']: - mapped = [{ 'min': lambda x: min(x), - 'max': lambda x: max(x), - 'avg': lambda x: average(x), - 'sum': lambda x: sum(x), - }[func](column) for column in zip(*chunks)] # map one of the standard functions to colums in chunks - if label.lower() == 'orientation': # orientation is special case:... - orientationNorm = math.sqrt(sum([q*q for q in mapped])) # ...calc norm of average quaternion - mapped = map(lambda x: x/orientationNorm, mapped) # ...renormalize quaternion + if mapping in ['min','max','avg','sum']: + mapped = map( + { 'min': lambda n,b,a: min(b,a), + 'max': lambda n,b,a: max(b,a), + 'avg': lambda n,b,a: (n*b+a)/(n+1), + 'sum': lambda n,b,a: b+a, + }[mapping],[N]*len(base),base,new) # map one of the standard functions to data + if label.lower() == 'orientation': # orientation is special case:... 
+ orientationNorm = math.sqrt(sum([q*q for q in mapped])) # ...calc norm of average quaternion + mapped = map(lambda x: x/orientationNorm, mapped) # ...renormalize quaternion else: - try: - mapped = eval('map(%s,zip(*chunks))'%func) # map user defined function to colums in chunks - except: - mapped = ['n/a' for i in range(len(chunks[0]))] - + try: + mapped = eval('map(%s,N*len(base),base,new)'%map) # map user defined function to colums in chunks + except: + mapped = ['n/a'*len(base)] + return mapped + # ----------------------------- def OpenPostfile(name,type): # # open postfile with extrapolation mode "translate" # ----------------------------- - p = {\ - 'spectral': MPIEspectral_result,\ - 'marc': post_open,\ - }[type]\ - (name+ - {\ - 'marc': '.t16',\ - 'spectral': '.spectralOut',\ - }[type] - ) - p.extrapolation('translate') - p.moveto(1) - - return p + p = {\ + 'spectral': MPIEspectral_result,\ + 'marc': post_open,\ + }[type]\ + (name+ + {\ + 'marc': '.t16',\ + 'spectral': '.spectralOut',\ + }[type] + ) + p.extrapolation('translate') + p.moveto(1) + + return p # ----------------------------- @@ -400,43 +398,43 @@ def ParseOutputFormat(filename,what,me): # parse .output* files in order to get a list of outputs # ----------------------------- - format = {'outputs':{},'specials':{'brothers':[]}} - for prefix in ['']+map(str,range(1,17)): - if os.path.exists(prefix+filename+'.output'+what): - break - try: - file = open(prefix+filename+'.output'+what) - content = file.readlines() - file.close() - except: - return format - - tag = '' - tagID = 0 - for line in content: - if re.match("\s*$",line) or re.match("#",line): # skip blank lines and comments - continue - m = re.match("\[(.+)\]",line) # look for block indicator - if m: # next section - tag = m.group(1) - tagID += 1 - format['specials']['brothers'].append(tag) - if tag == me or (me.isdigit() and tagID == int(me)): - format['specials']['_id'] = tagID - format['outputs'] = [] - tag = me - else: # data from section - if tag == me: - (output,length) = line.split() - output.lower() - if length.isdigit(): - length = int(length) - if re.match("\((.+)\)",output): # special data, e.g. (Ngrains) - format['specials'][output] = length - elif length > 0: - format['outputs'].append([output,length]) + format = {'outputs':{},'specials':{'brothers':[]}} + for prefix in ['']+map(str,range(1,17)): + if os.path.exists(prefix+filename+'.output'+what): + break + try: + file = open(prefix+filename+'.output'+what) + content = file.readlines() + file.close() + except: return format + tag = '' + tagID = 0 + for line in content: + if re.match("\s*$",line) or re.match("#",line): # skip blank lines and comments + continue + m = re.match("\[(.+)\]",line) # look for block indicator + if m: # next section + tag = m.group(1) + tagID += 1 + format['specials']['brothers'].append(tag) + if tag == me or (me.isdigit() and tagID == int(me)): + format['specials']['_id'] = tagID + format['outputs'] = [] + tag = me + else: # data from section + if tag == me: + (output,length) = line.split() + output.lower() + if length.isdigit(): + length = int(length) + if re.match("\((.+)\)",output): # special data, e.g. 
(Ngrains) + format['specials'][output] = length + elif length > 0: + format['outputs'].append([output,length]) + return format + # ----------------------------- def ParsePostfile(p,filename, outputFormat): @@ -445,99 +443,99 @@ def ParsePostfile(p,filename, outputFormat): # needs "outputFormat" for mapping of output names to postfile output indices # ----------------------------- - # --- build statistics + # --- build statistics - stat = { \ - 'IndexOfLabel': {}, \ - 'Title': p.title(), \ - 'Extrapolation': p.extrapolate, \ - 'NumberOfIncrements': p.increments(), \ - 'NumberOfNodes': p.nodes(), \ - 'NumberOfNodalScalars': p.node_scalars(), \ - 'LabelOfNodalScalar': [None]*p.node_scalars() , \ - 'NumberOfElements': p.elements(), \ - 'NumberOfElementalScalars': p.element_scalars(), \ - 'LabelOfElementalScalar': [None]*p.element_scalars() , \ - 'NumberOfElementalTensors': p.element_tensors(), \ - 'LabelOfElementalTensor': [None]*p.element_tensors(), \ - } + stat = { \ + 'IndexOfLabel': {}, \ + 'Title': p.title(), \ + 'Extrapolation': p.extrapolate, \ + 'NumberOfIncrements': p.increments(), \ + 'NumberOfNodes': p.nodes(), \ + 'NumberOfNodalScalars': p.node_scalars(), \ + 'LabelOfNodalScalar': [None]*p.node_scalars() , \ + 'NumberOfElements': p.elements(), \ + 'NumberOfElementalScalars': p.element_scalars(), \ + 'LabelOfElementalScalar': [None]*p.element_scalars() , \ + 'NumberOfElementalTensors': p.element_tensors(), \ + 'LabelOfElementalTensor': [None]*p.element_tensors(), \ + } - # --- find labels + # --- find labels - for labelIndex in range(stat['NumberOfNodalScalars']): - label = p.node_scalar_label(labelIndex) - stat['IndexOfLabel'][label] = labelIndex - stat['LabelOfNodalScalar'][labelIndex] = label + for labelIndex in range(stat['NumberOfNodalScalars']): + label = p.node_scalar_label(labelIndex) + stat['IndexOfLabel'][label] = labelIndex + stat['LabelOfNodalScalar'][labelIndex] = label - for labelIndex in range(stat['NumberOfElementalScalars']): - label = p.element_scalar_label(labelIndex) - stat['IndexOfLabel'][label] = labelIndex - stat['LabelOfElementalScalar'][labelIndex] = label + for labelIndex in range(stat['NumberOfElementalScalars']): + label = p.element_scalar_label(labelIndex) + stat['IndexOfLabel'][label] = labelIndex + stat['LabelOfElementalScalar'][labelIndex] = label - for labelIndex in range(stat['NumberOfElementalTensors']): - label = p.element_tensor_label(labelIndex) - stat['IndexOfLabel'][label] = labelIndex - stat['LabelOfElementalTensor'][labelIndex] = label + for labelIndex in range(stat['NumberOfElementalTensors']): + label = p.element_tensor_label(labelIndex) + stat['IndexOfLabel'][label] = labelIndex + stat['LabelOfElementalTensor'][labelIndex] = label + + if 'User Defined Variable 1' in stat['IndexOfLabel']: + stat['IndexOfLabel']['GrainCount'] = stat['IndexOfLabel']['User Defined Variable 1'] + + if 'GrainCount' in stat['IndexOfLabel']: # does the result file contain relevant user defined output at all? 
+ startIndex = stat['IndexOfLabel']['GrainCount'] - 1 - if 'User Defined Variable 1' in stat['IndexOfLabel']: - stat['IndexOfLabel']['GrainCount'] = stat['IndexOfLabel']['User Defined Variable 1'] + # We now have to find a mapping for each output label as defined in the .output* files to the output position in the post file + # Since we know where the user defined outputs start ("startIndex"), we can simply assign increasing indices to the labels + # given in the .output* file - if 'GrainCount' in stat['IndexOfLabel']: # does the result file contain relevant user defined output at all? - startIndex = stat['IndexOfLabel']['GrainCount'] - 1 - - # We now have to find a mapping for each output label as defined in the .output* files to the output position in the post file - # Since we know where the user defined outputs start ("startIndex"), we can simply assign increasing indices to the labels - # given in the .output* file - - offset = 2 - stat['LabelOfElementalScalar'][startIndex + offset] = 'HomogenizationCount' - for var in outputFormat['Homogenization']['outputs']: - if var[1] > 1: - for i in range(var[1]): - stat['IndexOfLabel']['%i_%s'%(i+1,var[0])] = startIndex + offset + (i+1) - else: - stat['IndexOfLabel']['%s'%(var[0])] = startIndex + offset + 1 - offset += var[1] - - for grain in range(outputFormat['Homogenization']['specials']['(ngrains)']): - stat['IndexOfLabel']['%i_CrystalliteCount'%(grain+1)] = startIndex + offset + 1 - offset += 1 - for var in outputFormat['Crystallite']['outputs']: - if var[1] > 1: - for i in range(var[1]): - stat['IndexOfLabel']['%i_%i_%s'%(grain+1,i+1,var[0])] = startIndex + offset + (i+1) - else: - stat['IndexOfLabel']['%i_%s'%(grain+1,var[0])] = startIndex + offset + 1 - offset += var[1] + offset = 2 + stat['LabelOfElementalScalar'][startIndex + offset] = 'HomogenizationCount' + for var in outputFormat['Homogenization']['outputs']: + if var[1] > 1: + for i in range(var[1]): + stat['IndexOfLabel']['%i_%s'%(i+1,var[0])] = startIndex + offset + (i+1) + else: + stat['IndexOfLabel']['%s'%(var[0])] = startIndex + offset + 1 + offset += var[1] + + for grain in range(outputFormat['Homogenization']['specials']['(ngrains)']): + stat['IndexOfLabel']['%i_CrystalliteCount'%(grain+1)] = startIndex + offset + 1 + offset += 1 + for var in outputFormat['Crystallite']['outputs']: + if var[1] > 1: + for i in range(var[1]): + stat['IndexOfLabel']['%i_%i_%s'%(grain+1,i+1,var[0])] = startIndex + offset + (i+1) + else: + stat['IndexOfLabel']['%i_%s'%(grain+1,var[0])] = startIndex + offset + 1 + offset += var[1] - stat['IndexOfLabel']['%i_ConstitutiveCount'%(grain+1)] = startIndex + offset + 1 - offset += 1 - for var in outputFormat['Constitutive']['outputs']: - if var[1] > 1: - for i in range(var[1]): - stat['IndexOfLabel']['%i_%i_%s'%(grain+1,i+1,var[0])] = startIndex + offset + (i+1) - else: - stat['IndexOfLabel']['%i_%s'%(grain+1,var[0])] = startIndex + offset + 1 - offset += var[1] - - return stat + stat['IndexOfLabel']['%i_ConstitutiveCount'%(grain+1)] = startIndex + offset + 1 + offset += 1 + for var in outputFormat['Constitutive']['outputs']: + if var[1] > 1: + for i in range(var[1]): + stat['IndexOfLabel']['%i_%i_%s'%(grain+1,i+1,var[0])] = startIndex + offset + (i+1) + else: + stat['IndexOfLabel']['%i_%s'%(grain+1,var[0])] = startIndex + offset + 1 + offset += var[1] + + return stat # ----------------------------- def SummarizePostfile(stat,where=sys.stdout): # ----------------------------- - where.write('\n\n') - where.write('title:\t%s'%stat['Title'] + '\n\n') - 
where.write('extraplation:\t%s'%stat['Extrapolation'] + '\n\n') - where.write('increments:\t%i'%(stat['NumberOfIncrements']) + '\n\n') - where.write('nodes:\t%i'%stat['NumberOfNodes'] + '\n\n') - where.write('elements:\t%i'%stat['NumberOfElements'] + '\n\n') - where.write('nodal scalars:\t%i'%stat['NumberOfNodalScalars'] + '\n\n ' + '\n '.join(stat['LabelOfNodalScalar']) + '\n\n') - where.write('elemental scalars:\t%i'%stat['NumberOfElementalScalars'] + '\n\n ' + '\n '.join(stat['LabelOfElementalScalar']) + '\n\n') - where.write('elemental tensors:\t%i'%stat['NumberOfElementalTensors'] + '\n\n ' + '\n '.join(stat['LabelOfElementalTensor']) + '\n\n') - - return True + where.write('\n\n') + where.write('title:\t%s'%stat['Title'] + '\n\n') + where.write('extraplation:\t%s'%stat['Extrapolation'] + '\n\n') + where.write('increments:\t%i'%(stat['NumberOfIncrements']) + '\n\n') + where.write('nodes:\t%i'%stat['NumberOfNodes'] + '\n\n') + where.write('elements:\t%i'%stat['NumberOfElements'] + '\n\n') + where.write('nodal scalars:\t%i'%stat['NumberOfNodalScalars'] + '\n\n ' + '\n '.join(stat['LabelOfNodalScalar']) + '\n\n') + where.write('elemental scalars:\t%i'%stat['NumberOfElementalScalars'] + '\n\n ' + '\n '.join(stat['LabelOfElementalScalar']) + '\n\n') + where.write('elemental tensors:\t%i'%stat['NumberOfElementalTensors'] + '\n\n ' + '\n '.join(stat['LabelOfElementalTensor']) + '\n\n') + + return True # ----------------------------- @@ -562,49 +560,49 @@ $Id$ """) parser.add_option('-i','--info', action='store_true', dest='info', \ - help='list contents of resultfile [%default]') + help='list contents of resultfile [%default]') parser.add_option('-d','--dir', dest='directory', \ - help='name of subdirectory to hold output [%default]') + help='name of subdirectory to hold output [%default]') parser.add_option('-s','--split', action='store_true', dest='separateFiles', \ - help='split output per increment [%default]') + help='split output per increment [%default]') parser.add_option('-r','--range', dest='range', type='int', nargs=3, \ - help='range of increments to output (start, end, step) [all]') + help='range of increments to output (start, end, step) [all]') parser.add_option('-m','--map', dest='func', type='string', \ - help='data reduction mapping ["%default"] out of min, max, avg, sum or user-lambda') + help='data reduction mapping ["%default"] out of min, max, avg, sum or user-lambda') parser.add_option('-p','--type', dest='filetype', type='string', \ - help = 'type of result file [%default]') + help = 'type of result file [%default]') group_material = OptionGroup(parser,'Material identifier') group_special = OptionGroup(parser,'Special outputs') group_general = OptionGroup(parser,'General outputs') group_material.add_option('--homogenization', dest='homog', type='string', \ - help='homogenization identifier (as string or integer [%default])') + help='homogenization identifier (as string or integer [%default])') group_material.add_option('--crystallite', dest='cryst', type='string', \ - help='crystallite identifier (as string or integer [%default])') + help='crystallite identifier (as string or integer [%default])') group_material.add_option('--phase', dest='phase', type='string', \ - help='phase identifier (as string or integer [%default])') + help='phase identifier (as string or integer [%default])') group_special.add_option('-t','--time', action='store_true', dest='time', \ - help='output time of increment [%default]') + help='output time of increment [%default]') 
group_special.add_option('-f','--filter', dest='filter', type='string', \ - help='condition(s) to filter results [%default]') + help='condition(s) to filter results [%default]') group_special.add_option('--separation', action='extend', dest='separation', type='string', \ - help='properties to separate results [%default]') + help='properties to separate results [%default]') group_special.add_option('--sort', action='extend', dest='sort', type='string', \ - help='properties to sort results [%default]') + help='properties to sort results [%default]') group_general.add_option('--ns', action='extend', dest='nodalScalar', type='string', \ - help='list of nodal scalars to extract') + help='list of nodal scalars to extract') group_general.add_option('--es', action='extend', dest='elementalScalar', type='string', \ - help='list of elemental scalars to extract') + help='list of elemental scalars to extract') group_general.add_option('--et', action='extend', dest='elementalTensor', type='string', \ - help='list of elemental tensors to extract') + help='list of elemental tensors to extract') group_general.add_option('--ho', action='extend', dest='homogenizationResult', type='string', \ - help='list of homogenization results to extract') + help='list of homogenization results to extract') group_general.add_option('--cr', action='extend', dest='crystalliteResult', type='string', \ - help='list of crystallite results to extract') + help='list of crystallite results to extract') group_general.add_option('--co', action='extend', dest='constitutiveResult', type='string', \ - help='list of constitutive results to extract') + help='list of constitutive results to extract') parser.add_option_group(group_material) parser.add_option_group(group_general) @@ -629,28 +627,28 @@ parser.set_defaults(separateFiles = False) options.filetype = options.filetype.lower() if options.filetype == 'marc': - try: - file = open('%s/../MSCpath'%os.path.dirname(os.path.realpath(sys.argv[0]))) - MSCpath = os.path.normpath(file.readline().strip()) - file.close() - except: - MSCpath = '/msc' - - for release,subdirs in sorted(releases.items(),reverse=True): - for subdir in subdirs: - libPath = '%s/mentat%s/shlib/%s'%(MSCpath,release,subdir) - if os.path.exists(libPath): - sys.path.append(libPath) - break - else: - continue - break - - try: - from py_post import * - except: - print('error: no valid Mentat release found in %s'%MSCpath) - sys.exit(-1) + try: + file = open('%s/../MSCpath'%os.path.dirname(os.path.realpath(sys.argv[0]))) + MSCpath = os.path.normpath(file.readline().strip()) + file.close() + except: + MSCpath = '/msc' + + for release,subdirs in sorted(releases.items(),reverse=True): + for subdir in subdirs: + libPath = '%s/mentat%s/shlib/%s'%(MSCpath,release,subdir) + if os.path.exists(libPath): + sys.path.append(libPath) + break + else: + continue + break + + try: + from py_post import * + except: + print('error: no valid Mentat release found in %s'%MSCpath) + sys.exit(-1) else: def post_open(): return @@ -660,26 +658,32 @@ else: # --- sanity checks if files == []: - parser.print_help() - parser.error('no file specified...') + parser.print_help() + parser.error('no file specified...') if not os.path.exists(files[0]): - parser.print_help() - parser.error('invalid file "%s" specified...'%files[0]) + parser.print_help() + parser.error('invalid file "%s" specified...'%files[0]) if options.filetype not in ['marc','spectral']: - parser.print_help() - parser.error('file type "%s" not supported...'%options.filetype) + 
parser.print_help() + parser.error('file type "%s" not supported...'%options.filetype) if options.constitutiveResult and not options.phase: - parser.print_help() - parser.error('constitutive results require phase...') + parser.print_help() + parser.error('constitutive results require phase...') if options.nodalScalar and ( options.elementalScalar or options.elementalTensor or options.homogenizationResult or options.crystalliteResult or options.constitutiveResult ): - parser.print_help() - parser.error('not allowed to mix nodal with elemental results...') + parser.print_help() + parser.error('not allowed to mix nodal with elemental results...') +if not options.nodalScalar: options.nodalScalar = [] +if not options.elementalScalar: options.elementalScalar = [] +if not options.elementalTensor: options.elementalTensor = [] +if not options.homogenizationResult: options.homogenizationResult = [] +if not options.crystalliteResult: options.crystalliteResult = [] +if not options.constitutiveResult: options.constitutiveResult = [] # --- start background messaging @@ -691,59 +695,59 @@ bg.start() filename = os.path.splitext(files[0])[0] dirname = os.path.abspath(os.path.dirname(filename))+os.sep+options.directory if not os.path.isdir(dirname): - os.mkdir(dirname,0755) + os.mkdir(dirname,0755) outputFormat = {} me = { - 'Homogenization': options.homog, - 'Crystallite': options.cryst, - 'Constitutive': options.phase, + 'Homogenization': options.homog, + 'Crystallite': options.cryst, + 'Constitutive': options.phase, } bg.set_message('parsing .output files...') for what in me: - outputFormat[what] = ParseOutputFormat(filename, what, me[what]) - if not '_id' in outputFormat[what]['specials']: - print "'%s' not found in <%s>"%(me[what], what) - print '\n'.join(map(lambda x:' '+x, outputFormat[what]['specials']['brothers'])) - sys.exit(1) - + outputFormat[what] = ParseOutputFormat(filename, what, me[what]) + if not '_id' in outputFormat[what]['specials']: + print "'%s' not found in <%s>"%(me[what], what) + print '\n'.join(map(lambda x:' '+x, outputFormat[what]['specials']['brothers'])) + sys.exit(1) + bg.set_message('opening result file...') p = OpenPostfile(filename,options.filetype) bg.set_message('parsing result file...') stat = ParsePostfile(p, filename, outputFormat) if options.filetype == 'marc': - stat['NumberOfIncrements'] -= 1 # t16 contains one "virtual" increment (at 0) + stat['NumberOfIncrements'] -= 1 # t16 contains one "virtual" increment (at 0) # --- sanity check for output variables # for mentat variables (nodalScalar,elementalScalar,elementalTensor) we simply have to check whether the label is found in the stat[indexOfLabel] dictionary # for user defined variables (homogenizationResult,crystalliteResult,constitutiveResult) we have to check the corresponding outputFormat, since the namescheme in stat['IndexOfLabel'] is different for opt in ['nodalScalar','elementalScalar','elementalTensor','homogenizationResult','crystalliteResult','constitutiveResult']: - if eval('options.%s'%opt): - for label in eval('options.%s'%opt): - if (opt in ['nodalScalar','elementalScalar','elementalTensor'] and not label in stat['IndexOfLabel']) \ - or (opt in ['homogenizationResult','crystalliteResult','constitutiveResult'] \ - and (not outputFormat[opt[:-6].capitalize()]['outputs'] or not label in zip(*outputFormat[opt[:-6].capitalize()]['outputs'])[0])): - parser.error('%s "%s" unknown...'%(opt,label)) + if eval('options.%s'%opt): + for label in eval('options.%s'%opt): + if (opt in 
['nodalScalar','elementalScalar','elementalTensor'] and not label in stat['IndexOfLabel']) \ + or (opt in ['homogenizationResult','crystalliteResult','constitutiveResult'] \ + and (not outputFormat[opt[:-6].capitalize()]['outputs'] or not label in zip(*outputFormat[opt[:-6].capitalize()]['outputs'])[0])): + parser.error('%s "%s" unknown...'%(opt,label)) # --- output info if options.info: - if options.filetype == 'marc': - print '\n\nMentat release %s'%release + if options.filetype == 'marc': + print '\n\nMentat release %s'%release - SummarizePostfile(stat,sys.stderr) - - print '\nUser Defined Outputs' - for what in me: - print '\n ',what,':' - for output in outputFormat[what]['outputs']: - print ' ',output - - sys.exit(0) + SummarizePostfile(stat,sys.stderr) + + print '\nUser Defined Outputs' + for what in me: + print '\n ',what,':' + for output in outputFormat[what]['outputs']: + print ' ',output + + sys.exit(0) # --- get output data from .t16 file @@ -760,237 +764,227 @@ if options.range: min(stat['NumberOfIncrements'],options.range[1]+1), options.range[2]) + +# --------------------------- build group membership -------------------------------- + +p.moveto(increments[0]+offset_inc) +index = {} +groups = [] +groupCount = 0 +memberCount = 0 + +if options.nodalScalar: + for n in xrange(stat['NumberOfNodes']): + if n%1000 == 0: + bg.set_message('scan node %i...'%n) + myNodeID = p.node_id(n) + myNodeCoordinates = [p.node(n).x, p.node(n).y, p.node(n).z] + myElemID = 0 + myGrainID = 0 + + # --- filter valid locations + + filter = substituteLocation(options.filter, [myElemID,myNodeID,myGrainID], myNodeCoordinates) # generates an expression that is only true for the locations specified by options.filter + if filter != '' and not eval(filter): # for all filter expressions that are not true:... + continue # ... ignore this data point and continue with next + + # --- group data locations + + grp = substituteLocation('#'.join(options.separation), [myElemID,myNodeID,myGrainID], myNodeCoordinates) # generates a unique key for a group of separated data based on the separation criterium for the location + + if grp not in index: # create a new group if not yet present + index[grp] = groupCount + groups[groupCount] = [[0,0,0,0.0,0.0,0.0]] # initialize with avg location + groupCount += 1 + + groups[index[grp]][0] = mapIncremental('','avg', + len(groups[index[grp]])-1, + groups[index[grp]][0], + [myElemID,myNodeID,myGrainID] + myNodeCoordinates) # incrementally update average location + groups[index[grp]].append([myElemID,myNodeID,myGrainID]) # append a new list defining each group member + memberCount += 1 + +else: + for e in xrange(stat['NumberOfElements']): + if e%1000 == 0: + bg.set_message('scan elem %i...'%e) + myElemID = p.element_id(e) + myIpCoordinates = ipCoords(p.element(e).type, map(lambda node: [node.x, node.y, node.z], map(p.node, map(p.node_sequence,p.element(e).items)))) + for n,myNodeID in enumerate(p.element(e).items): + for g in range(('GrainCount' in stat['IndexOfLabel'] and int(p.element_scalar(e, stat['IndexOfLabel']['GrainCount'])[0].value)) + or 1): + myGrainID = g + 1 + + # --- filter valid locations + + filter = substituteLocation(options.filter, [myElemID,myNodeID,myGrainID], myIpCoordinates[n]) # generates an expression that is only true for the locations specified by options.filter + if filter != '' and not eval(filter): # for all filter expressions that are not true:... + continue # ... 
ignore this data point and continue with next + + # --- group data locations + + grp = substituteLocation('#'.join(options.separation), [myElemID,myNodeID,myGrainID], myIpCoordinates[n]) # generates a unique key for a group of separated data based on the separation criterium for the location + + if grp not in index: # create a new group if not yet present + index[grp] = groupCount + groups.append([[0,0,0,0.0,0.0,0.0]]) # initialize with avg location + groupCount += 1 + + groups[index[grp]][0] = mapIncremental('','avg', + len(groups[index[grp]])-1, + groups[index[grp]][0], + [myElemID,myNodeID,myGrainID] + myIpCoordinates[n]) # incrementally update average location + groups[index[grp]].append([myElemID,myNodeID,myGrainID,n]) # append a new list defining each group member + memberCount += 1 + +# --------------------------- prevent avg of e,n,g -------------------------------- + +for grp in xrange(len(groups)): + if len(groups[grp]) > 2: # more than one member in group? (avgLoc + 2+ entries?) + groups[grp][0][:3] = ['n/a','n/a','n/a'] # no avg value for elem, ip, or grain meaningful + +# --------------------------- sort groups -------------------------------- + +where = { + 'elem': 0, + 'node': 1, + 'grain': 2, + 'x': 3, + 'y': 4, + 'z': 5, + } + +sortProperties = [] +for item in options.sort: + if item not in options.separation: + sortProperties.append(item) + +theKeys = [] +for criterium in options.separation+sortProperties: + if criterium in where: + theKeys.append('x[0][%i]'%where[criterium]) + +sortKeys = eval('lambda x:(%s)'%(','.join(theKeys))) +bg.set_message('sorting groups...') +groups.sort(key = sortKeys) # in-place sorting to save mem + + fileOpen = False assembleHeader = True header = [] +standard = ['inc'] + \ + {True: ['time'], + False:[]}[options.time] + \ + ['elem','node','grain'] + \ + {True: ['node.x','node.y','node.z'], + False:['ip.x','ip.y','ip.z']}[options.nodalScalar != []] -element_scalar = {} -element_tensor = {} - - -# --- loop over increments +# --------------------------- loop over increments -------------------------------- time_start = time.time() for incCount,increment in enumerate(increments): - p.moveto(increment+offset_inc) - data = {} - - if options.nodalScalar: - for n in range(stat['NumberOfNodes']): - if n%100 == 0: - time_delta = (len(increments)-incCount)*(time.time()-time_start)/max(1.0,incCount) - bg.set_message('(%02i:%02i:%02i) read node %i from increment %i...'%(time_delta//3600,time_delta%3600//60,time_delta%60,n,increment)) - myNodeID = p.node_id(n) - myNodeCoordinates = [p.node(n).x, p.node(n).y, p.node(n).z] - myElemID = 0 - myGrainID = 0 - - # --- filter valid locations - - filter = substituteLocation(options.filter, [myElemID,myNodeID,myGrainID], myNodeCoordinates) # generates an expression that is only true for the locations specified by options.filter - if filter != '' and not eval(filter): # for all filter expressions that are not true:... - continue # ... 
ignore this data point and continue with next - - # --- group data locations - - group = substituteLocation('#'.join(options.separation), [myElemID,myNodeID,myGrainID], myNodeCoordinates) # generates a unique key for a group of separated data based on the separation criterium for the location - if group not in data: # create a new group if not yet present - data[group] = [] - data[group].append([]) # append a new list for each group member; each list will contain dictionaries with keys 'label, and 'content' for the associated data - data[group][-1].append({ - 'label': 'location', - 'content': [myElemID,myNodeID,myGrainID] + myNodeCoordinates, - }) # first entry in this list always contains the location data - - # --- get data from t16 file - - for label in options.nodalScalar: - if assembleHeader: - header.append(label.replace(' ','')) - data[group][-1].append({ - 'label': label, - 'content': [ p.node_scalar(n,stat['IndexOfLabel'][label]) ], - }) - - assembleHeader = False - - else: - for e in range(stat['NumberOfElements']): - if e%100 == 0: - time_delta = (len(increments)-incCount)*(time.time()-time_start)/max(1.0,incCount) - bg.set_message('(%02i:%02i:%02i) read elem %i from increment %i...'%(time_delta//3600,time_delta%3600//60,time_delta%60,e,increment)) - myElemID = p.element_id(e) - myIpCoordinates = ipCoords(p.element(e).type, map(lambda node: [node.x, node.y, node.z], map(p.node, map(p.node_sequence,p.element(e).items)))) - for n,myNodeID in enumerate(p.element(e).items): - for g in range(('GrainCount' in stat['IndexOfLabel'] and int(p.element_scalar(e, stat['IndexOfLabel']['GrainCount'])[0].value)) - or 1): - myGrainID = g + 1 - - # --- filter valid locations - - filter = substituteLocation(options.filter, [myElemID,myNodeID,myGrainID], myIpCoordinates[n]) # generates an expression that is only true for the locations specified by options.filter - if filter != '' and not eval(filter): # for all filter expressions that are not true:... - continue # ... 
ignore this data point and continue with next - - # --- group data locations - - group = substituteLocation('#'.join(options.separation), [myElemID,myNodeID,myGrainID], myIpCoordinates[n]) # generates a unique key for a group of separated data based on the separation criterium for the location - if group not in data: # create a new group if not yet present - data[group] = [] - data[group].append([]) # append a new list for each group member; each list will contain dictionaries with keys 'label, and 'content' for the associated data - data[group][-1].append({ - 'label': 'location', - 'content': [myElemID,myNodeID,myGrainID] + myIpCoordinates[n], - }) # first entry in this list always contains the location data -# print group,sys.getsizeof(data) # better way of tracing leaks: http://www.lshift.net/blog/2008/11/14/tracing-python-memory-leaks - - # --- get data from t16 file - - if options.elementalScalar: - for label in options.elementalScalar: - if assembleHeader: - header.append(label.replace(' ','')) - data[group][-1].append({ - 'label': label, - 'content': [ p.element_scalar(e,stat['IndexOfLabel'][label])[n].value ], - }) - - if options.elementalTensor: - for label in options.elementalTensor: - if assembleHeader: - header += ['%s.%s'%(label.replace(' ',''),component) for component in ['intensity','t11','t22','t33','t12','t23','t13']] - myTensor = p.element_tensor(e,stat['IndexOfLabel'][label])[n] - data[group][-1].append({ - 'label': label, - 'content': [ myTensor.intensity, - myTensor.t11, myTensor.t22, myTensor.t33, - myTensor.t12, myTensor.t23, myTensor.t13, - ], - }) - - if options.homogenizationResult: - for label in options.homogenizationResult: - outputIndex = list(zip(*outputFormat['Homogenization']['outputs'])[0]).index(label) # find the position of this output in the outputFormat - length = int(outputFormat['Homogenization']['outputs'][outputIndex][1]) - if length > 1: - if assembleHeader: - header += ['%i_%s'%(component+1,label) for component in range(length)] - data[group][-1].append({ - 'label': label, - 'content': [ p.element_scalar(e,stat['IndexOfLabel']['%i_%s'%(component+1,label)])[n].value - for component in range(length) ], - }) - else: - if assembleHeader: - header.append(label) - data[group][-1].append({ - 'label': label, - 'content': [ p.element_scalar(e,stat['IndexOfLabel']['%s'%label])[n].value ], - }) - - if options.crystalliteResult: - for label in options.crystalliteResult: - outputIndex = list(zip(*outputFormat['Crystallite']['outputs'])[0]).index(label) # find the position of this output in the outputFormat - length = int(outputFormat['Crystallite']['outputs'][outputIndex][1]) - if length > 1: - if assembleHeader: - header += ['%i_%i_%s'%(g+1,component+1,label) for component in range(length)] - data[group][-1].append({ - 'label': label, - 'content': [ p.element_scalar(e,stat['IndexOfLabel']['%i_%i_%s'%(g+1,component+1,label)])[n].value - for component in range(length) ], - }) - else: - if assembleHeader: - header.append('%i_%s'%(g+1,label)) - data[group][-1].append({ - 'label':label, - 'content': [ p.element_scalar(e,stat['IndexOfLabel']['%i_%s'%(g+1,label)])[n].value ], - }) - - if options.constitutiveResult: - for label in options.constitutiveResult: - outputIndex = list(zip(*outputFormat['Constitutive']['outputs'])[0]).index(label) # find the position of this output in the outputFormat - length = int(outputFormat['Constitutive']['outputs'][outputIndex][1]) - if length > 1: - if assembleHeader: - header += ['%i_%i_%s'%(g+1,component+1,label) for component in 
range(length)] - data[group][-1].append({ - 'label':label, - 'content': [ p.element_scalar(e,stat['IndexOfLabel']['%i_%i_%s'%(g+1,component+1,label)])[n].value - for component in range(length) ], - }) - else: - if assembleHeader: - header.append('%i_%s'%(g+1,label)) - data[group][-1].append({ - 'label':label, - 'content': [ p.element_scalar(e,stat['IndexOfLabel']['%i_%s'%(g+1,label)])[n].value ], - }) - - assembleHeader = False - - if options.separateFiles: - if fileOpen: - file.close() - fileOpen = False - outFilename = eval('"'+eval("'%%s_inc%%0%ii.txt'%(math.log10(max(increments+[1]))+1)")+'"%(dirname + os.sep + os.path.split(filename)[1],increment)') - else: - outFilename = '%s.txt'%(dirname + os.sep + os.path.split(filename)[1]) - - # --- write header to file - - if not fileOpen: - file = open(outFilename,'w') - fileOpen = True - file.write('2\theader\n') - file.write('$Id$\n') - if options.time: - basic = ['inc','time'] - else: - basic = ['inc'] - if options.nodalScalar: - file.write('\t'.join(basic + ['elem','node','grain','node.x','node.y','node.z'] + header) + '\n') - else: - file.write('\t'.join(basic + ['elem','node','grain','ip.x','ip.y','ip.z'] + header) + '\n') - - # --- write data to file - - output = [] - for group in data: - if options.time: - output.append([increment, p.time]) - else: - output.append([increment]) - for chunk in range(len(data[group][0])): - label = data[group][0][chunk]['label'] # name of chunk (e.g. 'orientation', or 'flow stress') - groupContent = [data[group][member][chunk]['content'] for member in range(len(data[group]))] # list of each member's chunk - if label == 'location': - condensedGroupContent = mapFunc(label, groupContent, 'avg') # always average location - if 'elem' not in options.separation: - condensedGroupContent[0] = 'n/a' - if 'node' not in options.separation: - condensedGroupContent[1] = 'n/a' - if 'grain' not in options.separation: - condensedGroupContent[2] = 'n/a' - elif len(groupContent) == 1: - condensedGroupContent = map(str,groupContent[0]) - else: - condensedGroupContent = mapFunc(label, groupContent, options.func) # map function to groupContent to get condensed data of this group's chunk - output[-1] += condensedGroupContent - - sortProperties = [] - for item in options.sort: - if item not in options.separation: - sortProperties.append(item) - - for groupvalues in sortBySeparation(output, options.separation+sortProperties, int(options.time)): # sort output according to separation criteria - file.write('\t'.join(map(str,groupvalues)) + '\n') + p.moveto(increment+offset_inc) + +# --------------------------- file management -------------------------------- + if options.separateFiles: + if fileOpen: + file.close() + fileOpen = False + outFilename = eval('"'+eval("'%%s_inc%%0%ii.txt'%(math.log10(max(increments+[1]))+1)")+'"%(dirname + os.sep + os.path.split(filename)[1],increment)') + else: + outFilename = '%s.txt'%(dirname + os.sep + os.path.split(filename)[1]) + + if not fileOpen: + file = open(outFilename,'w') + fileOpen = True + file.write('2\theader\n') + file.write('$Id$\n') + headerWritten = False + + file.flush() + +# --------------------------- read and map data per group -------------------------------- + + member = 0 + for i,group in enumerate(groups): + + N = 0 # group member counter + for (e,n,g,n_local) in group[1:]: # loop over group members + member += 1 + if member%1000 == 0: + time_delta = ((len(increments)*memberCount)/float(member+incCount*memberCount)-1.0)*(time.time()-time_start) + 
bg.set_message('(%02i:%02i:%02i) processing point %i of %i from increment %i...'%(time_delta//3600,time_delta%3600//60,time_delta%60,member,memberCount,increment)) + newby = [] # current member's data + if options.elementalScalar: + for label in options.elementalScalar: + if assembleHeader: + header += [label.replace(' ','')] + newby.append({'label':label, + 'len':1, + 'content':[ p.element_scalar(e,stat['IndexOfLabel'][label])[n_local].value ]}) + + if options.elementalTensor: + for label in options.elementalTensor: + if assembleHeader: + header += heading('.',[[label.replace(' ',''),component] for component in ['intensity','t11','t22','t33','t12','t23','t13']]) + myTensor = p.element_tensor(e,stat['IndexOfLabel'][label])[n_local] + newby.append({'label':label, + 'len':length, + 'content':[ myTensor.intensity, + myTensor.t11, myTensor.t22, myTensor.t33, + myTensor.t12, myTensor.t23, myTensor.t13, + ]}) + + if options.homogenizationResult or \ + options.crystalliteResult or \ + options.constitutiveResult: + for (label,resultType) in zip(options.homogenizationResult + + options.crystalliteResult + + options.constitutiveResult, + ['Homogenization']*len(options.homogenizationResult) + + ['Crystallite']*len(options.crystalliteResult) + + ['Constitutive']*len(options.constitutiveResult) + ): + outputIndex = list(zip(*outputFormat[resultType]['outputs'])[0]).index(label) # find the position of this output in the outputFormat + length = int(outputFormat[resultType]['outputs'][outputIndex][1]) + if resultType == 'Homogenization': + thisHead = heading('_',[[component,label] for component in range(int(length>1),length+int(length>1))]) + else: + thisHead = heading('_',[[g,component,label] for component in range(int(length>1),length+int(length>1))]) + if assembleHeader: header += thisHead + newby.append({'label':label, + 'len':length, + 'content':[ p.element_scalar(e,stat['IndexOfLabel'][head])[n_local].value + for head in thisHead ]}) + + assembleHeader = False + + if N == 0: mappedResult = [0.0]*len(header) + + pos = 0 + for chunk in newby: + mappedResult[pos:pos+chunk['len']] = mapIncremental(chunk['label'],options.func, + N,mappedResult[pos:pos+chunk['len']],chunk['content']) + pos += chunk['len'] + + N += 1 + + # --- write data row to file --- + + if not headerWritten: + file.write('\t'.join(standard + header) + '\n') + headerWritten = True + + file.write('\t'.join(map(str,[increment] + \ + {True:[p.time],False:[]}[options.time] + \ + group[0] + \ + mappedResult) + ) + '\n') + if fileOpen: - file.close() + file.close() # --------------------------- DONE -------------------------------- -
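
A few notes on the techniques in this patch. MPIEspectral_result mimics the py_post interface on a regular grid, so node positions and element connectivity reduce to index arithmetic: a flat node index n on an a x b x c node grid decodes to (i,j,k) = (n % a, (n/a) % b, (n/a/b) % c), and element(e) derives the eight hexahedron corners as offsets (base, base+1, base+a+1, base+a, then the same four shifted by a*b) from a base node. A minimal sketch of the decode and its inverse (helper names are illustrative, not the script's API):

    def node_ijk(n, a, b):
        # invert the flat index n = i + a*j + a*b*k of an a x b x c node grid
        return (n % a, (n // a) % b, n // (a * b))

    def flat_index(i, j, k, a, b):
        return i + a * j + a * b * k

    a, b, c = 4, 3, 2
    for n in range(a * b * c):
        i, j, k = node_ijk(n, a, b)
        assert flat_index(i, j, k, a, b) == n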
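ipCoords places integration points as weighted averages of the element's nodal coordinates, with one weight row per IP and element type (the 27/9/3/1 pattern for type-7 hexahedra, uniform weights for type 117); the visible type-7 rows each total 64, so the part of the loop outside this hunk presumably divides by the row sum. A condensed sketch under that assumption:

    def ip_coords(weights, nodal):
        # one IP position per weight row: sum(w_i * x_i) / sum(w_i)
        ips = []
        for row in weights:
            norm = float(sum(row))
            ips.append([sum(w * x[d] for w, x in zip(row, nodal)) / norm
                        for d in range(3)])
        return ips

    # type-117 elements weight all eight corners equally -> IP at the centroid
    corners = [[float(i), float(j), float(k)]
               for k in (0, 1) for j in (0, 1) for i in (0, 1)]
    assert ip_coords([[1.0] * 8], corners) == [[0.5, 0.5, 0.5]]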
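heading() composes column labels from parts whose second-to-last entry is a multiplicity index; a 0 there is deleted, so scalar outputs get a bare label while vector outputs get 1_ .. N_ prefixes (the result loop feeds it range(int(length>1), length+int(length>1))). A worked example, using the function exactly as defined in the patch (the output labels here are made up):

    def heading(glue, parts):
        header = []
        for pieces in parts:
            if pieces[-2] == 0:                 # zero multiplicity: drop the index
                del pieces[-2]
            header.append(glue.join(map(str, pieces)))
        return header

    assert heading('_', [[c, 'resistance'] for c in range(1, 4)]) == \
           ['1_resistance', '2_resistance', '3_resistance']   # length 3
    assert heading('_', [[0, 'damage']]) == ['damage']         # length 1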
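The tiny memory footprint comes from the two-pass structure: group membership is established once, before the increment loop, from geometry alone, and inside the loop each member's freshly read values are immediately folded into a single running result per group via mapIncremental, so only one member's data is live at a time. For 'avg' the fold is the running-mean recurrence m_{n+1} = (n*m_n + x)/(n + 1). A self-contained sketch of that shape (toy data, illustrative names):

    def fold_avg(n, base, new):
        # running mean after n prior samples, one column at a time
        return [(n * b + a) / (n + 1.0) for b, a in zip(base, new)]

    members = {'groupA': [[1.0, 2.0], [3.0, 4.0]], 'groupB': [[10.0, 0.0]]}
    rows = {}
    for grp, stream in members.items():
        result, N = [0.0, 0.0], 0
        for data in stream:                     # stream members; keep one row only
            result = fold_avg(N, result, data)
            N += 1
        rows[grp] = result
    assert rows['groupA'] == [2.0, 3.0]         # matches the plain column averages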
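Three spots in mapIncremental and its callers read like slips rather than intent: the illegal-mapping and exception branches return ['n/a'*len(base)] (a single concatenated string) where the old mapFunc produced one 'n/a' per column, i.e. ['n/a']*len(base); the user-function branch interpolates the builtin map into the eval string (%map instead of %mapping) and passes the integer N*len(base) where a list [N]*len(base) is needed; and the elemental-tensor branch stores 'len':length before length is assigned (seven components are written, so presumably 7). A corrected sketch of the core, under that reading:

    def map_incremental(mapping, N, base, new):
        standard = {'min': lambda n, b, a: min(b, a),
                    'max': lambda n, b, a: max(b, a),
                    'avg': lambda n, b, a: (n * b + a) / (n + 1.0),
                    'sum': lambda n, b, a: b + a}
        if mapping in standard:
            f = standard[mapping]
            return [f(N, b, a) for b, a in zip(base, new)]
        return ['n/a'] * len(base)              # one placeholder per column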
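The better guess of remaining time is plain proportionality: with done = member + incCount*memberCount points processed out of total = len(increments)*memberCount, the remaining time is elapsed*(total/done - 1), which is exactly the time_delta expression in the progress message. A sketch, assuming roughly constant cost per data point:

    def eta_seconds(elapsed, done, total):
        # remaining = elapsed * (total/done - 1)
        return (float(total) / float(done) - 1.0) * elapsed

    assert eta_seconds(10.0, 250, 1000) == 30.0  # 25% done in 10 s -> ~30 s left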