major recoding.

now with tiny memory footprint and better estimation of remaining time.
Philip Eisenlohr 2011-04-12 17:46:35 +00:00
parent ef4fc9d0ee
commit 482be626e0
1 changed file with 546 additions and 552 deletions
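The memory saving comes from the new mapIncremental(): instead of buffering every member of a group per increment (the old data dictionary), each value is folded into a running result, e.g. the running mean (n*b+a)/(n+1) visible in the diff below. A minimal sketch of that update rule (illustrative helper, not code from this commit):

def running_avg(values):
    # assumption: illustrative only; mirrors mapIncremental's 'avg' lambda (n*b+a)/(n+1)
    avg = 0.0
    for N, value in enumerate(values):
        avg = (N*avg + value)/(N+1)    # fold each new value into the running mean
    return avg

print(running_avg([1.0, 2.0, 3.0, 4.0]))    # 2.5, computed without storing per-member lists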


@@ -1,6 +1,6 @@
#!/usr/bin/env python
-import os, sys, math, re, threading, time, struct
+import pdb, os, sys, gc, math, re, threading, time, struct
from optparse import OptionParser, OptionGroup, Option, SUPPRESS_HELP
releases = {'2010':['linux64',''],
@@ -56,6 +56,7 @@ class MPIEspectral_result: # mimic py_post result object
extrapolate = ''
N_increments = 0
increment = 0
+time = 0.0 # this is a dummy at the moment, we need to parse the load file and figure out what time a particular increment corresponds to
N_nodes = 0
N_node_scalars = 0
N_elements = 0
@@ -156,7 +157,7 @@ class MPIEspectral_result: # mimic py_post result object
c = self.resolution[2]+1
basenode = e+e/self.resolution[0] + e/self.resolution[0]/self.resolution[1]*a
basenode2 = basenode+a*b
-return (element([basenode ,basenode+1 ,basenode+a+1 ,basenode+a,
+return (element([basenode ,basenode +1,basenode +a+1,basenode +a,
basenode2,basenode2+1,basenode2+a+1,basenode2+a,
],117))
@@ -290,27 +291,6 @@ def ipCoords(elemType, nodalCoordinates):
return ipCoordinates
-# -----------------------------
-def sortBySeparation(dataArray, criteria, offset):
-#
-# sorting of groupValue array according to list of criteria
-# -----------------------------
-where = {
-'elem': 1,
-'node': 2,
-'grain': 3,
-'x': 4,
-'y': 5,
-'z': 6,
-}
-theKeys = []
-for criterium in criteria:
-if criterium in where:
-theKeys.append('x[%i]'%(offset+where[criterium]))
-exec('sortedArray = sorted(dataArray,key=lambda x:(%s))'%(','.join(theKeys)))
-return sortedArray
# -----------------------------
def substituteLocation(string, mesh, coords):
@@ -327,51 +307,69 @@ def substituteLocation(string, mesh, coords):
return substitute
# -----------------------------
-def average(theList):
+def heading(glue,parts):
#
-# calcs the average of a list of numbers
+# joins pieces from parts by glue. second to last entry in pieces tells multiplicity
# -----------------------------
-return sum(map(float,theList))/len(theList)
+header = []
+for pieces in parts:
+if pieces[-2] == 0:
+del pieces[-2]
+header.append(glue.join(map(str,pieces)))
+return header
# -----------------------------
-def mapFunc(label, chunks, func):
+def illegalMap(map, label):
#
-# applies the function defined by "func"
-# (can be either 'min','max','avg', 'sum', or user specified)
-# to a list of lists of data
+# answers whether map is illegal to be applied to data what
# -----------------------------
illegal = {
'eulerangles': ['min','max','avg','sum'],
'defgrad': ['min','max','avg','sum'],
-'orientation': ['min','max','sum'],
+'orientation': ['min','max', 'sum'],
}
-if label.lower() in illegal and func in illegal[label.lower()]: # for illegal mappings:...
-return ['n/a' for i in range(len(chunks[0]))] # ...return 'n/a'
+return label.lower() in illegal and map in illegal[label.lower()]
+# -----------------------------
+def mapIncremental(label, mapping, N, base, new):
+#
+# applies the function defined by "mapping"
+# (can be either 'min','max','avg', 'sum', or user specified)
+# to a list of data
+# -----------------------------
+if illegalMap(mapping,label): # for illegal mappings:...
+return ['n/a'*len(base)] # ...return 'n/a'
else:
-if func in ['min','max','avg']:
-mapped = [{ 'min': lambda x: min(x),
-'max': lambda x: max(x),
-'avg': lambda x: average(x),
-'sum': lambda x: sum(x),
-}[func](column) for column in zip(*chunks)] # map one of the standard functions to colums in chunks
+if mapping in ['min','max','avg','sum']:
+mapped = map(
+{ 'min': lambda n,b,a: min(b,a),
+'max': lambda n,b,a: max(b,a),
+'avg': lambda n,b,a: (n*b+a)/(n+1),
+'sum': lambda n,b,a: b+a,
+}[mapping],[N]*len(base),base,new) # map one of the standard functions to data
if label.lower() == 'orientation': # orientation is special case:...
orientationNorm = math.sqrt(sum([q*q for q in mapped])) # ...calc norm of average quaternion
mapped = map(lambda x: x/orientationNorm, mapped) # ...renormalize quaternion
else:
try:
-mapped = eval('map(%s,zip(*chunks))'%func) # map user defined function to colums in chunks
+mapped = eval('map(%s,N*len(base),base,new)'%map) # map user defined function to colums in chunks
except:
-mapped = ['n/a' for i in range(len(chunks[0]))]
+mapped = ['n/a'*len(base)]
return mapped
# -----------------------------
def OpenPostfile(name,type):
#
@@ -680,6 +678,12 @@ if options.nodalScalar and ( options.elementalScalar or options.elementalTensor
parser.print_help()
parser.error('not allowed to mix nodal with elemental results...')
+if not options.nodalScalar: options.nodalScalar = []
+if not options.elementalScalar: options.elementalScalar = []
+if not options.elementalTensor: options.elementalTensor = []
+if not options.homogenizationResult: options.homogenizationResult = []
+if not options.crystalliteResult: options.crystalliteResult = []
+if not options.constitutiveResult: options.constitutiveResult = []
# --- start background messaging
@@ -760,27 +764,19 @@ if options.range:
min(stat['NumberOfIncrements'],options.range[1]+1),
options.range[2])
-fileOpen = False
-assembleHeader = True
-header = []
-element_scalar = {}
-element_tensor = {}
-# --- loop over increments
-time_start = time.time()
-for incCount,increment in enumerate(increments):
-p.moveto(increment+offset_inc)
-data = {}
-if options.nodalScalar:
-for n in range(stat['NumberOfNodes']):
-if n%100 == 0:
-time_delta = (len(increments)-incCount)*(time.time()-time_start)/max(1.0,incCount)
-bg.set_message('(%02i:%02i:%02i) read node %i from increment %i...'%(time_delta//3600,time_delta%3600//60,time_delta%60,n,increment))
+# --------------------------- build group membership --------------------------------
+p.moveto(increments[0]+offset_inc)
+index = {}
+groups = []
+groupCount = 0
+memberCount = 0
+if options.nodalScalar:
+for n in xrange(stat['NumberOfNodes']):
+if n%1000 == 0:
+bg.set_message('scan node %i...'%n)
myNodeID = p.node_id(n)
myNodeCoordinates = [p.node(n).x, p.node(n).y, p.node(n).z]
myElemID = 0
@@ -794,32 +790,24 @@ for incCount,increment in enumerate(increments):
# --- group data locations
-group = substituteLocation('#'.join(options.separation), [myElemID,myNodeID,myGrainID], myNodeCoordinates) # generates a unique key for a group of separated data based on the separation criterium for the location
-if group not in data: # create a new group if not yet present
-data[group] = []
-data[group].append([]) # append a new list for each group member; each list will contain dictionaries with keys 'label, and 'content' for the associated data
-data[group][-1].append({
-'label': 'location',
-'content': [myElemID,myNodeID,myGrainID] + myNodeCoordinates,
-}) # first entry in this list always contains the location data
-# --- get data from t16 file
-for label in options.nodalScalar:
-if assembleHeader:
-header.append(label.replace(' ',''))
-data[group][-1].append({
-'label': label,
-'content': [ p.node_scalar(n,stat['IndexOfLabel'][label]) ],
-})
-assembleHeader = False
-else:
-for e in range(stat['NumberOfElements']):
-if e%100 == 0:
-time_delta = (len(increments)-incCount)*(time.time()-time_start)/max(1.0,incCount)
-bg.set_message('(%02i:%02i:%02i) read elem %i from increment %i...'%(time_delta//3600,time_delta%3600//60,time_delta%60,e,increment))
+grp = substituteLocation('#'.join(options.separation), [myElemID,myNodeID,myGrainID], myNodeCoordinates) # generates a unique key for a group of separated data based on the separation criterium for the location
+if grp not in index: # create a new group if not yet present
+index[grp] = groupCount
+groups[groupCount] = [[0,0,0,0.0,0.0,0.0]] # initialize with avg location
+groupCount += 1
+groups[index[grp]][0] = mapIncremental('','avg',
+len(groups[index[grp]])-1,
+groups[index[grp]][0],
+[myElemID,myNodeID,myGrainID] + myNodeCoordinates) # incrementally update average location
+groups[index[grp]].append([myElemID,myNodeID,myGrainID]) # append a new list defining each group member
+memberCount += 1
+else:
+for e in xrange(stat['NumberOfElements']):
+if e%1000 == 0:
+bg.set_message('scan elem %i...'%e)
myElemID = p.element_id(e)
myIpCoordinates = ipCoords(p.element(e).type, map(lambda node: [node.x, node.y, node.z], map(p.node, map(p.node_sequence,p.element(e).items))))
for n,myNodeID in enumerate(p.element(e).items):
@@ -835,101 +823,70 @@ for incCount,increment in enumerate(increments):
# --- group data locations
-group = substituteLocation('#'.join(options.separation), [myElemID,myNodeID,myGrainID], myIpCoordinates[n]) # generates a unique key for a group of separated data based on the separation criterium for the location
-if group not in data: # create a new group if not yet present
-data[group] = []
-data[group].append([]) # append a new list for each group member; each list will contain dictionaries with keys 'label, and 'content' for the associated data
-data[group][-1].append({
-'label': 'location',
-'content': [myElemID,myNodeID,myGrainID] + myIpCoordinates[n],
-}) # first entry in this list always contains the location data
-# print group,sys.getsizeof(data) # better way of tracing leaks: http://www.lshift.net/blog/2008/11/14/tracing-python-memory-leaks
-# --- get data from t16 file
-if options.elementalScalar:
-for label in options.elementalScalar:
-if assembleHeader:
-header.append(label.replace(' ',''))
-data[group][-1].append({
-'label': label,
-'content': [ p.element_scalar(e,stat['IndexOfLabel'][label])[n].value ],
-})
-if options.elementalTensor:
-for label in options.elementalTensor:
-if assembleHeader:
-header += ['%s.%s'%(label.replace(' ',''),component) for component in ['intensity','t11','t22','t33','t12','t23','t13']]
-myTensor = p.element_tensor(e,stat['IndexOfLabel'][label])[n]
-data[group][-1].append({
-'label': label,
-'content': [ myTensor.intensity,
-myTensor.t11, myTensor.t22, myTensor.t33,
-myTensor.t12, myTensor.t23, myTensor.t13,
-],
-})
-if options.homogenizationResult:
-for label in options.homogenizationResult:
-outputIndex = list(zip(*outputFormat['Homogenization']['outputs'])[0]).index(label) # find the position of this output in the outputFormat
-length = int(outputFormat['Homogenization']['outputs'][outputIndex][1])
-if length > 1:
-if assembleHeader:
-header += ['%i_%s'%(component+1,label) for component in range(length)]
-data[group][-1].append({
-'label': label,
-'content': [ p.element_scalar(e,stat['IndexOfLabel']['%i_%s'%(component+1,label)])[n].value
-for component in range(length) ],
-})
-else:
-if assembleHeader:
-header.append(label)
-data[group][-1].append({
-'label': label,
-'content': [ p.element_scalar(e,stat['IndexOfLabel']['%s'%label])[n].value ],
-})
-if options.crystalliteResult:
-for label in options.crystalliteResult:
-outputIndex = list(zip(*outputFormat['Crystallite']['outputs'])[0]).index(label) # find the position of this output in the outputFormat
-length = int(outputFormat['Crystallite']['outputs'][outputIndex][1])
-if length > 1:
-if assembleHeader:
-header += ['%i_%i_%s'%(g+1,component+1,label) for component in range(length)]
-data[group][-1].append({
-'label': label,
-'content': [ p.element_scalar(e,stat['IndexOfLabel']['%i_%i_%s'%(g+1,component+1,label)])[n].value
-for component in range(length) ],
-})
-else:
-if assembleHeader:
-header.append('%i_%s'%(g+1,label))
-data[group][-1].append({
-'label':label,
-'content': [ p.element_scalar(e,stat['IndexOfLabel']['%i_%s'%(g+1,label)])[n].value ],
-})
-if options.constitutiveResult:
-for label in options.constitutiveResult:
-outputIndex = list(zip(*outputFormat['Constitutive']['outputs'])[0]).index(label) # find the position of this output in the outputFormat
-length = int(outputFormat['Constitutive']['outputs'][outputIndex][1])
-if length > 1:
-if assembleHeader:
-header += ['%i_%i_%s'%(g+1,component+1,label) for component in range(length)]
-data[group][-1].append({
-'label':label,
-'content': [ p.element_scalar(e,stat['IndexOfLabel']['%i_%i_%s'%(g+1,component+1,label)])[n].value
-for component in range(length) ],
-})
-else:
-if assembleHeader:
-header.append('%i_%s'%(g+1,label))
-data[group][-1].append({
-'label':label,
-'content': [ p.element_scalar(e,stat['IndexOfLabel']['%i_%s'%(g+1,label)])[n].value ],
-})
-assembleHeader = False
+grp = substituteLocation('#'.join(options.separation), [myElemID,myNodeID,myGrainID], myIpCoordinates[n]) # generates a unique key for a group of separated data based on the separation criterium for the location
+if grp not in index: # create a new group if not yet present
+index[grp] = groupCount
+groups.append([[0,0,0,0.0,0.0,0.0]]) # initialize with avg location
+groupCount += 1
+groups[index[grp]][0] = mapIncremental('','avg',
+len(groups[index[grp]])-1,
+groups[index[grp]][0],
+[myElemID,myNodeID,myGrainID] + myIpCoordinates[n]) # incrementally update average location
+groups[index[grp]].append([myElemID,myNodeID,myGrainID,n]) # append a new list defining each group member
+memberCount += 1
+# --------------------------- prevent avg of e,n,g --------------------------------
+for grp in xrange(len(groups)):
+if len(groups[grp]) > 2: # more than one member in group? (avgLoc + 2+ entries?)
+groups[grp][0][:3] = ['n/a','n/a','n/a'] # no avg value for elem, ip, or grain meaningful
+# --------------------------- sort groups --------------------------------
+where = {
+'elem': 0,
+'node': 1,
+'grain': 2,
+'x': 3,
+'y': 4,
+'z': 5,
+}
+sortProperties = []
+for item in options.sort:
+if item not in options.separation:
+sortProperties.append(item)
+theKeys = []
+for criterium in options.separation+sortProperties:
+if criterium in where:
+theKeys.append('x[0][%i]'%where[criterium])
+sortKeys = eval('lambda x:(%s)'%(','.join(theKeys)))
+bg.set_message('sorting groups...')
+groups.sort(key = sortKeys) # in-place sorting to save mem
+fileOpen = False
+assembleHeader = True
+header = []
+standard = ['inc'] + \
+{True: ['time'],
+False:[]}[options.time] + \
+['elem','node','grain'] + \
+{True: ['node.x','node.y','node.z'],
+False:['ip.x','ip.y','ip.z']}[options.nodalScalar != []]
+# --------------------------- loop over increments --------------------------------
+time_start = time.time()
+for incCount,increment in enumerate(increments):
+p.moveto(increment+offset_inc)
+# --------------------------- file management --------------------------------
if options.separateFiles:
if fileOpen:
@@ -939,58 +896,95 @@ for incCount,increment in enumerate(increments):
else:
outFilename = '%s.txt'%(dirname + os.sep + os.path.split(filename)[1])
-# --- write header to file
if not fileOpen:
file = open(outFilename,'w')
fileOpen = True
file.write('2\theader\n')
file.write('$Id$\n')
-if options.time:
-basic = ['inc','time']
-else:
-basic = ['inc']
-if options.nodalScalar:
-file.write('\t'.join(basic + ['elem','node','grain','node.x','node.y','node.z'] + header) + '\n')
-else:
-file.write('\t'.join(basic + ['elem','node','grain','ip.x','ip.y','ip.z'] + header) + '\n')
-# --- write data to file
-output = []
-for group in data:
-if options.time:
-output.append([increment, p.time])
+headerWritten = False
+file.flush()
+# --------------------------- read and map data per group --------------------------------
+member = 0
+for i,group in enumerate(groups):
+N = 0 # group member counter
+for (e,n,g,n_local) in group[1:]: # loop over group members
+member += 1
+if member%1000 == 0:
+time_delta = ((len(increments)*memberCount)/float(member+incCount*memberCount)-1.0)*(time.time()-time_start)
+bg.set_message('(%02i:%02i:%02i) processing point %i of %i from increment %i...'%(time_delta//3600,time_delta%3600//60,time_delta%60,member,memberCount,increment))
+newby = [] # current member's data
+if options.elementalScalar:
+for label in options.elementalScalar:
+if assembleHeader:
+header += [label.replace(' ','')]
+newby.append({'label':label,
+'len':1,
+'content':[ p.element_scalar(e,stat['IndexOfLabel'][label])[n_local].value ]})
+if options.elementalTensor:
+for label in options.elementalTensor:
+if assembleHeader:
+header += heading('.',[[label.replace(' ',''),component] for component in ['intensity','t11','t22','t33','t12','t23','t13']])
+myTensor = p.element_tensor(e,stat['IndexOfLabel'][label])[n_local]
+newby.append({'label':label,
+'len':length,
+'content':[ myTensor.intensity,
+myTensor.t11, myTensor.t22, myTensor.t33,
+myTensor.t12, myTensor.t23, myTensor.t13,
+]})
+if options.homogenizationResult or \
+options.crystalliteResult or \
+options.constitutiveResult:
+for (label,resultType) in zip(options.homogenizationResult +
+options.crystalliteResult +
+options.constitutiveResult,
+['Homogenization']*len(options.homogenizationResult) +
+['Crystallite']*len(options.crystalliteResult) +
+['Constitutive']*len(options.constitutiveResult)
+):
+outputIndex = list(zip(*outputFormat[resultType]['outputs'])[0]).index(label) # find the position of this output in the outputFormat
+length = int(outputFormat[resultType]['outputs'][outputIndex][1])
+if resultType == 'Homogenization':
+thisHead = heading('_',[[component,label] for component in range(int(length>1),length+int(length>1))])
else:
-output.append([increment])
-for chunk in range(len(data[group][0])):
-label = data[group][0][chunk]['label'] # name of chunk (e.g. 'orientation', or 'flow stress')
-groupContent = [data[group][member][chunk]['content'] for member in range(len(data[group]))] # list of each member's chunk
-if label == 'location':
-condensedGroupContent = mapFunc(label, groupContent, 'avg') # always average location
-if 'elem' not in options.separation:
-condensedGroupContent[0] = 'n/a'
-if 'node' not in options.separation:
-condensedGroupContent[1] = 'n/a'
-if 'grain' not in options.separation:
-condensedGroupContent[2] = 'n/a'
-elif len(groupContent) == 1:
-condensedGroupContent = map(str,groupContent[0])
-else:
-condensedGroupContent = mapFunc(label, groupContent, options.func) # map function to groupContent to get condensed data of this group's chunk
-output[-1] += condensedGroupContent
-sortProperties = []
-for item in options.sort:
-if item not in options.separation:
-sortProperties.append(item)
-for groupvalues in sortBySeparation(output, options.separation+sortProperties, int(options.time)): # sort output according to separation criteria
-file.write('\t'.join(map(str,groupvalues)) + '\n')
+thisHead = heading('_',[[g,component,label] for component in range(int(length>1),length+int(length>1))])
+if assembleHeader: header += thisHead
+newby.append({'label':label,
+'len':length,
+'content':[ p.element_scalar(e,stat['IndexOfLabel'][head])[n_local].value
+for head in thisHead ]})
+assembleHeader = False
+if N == 0: mappedResult = [0.0]*len(header)
+pos = 0
+for chunk in newby:
+mappedResult[pos:pos+chunk['len']] = mapIncremental(chunk['label'],options.func,
+N,mappedResult[pos:pos+chunk['len']],chunk['content'])
+pos += chunk['len']
+N += 1
+# --- write data row to file ---
+if not headerWritten:
+file.write('\t'.join(standard + header) + '\n')
+headerWritten = True
+file.write('\t'.join(map(str,[increment] + \
+{True:[p.time],False:[]}[options.time] + \
+group[0] + \
+mappedResult)
+) + '\n')
if fileOpen:
file.close()
# --------------------------- DONE --------------------------------
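
The improved remaining-time guess in the processing loop rests on a simple proportion: with done = member + incCount*memberCount points handled out of total = len(increments)*memberCount, the rest should take elapsed*(total/done - 1). A short sketch of that relation (function name and arguments are illustrative, not from the script):

import time

def remaining_seconds(time_start, done, total):
    # elapsed * (total/done - 1) == elapsed * (remaining work / completed work)
    return (total/float(done) - 1.0)*(time.time() - time_start)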