major recoding.
now with a tiny memory footprint and better estimation of the remaining time.
This commit is contained in:
parent ef4fc9d0ee
commit 482be626e0
@@ -1,6 +1,6 @@
#!/usr/bin/env python

import os, sys, math, re, threading, time, struct
import pdb, os, sys, gc, math, re, threading, time, struct
from optparse import OptionParser, OptionGroup, Option, SUPPRESS_HELP

releases = {'2010':['linux64',''],
@@ -56,6 +56,7 @@ class MPIEspectral_result: # mimic py_post result object
extrapolate = ''
N_increments = 0
increment = 0
time = 0.0 # this is a dummy at the moment, we need to parse the load file and figure out what time a particular increment corresponds to
N_nodes = 0
N_node_scalars = 0
N_elements = 0
@@ -156,7 +157,7 @@ class MPIEspectral_result: # mimic py_post result object
c = self.resolution[2]+1
basenode = e+e/self.resolution[0] + e/self.resolution[0]/self.resolution[1]*a
basenode2 = basenode+a*b
return (element([basenode ,basenode+1 ,basenode+a+1 ,basenode+a,
return (element([basenode ,basenode +1,basenode +a+1,basenode +a,
basenode2,basenode2+1,basenode2+a+1,basenode2+a,
],117))
@@ -290,27 +291,6 @@ def ipCoords(elemType, nodalCoordinates):
return ipCoordinates


# -----------------------------
def sortBySeparation(dataArray, criteria, offset):
#
# sorting of groupValue array according to list of criteria
# -----------------------------
where = {
'elem': 1,
'node': 2,
'grain': 3,
'x': 4,
'y': 5,
'z': 6,
}

theKeys = []
for criterium in criteria:
if criterium in where:
theKeys.append('x[%i]'%(offset+where[criterium]))
exec('sortedArray = sorted(dataArray,key=lambda x:(%s))'%(','.join(theKeys)))
return sortedArray


# -----------------------------
def substituteLocation(string, mesh, coords):
@@ -327,51 +307,69 @@ def substituteLocation(string, mesh, coords):
return substitute


# -----------------------------
def average(theList):
def heading(glue,parts):
#
# calcs the average of a list of numbers
# joins pieces from parts by glue. second to last entry in pieces tells multiplicity
# -----------------------------

return sum(map(float,theList))/len(theList)
header = []
for pieces in parts:
if pieces[-2] == 0:
del pieces[-2]
header.append(glue.join(map(str,pieces)))

return header
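As a quick illustration of the new heading() helper (the calls below are illustrative and not part of the commit): pieces whose second-to-last entry is a non-zero multiplicity are simply glued together, while a multiplicity of 0 is dropped so scalar outputs keep their bare label.

    heading('.', [['stress', 't11'], ['stress', 't22']])   # -> ['stress.t11', 'stress.t22']
    heading('_', [[0, 'damage']])                           # -> ['damage']   (multiplicity 0 is dropped)
    heading('_', [[i, 'phi'] for i in range(1, 4)])         # -> ['1_phi', '2_phi', '3_phi']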


# -----------------------------
def mapFunc(label, chunks, func):
def illegalMap(map, label):
#
# applies the function defined by "func"
# (can be either 'min','max','avg', 'sum', or user specified)
# to a list of lists of data
# answers whether map is illegal to be applied to data what
# -----------------------------

illegal = {
'eulerangles': ['min','max','avg','sum'],
'defgrad': ['min','max','avg','sum'],
'orientation': ['min','max','sum'],
'orientation': ['min','max', 'sum'],
}

if label.lower() in illegal and func in illegal[label.lower()]: # for illegal mappings:...
return ['n/a' for i in range(len(chunks[0]))] # ...return 'n/a'
return label.lower() in illegal and map in illegal[label.lower()]


# -----------------------------
def mapIncremental(label, mapping, N, base, new):
#
# applies the function defined by "mapping"
# (can be either 'min','max','avg', 'sum', or user specified)
# to a list of data
# -----------------------------

if illegalMap(mapping,label): # for illegal mappings:...
return ['n/a'*len(base)] # ...return 'n/a'

else:
if func in ['min','max','avg']:
mapped = [{ 'min': lambda x: min(x),
'max': lambda x: max(x),
'avg': lambda x: average(x),
'sum': lambda x: sum(x),
}[func](column) for column in zip(*chunks)] # map one of the standard functions to colums in chunks
if mapping in ['min','max','avg','sum']:
mapped = map(
{ 'min': lambda n,b,a: min(b,a),
'max': lambda n,b,a: max(b,a),
'avg': lambda n,b,a: (n*b+a)/(n+1),
'sum': lambda n,b,a: b+a,
}[mapping],[N]*len(base),base,new) # map one of the standard functions to data
if label.lower() == 'orientation': # orientation is special case:...
orientationNorm = math.sqrt(sum([q*q for q in mapped])) # ...calc norm of average quaternion
mapped = map(lambda x: x/orientationNorm, mapped) # ...renormalize quaternion
else:
try:
mapped = eval('map(%s,zip(*chunks))'%func) # map user defined function to colums in chunks
mapped = eval('map(%s,N*len(base),base,new)'%map) # map user defined function to colums in chunks
except:
mapped = ['n/a' for i in range(len(chunks[0]))]
mapped = ['n/a'*len(base)]

return mapped
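The tiny memory footprint claimed in the commit message comes from this incremental mapping: each new member is folded into a single running result per group instead of keeping every member's data around. For 'avg' that is the standard running-mean update (n*b+a)/(n+1). A minimal standalone sketch of the idea, not the script's exact code:

    def running_avg(n, base, new):
        # fold value number n+1 into an average that already covers n values
        return (n * base + new) / (n + 1.0)

    avg = 0.0
    for i, value in enumerate([2.0, 4.0, 9.0]):
        avg = running_avg(i, avg, value)
    # avg == 5.0, the same as sum([2.0, 4.0, 9.0])/3 but without storing the list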


# -----------------------------
def OpenPostfile(name,type):
#

@@ -680,6 +678,12 @@ if options.nodalScalar and ( options.elementalScalar or options.elementalTenso
parser.print_help()
parser.error('not allowed to mix nodal with elemental results...')

if not options.nodalScalar: options.nodalScalar = []
if not options.elementalScalar: options.elementalScalar = []
if not options.elementalTensor: options.elementalTensor = []
if not options.homogenizationResult: options.homogenizationResult = []
if not options.crystalliteResult: options.crystalliteResult = []
if not options.constitutiveResult: options.constitutiveResult = []

# --- start background messaging

@@ -760,27 +764,19 @@ if options.range:
min(stat['NumberOfIncrements'],options.range[1]+1),
options.range[2])

fileOpen = False
assembleHeader = True
header = []

element_scalar = {}
element_tensor = {}
# --------------------------- build group membership --------------------------------

p.moveto(increments[0]+offset_inc)
index = {}
groups = []
groupCount = 0
memberCount = 0

# --- loop over increments

time_start = time.time()

for incCount,increment in enumerate(increments):
p.moveto(increment+offset_inc)
data = {}

if options.nodalScalar:
for n in range(stat['NumberOfNodes']):
if n%100 == 0:
time_delta = (len(increments)-incCount)*(time.time()-time_start)/max(1.0,incCount)
bg.set_message('(%02i:%02i:%02i) read node %i from increment %i...'%(time_delta//3600,time_delta%3600//60,time_delta%60,n,increment))
if options.nodalScalar:
for n in xrange(stat['NumberOfNodes']):
if n%1000 == 0:
bg.set_message('scan node %i...'%n)
myNodeID = p.node_id(n)
myNodeCoordinates = [p.node(n).x, p.node(n).y, p.node(n).z]
myElemID = 0
@@ -794,32 +790,24 @@ for incCount,increment in enumerate(increments):

# --- group data locations

group = substituteLocation('#'.join(options.separation), [myElemID,myNodeID,myGrainID], myNodeCoordinates) # generates a unique key for a group of separated data based on the separation criterium for the location
if group not in data: # create a new group if not yet present
data[group] = []
data[group].append([]) # append a new list for each group member; each list will contain dictionaries with keys 'label, and 'content' for the associated data
data[group][-1].append({
'label': 'location',
'content': [myElemID,myNodeID,myGrainID] + myNodeCoordinates,
}) # first entry in this list always contains the location data
grp = substituteLocation('#'.join(options.separation), [myElemID,myNodeID,myGrainID], myNodeCoordinates) # generates a unique key for a group of separated data based on the separation criterium for the location

# --- get data from t16 file
if grp not in index: # create a new group if not yet present
index[grp] = groupCount
groups[groupCount] = [[0,0,0,0.0,0.0,0.0]] # initialize with avg location
groupCount += 1

for label in options.nodalScalar:
if assembleHeader:
header.append(label.replace(' ',''))
data[group][-1].append({
'label': label,
'content': [ p.node_scalar(n,stat['IndexOfLabel'][label]) ],
})
groups[index[grp]][0] = mapIncremental('','avg',
len(groups[index[grp]])-1,
groups[index[grp]][0],
[myElemID,myNodeID,myGrainID] + myNodeCoordinates) # incrementally update average location
groups[index[grp]].append([myElemID,myNodeID,myGrainID]) # append a new list defining each group member
memberCount += 1

assembleHeader = False
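The replacement bookkeeping is worth spelling out: instead of the old per-group data dictionary holding every member's values, the recode keeps only an index from group key to a slot in groups, where slot 0 of each group is the incrementally averaged location and the remaining entries are lightweight member IDs. A simplified sketch of that layout; the add_member helper below is hypothetical and only illustrates the pattern used in the diff:

    index = {}     # group key -> position in groups
    groups = []    # groups[i] == [running average of location, member, member, ...]

    def add_member(key, ids, coords):
        # ids = [elem, node, grain]; coords = [x, y, z]; illustrative helper only
        if key not in index:
            index[key] = len(groups)
            groups.append([[0, 0, 0, 0.0, 0.0, 0.0]])   # slot 0: averaged location
        g = groups[index[key]]
        n = len(g) - 1                                   # members folded in so far
        g[0] = [(n*b + a)/(n + 1.0) for b, a in zip(g[0], ids + coords)]
        g.append(ids)                                    # membership record only, no field data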

else:
for e in range(stat['NumberOfElements']):
if e%100 == 0:
time_delta = (len(increments)-incCount)*(time.time()-time_start)/max(1.0,incCount)
bg.set_message('(%02i:%02i:%02i) read elem %i from increment %i...'%(time_delta//3600,time_delta%3600//60,time_delta%60,e,increment))
else:
for e in xrange(stat['NumberOfElements']):
if e%1000 == 0:
bg.set_message('scan elem %i...'%e)
myElemID = p.element_id(e)
myIpCoordinates = ipCoords(p.element(e).type, map(lambda node: [node.x, node.y, node.z], map(p.node, map(p.node_sequence,p.element(e).items))))
for n,myNodeID in enumerate(p.element(e).items):
@@ -835,101 +823,70 @@ for incCount,increment in enumerate(increments):

# --- group data locations

group = substituteLocation('#'.join(options.separation), [myElemID,myNodeID,myGrainID], myIpCoordinates[n]) # generates a unique key for a group of separated data based on the separation criterium for the location
if group not in data: # create a new group if not yet present
data[group] = []
data[group].append([]) # append a new list for each group member; each list will contain dictionaries with keys 'label, and 'content' for the associated data
data[group][-1].append({
'label': 'location',
'content': [myElemID,myNodeID,myGrainID] + myIpCoordinates[n],
}) # first entry in this list always contains the location data
# print group,sys.getsizeof(data) # better way of tracing leaks: http://www.lshift.net/blog/2008/11/14/tracing-python-memory-leaks
grp = substituteLocation('#'.join(options.separation), [myElemID,myNodeID,myGrainID], myIpCoordinates[n]) # generates a unique key for a group of separated data based on the separation criterium for the location

# --- get data from t16 file
if grp not in index: # create a new group if not yet present
index[grp] = groupCount
groups.append([[0,0,0,0.0,0.0,0.0]]) # initialize with avg location
groupCount += 1

if options.elementalScalar:
for label in options.elementalScalar:
if assembleHeader:
header.append(label.replace(' ',''))
data[group][-1].append({
'label': label,
'content': [ p.element_scalar(e,stat['IndexOfLabel'][label])[n].value ],
})
groups[index[grp]][0] = mapIncremental('','avg',
len(groups[index[grp]])-1,
groups[index[grp]][0],
[myElemID,myNodeID,myGrainID] + myIpCoordinates[n]) # incrementally update average location
groups[index[grp]].append([myElemID,myNodeID,myGrainID,n]) # append a new list defining each group member
memberCount += 1

if options.elementalTensor:
for label in options.elementalTensor:
if assembleHeader:
header += ['%s.%s'%(label.replace(' ',''),component) for component in ['intensity','t11','t22','t33','t12','t23','t13']]
myTensor = p.element_tensor(e,stat['IndexOfLabel'][label])[n]
data[group][-1].append({
'label': label,
'content': [ myTensor.intensity,
myTensor.t11, myTensor.t22, myTensor.t33,
myTensor.t12, myTensor.t23, myTensor.t13,
],
})
# --------------------------- prevent avg of e,n,g --------------------------------

if options.homogenizationResult:
for label in options.homogenizationResult:
outputIndex = list(zip(*outputFormat['Homogenization']['outputs'])[0]).index(label) # find the position of this output in the outputFormat
length = int(outputFormat['Homogenization']['outputs'][outputIndex][1])
if length > 1:
if assembleHeader:
header += ['%i_%s'%(component+1,label) for component in range(length)]
data[group][-1].append({
'label': label,
'content': [ p.element_scalar(e,stat['IndexOfLabel']['%i_%s'%(component+1,label)])[n].value
for component in range(length) ],
})
else:
if assembleHeader:
header.append(label)
data[group][-1].append({
'label': label,
'content': [ p.element_scalar(e,stat['IndexOfLabel']['%s'%label])[n].value ],
})
for grp in xrange(len(groups)):
if len(groups[grp]) > 2: # more than one member in group? (avgLoc + 2+ entries?)
groups[grp][0][:3] = ['n/a','n/a','n/a'] # no avg value for elem, ip, or grain meaningful

if options.crystalliteResult:
for label in options.crystalliteResult:
outputIndex = list(zip(*outputFormat['Crystallite']['outputs'])[0]).index(label) # find the position of this output in the outputFormat
length = int(outputFormat['Crystallite']['outputs'][outputIndex][1])
if length > 1:
if assembleHeader:
header += ['%i_%i_%s'%(g+1,component+1,label) for component in range(length)]
data[group][-1].append({
'label': label,
'content': [ p.element_scalar(e,stat['IndexOfLabel']['%i_%i_%s'%(g+1,component+1,label)])[n].value
for component in range(length) ],
})
else:
if assembleHeader:
header.append('%i_%s'%(g+1,label))
data[group][-1].append({
'label':label,
'content': [ p.element_scalar(e,stat['IndexOfLabel']['%i_%s'%(g+1,label)])[n].value ],
})
# --------------------------- sort groups --------------------------------

if options.constitutiveResult:
for label in options.constitutiveResult:
outputIndex = list(zip(*outputFormat['Constitutive']['outputs'])[0]).index(label) # find the position of this output in the outputFormat
length = int(outputFormat['Constitutive']['outputs'][outputIndex][1])
if length > 1:
if assembleHeader:
header += ['%i_%i_%s'%(g+1,component+1,label) for component in range(length)]
data[group][-1].append({
'label':label,
'content': [ p.element_scalar(e,stat['IndexOfLabel']['%i_%i_%s'%(g+1,component+1,label)])[n].value
for component in range(length) ],
})
else:
if assembleHeader:
header.append('%i_%s'%(g+1,label))
data[group][-1].append({
'label':label,
'content': [ p.element_scalar(e,stat['IndexOfLabel']['%i_%s'%(g+1,label)])[n].value ],
})
where = {
'elem': 0,
'node': 1,
'grain': 2,
'x': 3,
'y': 4,
'z': 5,
}

assembleHeader = False
sortProperties = []
for item in options.sort:
if item not in options.separation:
sortProperties.append(item)

theKeys = []
for criterium in options.separation+sortProperties:
if criterium in where:
theKeys.append('x[0][%i]'%where[criterium])

sortKeys = eval('lambda x:(%s)'%(','.join(theKeys)))
bg.set_message('sorting groups...')
groups.sort(key = sortKeys) # in-place sorting to save mem
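The eval above only builds a key function that picks the requested columns out of each group's averaged location (entry 0). An equivalent, eval-free way to express the same key, shown purely for illustration and not part of the commit:

    columns = [where[crit] for crit in options.separation + sortProperties if crit in where]
    groups.sort(key=lambda g: tuple(g[0][i] for i in columns))   # same ordering as the eval'd lambda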

fileOpen = False
assembleHeader = True
header = []

standard = ['inc'] + \
{True: ['time'],
False:[]}[options.time] + \
['elem','node','grain'] + \
{True: ['node.x','node.y','node.z'],
False:['ip.x','ip.y','ip.z']}[options.nodalScalar != []]

# --------------------------- loop over increments --------------------------------

time_start = time.time()

for incCount,increment in enumerate(increments):
p.moveto(increment+offset_inc)

# --------------------------- file management --------------------------------

if options.separateFiles:
if fileOpen:

@@ -939,58 +896,95 @@ for incCount,increment in enumerate(increments):
else:
outFilename = '%s.txt'%(dirname + os.sep + os.path.split(filename)[1])

# --- write header to file

if not fileOpen:
file = open(outFilename,'w')
fileOpen = True
file.write('2\theader\n')
file.write('$Id$\n')
if options.time:
basic = ['inc','time']
else:
basic = ['inc']
if options.nodalScalar:
file.write('\t'.join(basic + ['elem','node','grain','node.x','node.y','node.z'] + header) + '\n')
else:
file.write('\t'.join(basic + ['elem','node','grain','ip.x','ip.y','ip.z'] + header) + '\n')
headerWritten = False

# --- write data to file
file.flush()

output = []
for group in data:
if options.time:
output.append([increment, p.time])
# --------------------------- read and map data per group --------------------------------

member = 0
for i,group in enumerate(groups):

N = 0 # group member counter
for (e,n,g,n_local) in group[1:]: # loop over group members
member += 1
if member%1000 == 0:
time_delta = ((len(increments)*memberCount)/float(member+incCount*memberCount)-1.0)*(time.time()-time_start)
bg.set_message('(%02i:%02i:%02i) processing point %i of %i from increment %i...'%(time_delta//3600,time_delta%3600//60,time_delta%60,member,memberCount,increment))
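The "better estimation of remaining time" from the commit message is this line: with total = len(increments)*memberCount points overall and done = member + incCount*memberCount points already processed, the remaining time is extrapolated as (total/done - 1) * elapsed, i.e. the future rate is assumed to equal the average rate so far. A small standalone sketch of the same estimate (numbers below are made up for illustration):

    def eta_seconds(total, done, elapsed):
        # remaining = elapsed * (total - done) / done  ==  (total/done - 1) * elapsed
        return (float(total) / done - 1.0) * elapsed

    # 30 s spent on 1000 of 4000 points -> 90 s remaining
    remaining = eta_seconds(4000, 1000, 30.0)
    hhmmss = '%02i:%02i:%02i' % (remaining // 3600, remaining % 3600 // 60, remaining % 60)   # '00:01:30'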
newby = [] # current member's data
if options.elementalScalar:
for label in options.elementalScalar:
if assembleHeader:
header += [label.replace(' ','')]
newby.append({'label':label,
'len':1,
'content':[ p.element_scalar(e,stat['IndexOfLabel'][label])[n_local].value ]})

if options.elementalTensor:
for label in options.elementalTensor:
if assembleHeader:
header += heading('.',[[label.replace(' ',''),component] for component in ['intensity','t11','t22','t33','t12','t23','t13']])
myTensor = p.element_tensor(e,stat['IndexOfLabel'][label])[n_local]
newby.append({'label':label,
'len':length,
'content':[ myTensor.intensity,
myTensor.t11, myTensor.t22, myTensor.t33,
myTensor.t12, myTensor.t23, myTensor.t13,
]})

if options.homogenizationResult or \
options.crystalliteResult or \
options.constitutiveResult:
for (label,resultType) in zip(options.homogenizationResult +
options.crystalliteResult +
options.constitutiveResult,
['Homogenization']*len(options.homogenizationResult) +
['Crystallite']*len(options.crystalliteResult) +
['Constitutive']*len(options.constitutiveResult)
):
outputIndex = list(zip(*outputFormat[resultType]['outputs'])[0]).index(label) # find the position of this output in the outputFormat
length = int(outputFormat[resultType]['outputs'][outputIndex][1])
if resultType == 'Homogenization':
thisHead = heading('_',[[component,label] for component in range(int(length>1),length+int(length>1))])
else:
output.append([increment])
for chunk in range(len(data[group][0])):
label = data[group][0][chunk]['label'] # name of chunk (e.g. 'orientation', or 'flow stress')
groupContent = [data[group][member][chunk]['content'] for member in range(len(data[group]))] # list of each member's chunk
if label == 'location':
condensedGroupContent = mapFunc(label, groupContent, 'avg') # always average location
if 'elem' not in options.separation:
condensedGroupContent[0] = 'n/a'
if 'node' not in options.separation:
condensedGroupContent[1] = 'n/a'
if 'grain' not in options.separation:
condensedGroupContent[2] = 'n/a'
elif len(groupContent) == 1:
condensedGroupContent = map(str,groupContent[0])
else:
condensedGroupContent = mapFunc(label, groupContent, options.func) # map function to groupContent to get condensed data of this group's chunk
output[-1] += condensedGroupContent
thisHead = heading('_',[[g,component,label] for component in range(int(length>1),length+int(length>1))])
if assembleHeader: header += thisHead
newby.append({'label':label,
'len':length,
'content':[ p.element_scalar(e,stat['IndexOfLabel'][head])[n_local].value
for head in thisHead ]})

sortProperties = []
for item in options.sort:
if item not in options.separation:
sortProperties.append(item)
assembleHeader = False

for groupvalues in sortBySeparation(output, options.separation+sortProperties, int(options.time)): # sort output according to separation criteria
file.write('\t'.join(map(str,groupvalues)) + '\n')
if N == 0: mappedResult = [0.0]*len(header)

pos = 0
for chunk in newby:
mappedResult[pos:pos+chunk['len']] = mapIncremental(chunk['label'],options.func,
N,mappedResult[pos:pos+chunk['len']],chunk['content'])
pos += chunk['len']

N += 1
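Each group member contributes one flat list of values per chunk; the loop above folds those into mappedResult slice by slice, so only one output row per group is ever held in memory. A toy version of that pattern with made-up labels and values, shown here only to illustrate the slice-wise running average:

    header = ['s', 'T.t11', 'T.t22']                  # one scalar chunk, one 2-component chunk
    members = [[1.0, 10.0, 20.0], [3.0, 30.0, 40.0]]  # each member's values, flattened in header order
    chunk_lens = [1, 2]

    row = [0.0] * len(header)
    for N, values in enumerate(members):
        pos = 0
        for length in chunk_lens:
            row[pos:pos+length] = [(N * b + a) / (N + 1.0)
                                   for b, a in zip(row[pos:pos+length], values[pos:pos+length])]
            pos += length
    # row == [2.0, 20.0, 30.0] -- the column-wise average, built incrementally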

# --- write data row to file ---

if not headerWritten:
file.write('\t'.join(standard + header) + '\n')
headerWritten = True

file.write('\t'.join(map(str,[increment] + \
{True:[p.time],False:[]}[options.time] + \
group[0] + \
mappedResult)
) + '\n')

if fileOpen:
file.close()


# --------------------------- DONE --------------------------------