fixed a nasty bug in reading the header of spectral files

file type is guessed from extension

script now tries to continue gracefully when data is missing but not crucial

ASCII file splitting now puts the correct increment (not the position) in the file name: poop_inc123.txt refers to true increment 123, no longer to position 123 in the result file...
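
A minimal sketch (hypothetical values) of the position-to-increment mapping the new naming reflects: positions index the rows of the result file, increments are the solver's true step numbers, and the two differ whenever increments were skipped when writing results.

positions  = [0, 1, 2]     # row index within the result file
increments = [0, 50, 123]  # true increment stored at each row
# old scheme: the third split file was named poop_inc2.txt (after position 2)
# new scheme: it is named poop_inc123.txt (after true increment 123)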
This commit is contained in:
Onur Guevenc 2011-06-21 12:38:58 +00:00
parent b6b02f6cf9
commit daa45306f2
1 changed file with 129 additions and 114 deletions


@@ -3,7 +3,13 @@
import pdb, os, sys, gc, math, re, threading, time, struct, string
from optparse import OptionParser, OptionGroup, Option, SUPPRESS_HELP
releases = {'2010':['linux64',''],
fileExtensions = { \
'marc': ['.t16',],
'spectral': ['.spectralOut',],
}
releases = { \
'2010':['linux64',''],
'2008r1':[''],
'2007r1':[''],
'2005r3':[''],
@@ -75,16 +81,16 @@ class MPIEspectral_result: # mimic py_post result object
self.theTitle = self._keyedString('load')
self.wd = self._keyedString('workingdir')
self.geometry = self._keyedString('geometry')
self.N_loadcases = self._keyedInt('loadcases',default=1)
self._frequencies = self._keyedInts('frequencies',self.N_loadcases,1)
self._increments = self._keyedInts('increments',self.N_loadcases)
self.N_loadcases = self._keyedPackedArray('loadcases',count=1,type='i',default=1)[0]
self._frequencies = self._keyedPackedArray('frequencies',count=self.N_loadcases,type='i',default=1)
self._increments = self._keyedPackedArray('increments',count=self.N_loadcases,type='i')
self._increments[0] -= 1 # don't count the zeroth (initial) increment of the first loadcase
self._times = self._keyedFloats('times',self.N_loadcases,0.0)
self.dimension = self._keyedPackedArray('dimension',3,'d')
self.resolution = self._keyedPackedArray('resolution',3,'i')
self._times = self._keyedPackedArray('times',count=self.N_loadcases,type='d',default=0.0)
self.dimension = self._keyedPackedArray('dimension',count=3,type='d')
self.resolution = self._keyedPackedArray('resolution',count=3,type='i')
self.N_nodes = (self.resolution[0]+1)*(self.resolution[1]+1)*(self.resolution[2]+1)
self.N_elements = self.resolution[0] * self.resolution[1] * self.resolution[2]
self.N_element_scalars = self._keyedInt('materialpoint_sizeResults')
self.N_element_scalars = self._keyedPackedArray('materialpoint_sizeResults',count=1,type='i',default=0)[0]
self.file.seek(0)
self.dataOffset = self.file.read(2048).find('eoh')+7 # data starts after 'eoh' (3 bytes) plus its 4-byte closing tag
@@ -97,60 +103,43 @@ class MPIEspectral_result: # mimic py_post result object
def __str__(self):
return '\n'.join([
'title: %s'%self.theTitle,
'workdir: %s'%self.wd,
'geometry: %s'%self.geometry,
'extrapolation: %s'%self.extrapolate,
'loadcases: %i'%self.N_loadcases,
'increments: %i'%self.N_increments,
'increment: %i'%self.increment,
'position: %i'%self.position,
'time: %i'%self.time,
'nodes: %i'%self.N_nodes,
'resolution: %s'%(','.join(map(str,self.resolution))),
'dimension: %s'%(','.join(map(str,self.dimension))),
'elements: %i'%self.N_elements,
'nodal_scalars: %i'%self.N_node_scalars,
'elemental scalars: %i'%self.N_element_scalars,
'elemental tensors: %i'%self.N_element_tensors,
'header size: %i'%self.dataOffset,
'file size: %i'%(self.dataOffset+self.N_increments*(8+self.N_elements*self.N_element_scalars*8)),
]
)
def _keyedPackedArray(self,identifier,length = 3,type = 'd'):
match = {'d': 8,'i': 4}
self.file.seek(0)
m = re.search('%s%s'%(identifier,'(.{%i})'%(match[type])*length),self.file.read(2048),re.DOTALL)
values = []
if m:
for i in m.groups():
values.append(struct.unpack(type,i)[0])
def locateKeyValue(self,identifier):
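# scan the first 2048 header bytes for 'identifier': each header record is
# framed by a repeated 4-byte tag (Fortran-style record markers, judging by
# the matching opening and closing tags), i.e. <tag><key><values><tag>;
# peek at each record's key and hop from tag to tag until the identifier,
# the 'eoh' end-of-header marker, or the byte limit is reached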
key = {'name':'','pos':0}
filepos = 0
while key['name'] != identifier and key['name'] != 'eoh' and filepos < 2048:
self.file.seek(filepos)
tag = self.file.read(4) # read the starting/ending tag
key['name'] = self.file.read(len(identifier)) # anticipate identifier
key['pos'] = self.file.tell() # remember position right after identifier
self.file.seek(filepos+4) # start looking after opening tag
filepos += 4 + self.file.read(2048).find(tag) + 4 # locate end of closing tag
return key
def _keyedPackedArray(self,identifier,count = 3,type = 'd',default = None):
bytecount = {'d': 8,'i': 4}
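# prefill with the default so that a key absent from the header still
# yields usable values (missing but not crucial data) instead of aborting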
values = [default]*count
key = self.locateKeyValue(identifier)
if key['name'] == identifier:
self.file.seek(key['pos'])
for i in range(count):
values[i] = struct.unpack(type,self.file.read(bytecount[type]))[0]
return values
def _keyedInt(self,identifier,default=None):
value = default
self.file.seek(0)
m = re.search('%s%s'%(identifier,'(.{4})'),self.file.read(2048),re.DOTALL)
if m:
value = struct.unpack('i',m.group(1))[0]
return value
def _keyedInts(self,identifier,number=1,default=None):
values = [default]*number
self.file.seek(0)
m = re.search('%s%s'%(identifier,'(.{4})'*number),self.file.read(2048),re.DOTALL)
if m:
for i in range(number):
values[i] = struct.unpack('i',m.group(1+i))[0]
return values
def _keyedFloats(self,identifier,number=1,default=None):
values = [default]*number
self.file.seek(0)
m = re.search('%s%s'%(identifier,'(.{8})'*number),self.file.read(2048),re.DOTALL)
if m:
for i in range(number):
values[i] = struct.unpack('d',m.group(1+i))[0]
return values
def _keyedString(self,identifier,default=None):
value = default
@@ -229,7 +218,12 @@ class MPIEspectral_result: # mimic py_post result object
def element_scalar(self,e,idx):
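# each position's record is framed as <4-byte tag><payload><4-byte tag>,
# hence the (4+...+4) stride per record and the extra 4 that skips the
# opening tag of the record being read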
self.file.seek(self.dataOffset+(self.position*(4+self.N_elements*self.N_element_scalars*8+4) + 4+(e*self.N_element_scalars + idx)*8))
value = struct.unpack('d',self.file.read(8))[0]
try:
value = struct.unpack('d',self.file.read(8))[0]
except:
print 'seeking',self.dataOffset+(self.position*(4+self.N_elements*self.N_element_scalars*8+4) + 4+(e*self.N_element_scalars + idx)*8)
print 'e',e,'idx',idx
sys.exit(1)
return [elemental_scalar(node,value) for node in self.element(e).items]
def element_scalar_label(elem,idx):
@@ -434,13 +428,7 @@ def OpenPostfile(name,type):
p = {\
'spectral': MPIEspectral_result,\
'marc': post_open,\
}[type]\
(name+
{\
'marc': '.t16',\
'spectral': '.spectralOut',\
}[type]
)
}[type](name)
p.extrapolation('translate')
p.moveto(1)
@@ -555,32 +543,33 @@ def ParsePostfile(p,filename, outputFormat):
stat['IndexOfLabel']['%s'%(var[0])] = startIndex + offset + 1
offset += var[1]
for grain in range(outputFormat['Homogenization']['specials']['(ngrains)']):
stat['IndexOfLabel']['%i_CrystalliteCount'%(grain+1)] = startIndex + offset + 1
offset += 1
for var in outputFormat['Crystallite']['outputs']:
if var[1] > 1:
for i in range(var[1]):
stat['IndexOfLabel']['%i_%i_%s'%(grain+1,i+1,var[0])] = startIndex + offset + (i+1)
else:
stat['IndexOfLabel']['%i_%s'%(grain+1,var[0])] = startIndex + offset + 1
offset += var[1]
if '(ngrains)' in outputFormat['Homogenization']['specials']:
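# grain-resolved labels are indexed only if the homogenization output
# declares (ngrains); otherwise the whole block is skipped gracefully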
for grain in range(outputFormat['Homogenization']['specials']['(ngrains)']):
stat['IndexOfLabel']['%i_CrystalliteCount'%(grain+1)] = startIndex + offset + 1
offset += 1
for var in outputFormat['Crystallite']['outputs']:
if var[1] > 1:
for i in range(var[1]):
stat['IndexOfLabel']['%i_%i_%s'%(grain+1,i+1,var[0])] = startIndex + offset + (i+1)
else:
stat['IndexOfLabel']['%i_%s'%(grain+1,var[0])] = startIndex + offset + 1
offset += var[1]
stat['IndexOfLabel']['%i_ConstitutiveCount'%(grain+1)] = startIndex + offset + 1
offset += 1
for var in outputFormat['Constitutive']['outputs']:
if var[1] > 1:
for i in range(var[1]):
stat['IndexOfLabel']['%i_%i_%s'%(grain+1,i+1,var[0])] = startIndex + offset + (i+1)
else:
stat['IndexOfLabel']['%i_%s'%(grain+1,var[0])] = startIndex + offset + 1
offset += var[1]
stat['IndexOfLabel']['%i_ConstitutiveCount'%(grain+1)] = startIndex + offset + 1
offset += 1
for var in outputFormat['Constitutive']['outputs']:
if var[1] > 1:
for i in range(var[1]):
stat['IndexOfLabel']['%i_%i_%s'%(grain+1,i+1,var[0])] = startIndex + offset + (i+1)
else:
stat['IndexOfLabel']['%i_%s'%(grain+1,var[0])] = startIndex + offset + 1
offset += var[1]
return stat
# -----------------------------
def SummarizePostfile(stat,where=sys.stdout):
def SummarizePostfile(stat,where=sys.stdout,format='marc'):
# -----------------------------
where.write('\n\n')
@@ -638,9 +627,8 @@ parser.add_option('-m','--map', dest='func', type='string', \
help='data reduction mapping ["%default"] out of min, max, avg, sum, abssum or user-lambda')
parser.add_option('-p','--type', dest='filetype', type='string', \
help = 'type of result file [%default]')
group_material = OptionGroup(parser,'Material identifier')
group_special = OptionGroup(parser,'Special outputs')
group_general = OptionGroup(parser,'General outputs')
group_material.add_option('--homogenization', dest='homog', type='string', \
help='homogenization identifier (as string or integer [%default])')
@@ -649,6 +637,8 @@ group_material.add_option('--crystallite', dest='cryst', type='string', \
group_material.add_option('--phase', dest='phase', type='string', \
help='phase identifier (as string or integer [%default])')
group_special = OptionGroup(parser,'Special outputs')
group_special.add_option('-t','--time', action='store_true', dest='time', \
help='output time of increment [%default]')
group_special.add_option('-f','--filter', dest='filter', type='string', \
@@ -658,6 +648,8 @@ group_special.add_option('--separation', action='extend', dest='separation', typ
group_special.add_option('--sort', action='extend', dest='sort', type='string', \
help='properties to sort results [%default]')
group_general = OptionGroup(parser,'General outputs')
group_general.add_option('--ns', action='extend', dest='nodalScalar', type='string', \
help='list of nodal scalars to extract')
group_general.add_option('--es', action='extend', dest='elementalScalar', type='string', \
@@ -679,7 +671,7 @@ parser.set_defaults(info = False)
parser.set_defaults(sloppy = False)
parser.set_defaults(prefix = '')
parser.set_defaults(directory = 'postProc')
parser.set_defaults(filetype = 'marc')
parser.set_defaults(filetype = None)
parser.set_defaults(func = 'avg')
parser.set_defaults(homog = '1')
parser.set_defaults(cryst = '1')
@@ -693,8 +685,33 @@ parser.set_defaults(separateFiles = False)
(options, files) = parser.parse_args()
# --- basic sanity checks
if files == []:
parser.print_help()
parser.error('no file specified...')
if not os.path.exists(files[0]):
parser.print_help()
parser.error('invalid file "%s" specified...'%files[0])
# --- figure out filetype
if options.filetype == None:
ext = os.path.splitext(files[0])[1]
for theType in fileExtensions.keys():
if ext in fileExtensions[theType]:
options.filetype = theType
break
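# if no known extension matched, options.filetype remains None and is
# rejected by the sanity check below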
if options.filetype != None:
options.filetype = options.filetype.lower()
# --- more sanity checks
if options.filetype not in ['marc','spectral']:
parser.print_help()
parser.error('file type "%s" not supported...'%options.filetype)
if options.filetype == 'marc':
try:
file = open('%s/../MSCpath'%os.path.dirname(os.path.realpath(sys.argv[0])))
@@ -722,22 +739,6 @@ else:
def post_open():
return
# --- sanity checks
if files == []:
parser.print_help()
parser.error('no file specified...')
if not os.path.exists(files[0]):
parser.print_help()
parser.error('invalid file "%s" specified...'%files[0])
if options.filetype not in ['marc','spectral']:
parser.print_help()
parser.error('file type "%s" not supported...'%options.filetype)
if options.constitutiveResult and not options.phase:
parser.print_help()
parser.error('constitutive results require phase...')
@@ -764,7 +765,12 @@ bg.start()
# --- parse .output and .t16 files
filename = os.path.splitext(files[0])[0]
if os.path.splitext(files[0])[1] == '':
filename = files[0]
extension = fileExtensions[options.filetype]
else:
filename = os.path.splitext(files[0])[0]
extension = os.path.splitext(files[0])[1]
outputFormat = {}
me = {
@@ -782,7 +788,7 @@ for what in me:
print '\n'.join(map(lambda x:' '+x, outputFormat[what]['specials']['brothers']))
bg.set_message('opening result file...')
p = OpenPostfile(filename,options.filetype)
p = OpenPostfile(filename+extension,options.filetype)
bg.set_message('parsing result file...')
stat = ParsePostfile(p, filename, outputFormat)
if options.filetype == 'marc':
@@ -806,6 +812,8 @@ for opt in ['nodalScalar','elementalScalar','elementalTensor','homogenizationRes
if options.info:
if options.filetype == 'marc':
print '\n\nMentat release %s'%release
if options.filetype == 'spectral':
print '\n\n',p
SummarizePostfile(stat,sys.stderr)
@@ -837,24 +845,24 @@ for l in elementsOfNode.values():
# --- get output data from .t16 file
increments = range(stat['NumberOfIncrements'])
positions = range(stat['NumberOfIncrements'])
if options.filetype == 'marc':
offset_inc = 1
offset_pos = 1
else:
offset_inc = 0
offset_pos = 0
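# positions in marc (t16) files are addressed starting at 1 by py_post,
# while the spectral reader counts from 0, hence the per-type offset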
if options.range:
options.range = list(options.range)
if options.sloppy:
increments = range(options.range[0],options.range[1]+1,options.range[2])
positions = range(options.range[0],options.range[1]+1,options.range[2])
else:
increments = range( max(0,options.range[0]),
positions = range( max(0,options.range[0]),
min(stat['NumberOfIncrements'],options.range[1]+1),
options.range[2])
# --------------------------- build group membership --------------------------------
p.moveto(increments[0]+offset_inc)
p.moveto(positions[0]+offset_pos)
index = {}
groups = []
groupCount = 0
@@ -982,19 +990,26 @@ standard = ['inc'] + \
{True: ['node.x','node.y','node.z'],
False:['ip.x','ip.y','ip.z']}[options.nodalScalar != []]
# --------------------------- loop over increments --------------------------------
# --------------------------- loop over positions --------------------------------
bg.set_message('getting increments...')
increments = [None]*len(positions)
for incCount,position in enumerate(positions):
p.moveto(position+offset_pos)
increments[incCount] = p.increment # remember "real" increment at this position
time_start = time.time()
for incCount,increment in enumerate(increments):
p.moveto(increment+offset_inc)
for incCount,position in enumerate(positions):
p.moveto(position+offset_pos)
# --------------------------- file management --------------------------------
if options.separateFiles:
if fileOpen:
file.close()
fileOpen = False
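# the inner eval builds a format string whose increment field is zero-padded
# to the width of the largest increment (e.g. '%s_inc%03i.txt'); the outer
# eval then fills in the output path and the increment number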
outFilename = eval('"'+eval("'%%s_inc%%0%ii.txt'%(math.log10(max(increments+[1]))+1)")+'"%(dirname + os.sep + options.prefix + os.path.split(filename)[1],increment)')
outFilename = eval('"'+eval("'%%s_inc%%0%ii.txt'%(math.log10(max(increments+[1]))+1)")+'"%(dirname + os.sep + options.prefix + os.path.split(filename)[1],increments[incCount])')
else:
outFilename = '%s.txt'%(dirname + os.sep + options.prefix + os.path.split(filename)[1])
@@ -1017,8 +1032,8 @@ for incCount,increment in enumerate(increments):
for (e,n,i,g,n_local) in group[1:]: # loop over group members
member += 1
if member%1000 == 0:
time_delta = ((len(increments)*memberCount)/float(member+incCount*memberCount)-1.0)*(time.time()-time_start)
bg.set_message('(%02i:%02i:%02i) processing point %i of %i from increment %i...'%(time_delta//3600,time_delta%3600//60,time_delta%60,member,memberCount,increment))
time_delta = ((len(positions)*memberCount)/float(member+incCount*memberCount)-1.0)*(time.time()-time_start)
bg.set_message('(%02i:%02i:%02i) processing point %i of %i from position %i...'%(time_delta//3600,time_delta%3600//60,time_delta%60,member,memberCount,position))
newby = [] # current member's data