only consider increments actually present in (spectral) result

added switch to change from range of positions to range of increments

polished help output
Onur Guevenc 2011-07-21 15:45:41 +00:00
parent 2e9c605571
commit 105a09f3df
1 changed file with 111 additions and 90 deletions


@@ -62,6 +62,7 @@ class MPIEspectral_result: # mimic py_post result object
   extrapolate = ''
   N_loadcases = 0
   N_increments = 0
+  N_positions = 0
   _frequencies = []
   _increments = []
   _times = []
@@ -77,7 +78,13 @@ class MPIEspectral_result: # mimic py_post result object
   def __init__(self,filename):
     self.file = open(filename, 'rb')
+    self.filesize = os.path.getsize(filename)
+    self.dataOffset = 0
+    while self.dataOffset < self.filesize:
+      self.file.seek(self.dataOffset)
+      if self.file.read(3) == 'eoh': break
+      self.dataOffset += 1
+    self.dataOffset += 7
     self.theTitle = self._keyedString('load')
     self.wd = self._keyedString('workingdir')
     self.geometry = self._keyedString('geometry')
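
The scan added to __init__ can be read in isolation: walk the file byte by byte until the literal end-of-header marker 'eoh' appears, then skip the marker plus the 4-byte closing tag (hence the += 7). A minimal standalone sketch, with an illustrative function name and no claims about the real script's API:

  import os

  def find_data_offset(filename, marker='eoh'):
    filesize = os.path.getsize(filename)
    f = open(filename, 'rb')
    offset = 0
    while offset < filesize:                   # byte-wise scan copes with any header size
      f.seek(offset)
      if f.read(len(marker)) == marker:
        break
      offset += 1
    f.close()
    return offset + len(marker) + 4            # 3 marker bytes + assumed 4-byte closing tag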
@@ -91,10 +98,7 @@ class MPIEspectral_result: # mimic py_post result object
     self.N_nodes = (self.resolution[0]+1)*(self.resolution[1]+1)*(self.resolution[2]+1)
     self.N_elements = self.resolution[0] * self.resolution[1] * self.resolution[2]
     self.N_element_scalars = self._keyedPackedArray('materialpoint_sizeResults',count=1,type='i',default=0)[0]
-    self.file.seek(0)
-    self.dataOffset = self.file.read(2048).find('eoh')+7
+    self.N_positions = (self.filesize-self.dataOffset)/(8+self.N_elements*self.N_element_scalars*8)
     self.N_increments = 1                               # add zero'th entry
     for i in range(self.N_loadcases):
       self.N_increments += self._increments[i]//self._frequencies[i]
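
N_positions counts the data blocks physically present in the file rather than trusting the increment count promised by the load cases: each stored position occupies 8 bytes (presumably the increment record) plus N_elements*N_element_scalars doubles of 8 bytes each. A worked check with invented figures:

  # Illustrative figures only: 32x32x32 grid, 50 scalars per material point.
  N_elements = 32*32*32                                     # 32768
  N_element_scalars = 50
  bytes_per_position = 8 + N_elements*N_element_scalars*8   # 13107208
  dataOffset = 40                                           # hypothetical header size
  filesize = dataOffset + 3*bytes_per_position              # a file holding 3 positions
  N_positions = (filesize-dataOffset)//bytes_per_position   # -> 3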
@@ -109,7 +113,9 @@ class MPIEspectral_result: # mimic py_post result object
       'resolution: %s'%(','.join(map(str,self.resolution))),
       'dimension: %s'%(','.join(map(str,self.dimension))),
       'header size: %i'%self.dataOffset,
-      'file size: %i'%(self.dataOffset+self.N_increments*(8+self.N_elements*self.N_element_scalars*8)),
+      'actual file size: %i'%self.filesize,
+      'expected file size: %i'%(self.dataOffset+self.N_increments*(8+self.N_elements*self.N_element_scalars*8)),
+      'positions in file : %i'%self.N_positions,
     ]
     )
@@ -118,13 +124,13 @@ class MPIEspectral_result: # mimic py_post result object
     key = {'name':'','pos':0}
     filepos = 0
-    while key['name'] != identifier and key['name'] != 'eoh' and filepos < 2048:
+    while key['name'] != identifier and key['name'] != 'eoh' and filepos < self.dataOffset:
       self.file.seek(filepos)
       tag = self.file.read(4)                                        # read the starting/ending tag
       key['name'] = self.file.read(len(identifier))                  # anticipate identifier
       key['pos'] = self.file.tell()                                  # remember position right after identifier
       self.file.seek(filepos+4)                                      # start looking after opening tag
-      filepos += 4 + self.file.read(2048).find(tag) + 4              # locate end of closing tag
+      filepos += 4 + self.file.read(self.dataOffset).find(tag) + 4   # locate end of closing tag
     return key
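
Bounding the search by self.dataOffset instead of the former hard-coded 2048 means the key walk now covers exactly the header measured by the 'eoh' scan, so headers longer than 2 kB are no longer truncated. A condensed sketch of the walk, assuming (as the code does) that every header record opens and closes with the same 4-byte tag:

  def find_key(f, identifier, dataOffset):      # illustrative stand-in, not the class method
    filepos = 0
    while filepos < dataOffset:
      f.seek(filepos)
      tag = f.read(4)                                   # opening tag of this record
      name = f.read(len(identifier))                    # candidate key name
      if name == identifier or name == 'eoh':
        return {'name':name, 'pos':f.tell()}
      f.seek(filepos+4)                                 # resume after opening tag
      filepos += 4 + f.read(dataOffset).find(tag) + 4   # jump past closing tag
    return {'name':'', 'pos':0}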
@@ -137,14 +143,13 @@ class MPIEspectral_result: # mimic py_post result object
       self.file.seek(key['pos'])
       for i in range(count):
         values[i] = struct.unpack(type,self.file.read(bytecount[type]))[0]
     return values

   def _keyedString(self,identifier,default=None):
     value = default
     self.file.seek(0)
-    m = re.search(r'(.{4})%s(.*?)\1'%identifier,self.file.read(2048),re.DOTALL)
+    m = re.search(r'(.{4})%s(.*?)\1'%identifier,self.file.read(self.dataOffset),re.DOTALL)
     if m:
       value = m.group(2)
     return value
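
The regex encodes the same record framing declaratively: the first group captures whatever 4 bytes open the record, and the backreference \1 demands the identical 4 bytes to close the value. A standalone check against an invented header snippet:

  import re

  header = 'XTAGworkingdir/scratch/run1XTAG' + 'eoh....'   # made-up framing bytes
  m = re.search(r'(.{4})%s(.*?)\1'%'workingdir', header, re.DOTALL)
  if m: print m.group(2)                                   # -> /scratch/run1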
@@ -202,7 +207,7 @@ class MPIEspectral_result: # mimic py_post result object
     ],117))

   def increments(self):
-    return self.N_increments
+    return self.N_positions

   def nodes(self):
     return self.N_nodes
@@ -616,53 +621,55 @@ parser.add_option('-i','--info', action='store_true', dest='info', \
                   help='list contents of resultfile [%default]')
 parser.add_option( '--prefix', dest='prefix', \
                   help='prefix to result file name [%default]')
-parser.add_option('-d','--dir', dest='directory', \
+parser.add_option('-d','--dir', dest='dir', \
                   help='name of subdirectory to hold output [%default]')
 parser.add_option('-s','--split', action='store_true', dest='separateFiles', \
                   help='split output per increment [%default]')
 parser.add_option('-r','--range', dest='range', type='int', nargs=3, \
-                  help='range of increments to output (start, end, step) [all]')
+                  help='range of positions (or increments) to output (start, end, step) [all]')
+parser.add_option('--increments', action='store_true', dest='getIncrements', \
+                  help='switch to increment range [%default]')
 parser.add_option('--sloppy', action='store_true', dest='sloppy', \
                   help='do not pre-check validity of increment range')
 parser.add_option('-m','--map', dest='func', type='string', \
                   help='data reduction mapping ["%default"] out of min, max, avg, avgabs, sum, sumabs or user-lambda')
 parser.add_option('-p','--type', dest='filetype', type='string', \
-                  help = 'type of result file [%default]')
+                  help = 'type of result file [auto]')

 group_material = OptionGroup(parser,'Material identifier')

 group_material.add_option('--homogenization', dest='homog', type='string', \
-                          help='homogenization identifier (as string or integer [%default])')
+                          help='homogenization identifier (as string or integer [%default])', metavar='<ID>')
 group_material.add_option('--crystallite', dest='cryst', type='string', \
-                          help='crystallite identifier (as string or integer [%default])')
+                          help='crystallite identifier (as string or integer [%default])', metavar='<ID>')
 group_material.add_option('--phase', dest='phase', type='string', \
-                          help='phase identifier (as string or integer [%default])')
+                          help='phase identifier (as string or integer [%default])', metavar='<ID>')

 group_special = OptionGroup(parser,'Special outputs')

 group_special.add_option('-t','--time', action='store_true', dest='time', \
                          help='output time of increment [%default]')
 group_special.add_option('-f','--filter', dest='filter', type='string', \
-                         help='condition(s) to filter results [%default]')
+                         help='condition(s) to filter results [%default]', metavar='<CODE>')
-group_special.add_option('--separation', action='extend', dest='separation', type='string', \
-                         help='properties to separate results [%default]')
+group_special.add_option('--separation', action='extend', dest='sep', type='string', \
+                         help='properties to separate results [%default]', metavar='<LIST>')
 group_special.add_option('--sort', action='extend', dest='sort', type='string', \
-                         help='properties to sort results [%default]')
+                         help='properties to sort results [%default]', metavar='<LIST>')

 group_general = OptionGroup(parser,'General outputs')

 group_general.add_option('--ns', action='extend', dest='nodalScalar', type='string', \
-                         help='list of nodal scalars to extract')
+                         help='nodal scalars to extract', metavar='<LIST>')
-group_general.add_option('--es', action='extend', dest='elementalScalar', type='string', \
-                         help='list of elemental scalars to extract')
+group_general.add_option('--es', action='extend', dest='elemScalar', type='string', \
+                         help='elemental scalars to extract', metavar='<LIST>')
-group_general.add_option('--et', action='extend', dest='elementalTensor', type='string', \
-                         help='list of elemental tensors to extract')
+group_general.add_option('--et', action='extend', dest='elemTensor', type='string', \
+                         help='elemental tensors to extract', metavar='<LIST>')
 group_general.add_option('--ho', action='extend', dest='homogenizationResult', type='string', \
-                         help='list of homogenization results to extract')
+                         help='homogenization results to extract', metavar='<LIST>')
 group_general.add_option('--cr', action='extend', dest='crystalliteResult', type='string', \
-                         help='list of crystallite results to extract')
+                         help='crystallite results to extract', metavar='<LIST>')
 group_general.add_option('--co', action='extend', dest='constitutiveResult', type='string', \
-                         help='list of constitutive results to extract')
+                         help='constitutive results to extract', metavar='<LIST>')

 parser.add_option_group(group_material)
 parser.add_option_group(group_general)
@@ -671,18 +678,19 @@ parser.add_option_group(group_special)

 parser.set_defaults(info = False)
 parser.set_defaults(sloppy = False)
 parser.set_defaults(prefix = '')
-parser.set_defaults(directory = 'postProc')
+parser.set_defaults(dir = 'postProc')
 parser.set_defaults(filetype = None)
 parser.set_defaults(func = 'avg')
 parser.set_defaults(homog = '1')
 parser.set_defaults(cryst = '1')
 parser.set_defaults(phase = '1')
 parser.set_defaults(filter = '')
-parser.set_defaults(separation = [])
+parser.set_defaults(sep = [])
 parser.set_defaults(sort = [])
 parser.set_defaults(inc = False)
 parser.set_defaults(time = False)
 parser.set_defaults(separateFiles = False)
+parser.set_defaults(getIncrements= False)

 (options, files) = parser.parse_args()
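
With the new switch the same --range triple can address either index space: positions index the records in the file, increments the simulation steps that produced them. Hypothetical invocations (script and file names illustrative):

  postResults -r 10 80 10 job.spectralOut                  # positions 10,20,...,80 of the file
  postResults -r 10 80 10 --increments job.spectralOut     # increments 10,20,...,80 of the run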
@@ -744,20 +752,20 @@ if options.constitutiveResult and not options.phase:
   parser.print_help()
   parser.error('constitutive results require phase...')

-if options.nodalScalar and ( options.elementalScalar or options.elementalTensor
+if options.nodalScalar and ( options.elemScalar or options.elemTensor
                           or options.homogenizationResult or options.crystalliteResult or options.constitutiveResult ):
   parser.print_help()
   parser.error('not allowed to mix nodal with elemental results...')

 if not options.nodalScalar: options.nodalScalar = []
-if not options.elementalScalar: options.elementalScalar = []
-if not options.elementalTensor: options.elementalTensor = []
+if not options.elemScalar: options.elemScalar = []
+if not options.elemTensor: options.elemTensor = []
 if not options.homogenizationResult: options.homogenizationResult = []
 if not options.crystalliteResult: options.crystalliteResult = []
 if not options.constitutiveResult: options.constitutiveResult = []

 options.sort.reverse()
-options.separation.reverse()
+options.sep.reverse()
@@ -785,8 +793,8 @@ bg.set_message('parsing .output files...')
 for what in me:
   outputFormat[what] = ParseOutputFormat(filename, what, me[what])
   if not '_id' in outputFormat[what]['specials']:
-    print "'%s' not found in <%s>"%(me[what], what)
-    print '\n'.join(map(lambda x:'  '+x, outputFormat[what]['specials']['brothers']))
+    print "\nsection '%s' not found in <%s>"%(me[what], what)
+    print '\n'.join(map(lambda x:'  [%s]'%x, outputFormat[what]['specials']['brothers']))

 bg.set_message('opening result file...')
 p = OpenPostfile(filename+extension,options.filetype)
@@ -796,13 +804,13 @@ if options.filetype == 'marc':
   stat['NumberOfIncrements'] -= 1               # t16 contains one "virtual" increment (at 0)

 # --- sanity check for output variables
-# for mentat variables (nodalScalar,elementalScalar,elementalTensor) we simply have to check whether the label is found in the stat[indexOfLabel] dictionary
+# for mentat variables (nodalScalar,elemScalar,elemTensor) we simply have to check whether the label is found in the stat[indexOfLabel] dictionary
 # for user defined variables (homogenizationResult,crystalliteResult,constitutiveResult) we have to check the corresponding outputFormat, since the namescheme in stat['IndexOfLabel'] is different

-for opt in ['nodalScalar','elementalScalar','elementalTensor','homogenizationResult','crystalliteResult','constitutiveResult']:
+for opt in ['nodalScalar','elemScalar','elemTensor','homogenizationResult','crystalliteResult','constitutiveResult']:
   if eval('options.%s'%opt):
     for label in eval('options.%s'%opt):
-      if (opt in ['nodalScalar','elementalScalar','elementalTensor'] and label not in stat['IndexOfLabel'] and label not in ['elements',]) \
+      if (opt in ['nodalScalar','elemScalar','elemTensor'] and label not in stat['IndexOfLabel'] and label not in ['elements',]) \
         or (opt in ['homogenizationResult','crystalliteResult','constitutiveResult'] \
             and (not outputFormat[opt[:-6].capitalize()]['outputs'] or not label in zip(*outputFormat[opt[:-6].capitalize()]['outputs'])[0])):
         parser.error('%s "%s" unknown...'%(opt,label))
@@ -851,14 +859,6 @@ if options.filetype == 'marc':
   offset_pos = 1
 else:
   offset_pos = 0

-if options.range:
-  options.range = list(options.range)
-  if options.sloppy:
-    positions = range(options.range[0],options.range[1]+1,options.range[2])
-  else:
-    positions = range( max(0,options.range[0]),
-                       min(stat['NumberOfIncrements'],options.range[1]+1),
-                       options.range[2])

 # --------------------------- build group membership --------------------------------
@@ -887,7 +887,7 @@ if options.nodalScalar:

     # --- group data locations

-    grp = substituteLocation('#'.join(options.separation), [myElemID,myNodeID,myIpID,myGrainID], myNodeCoordinates)  # generates a unique key for a group of separated data based on the separation criterium for the location
+    grp = substituteLocation('#'.join(options.sep), [myElemID,myNodeID,myIpID,myGrainID], myNodeCoordinates)  # generates a unique key for a group of separated data based on the separation criterium for the location
     if grp not in index:                                # create a new group if not yet present
       index[grp] = groupCount
@@ -929,7 +929,7 @@ else:

       # --- group data locations

-      grp = substituteLocation('#'.join(options.separation), [myElemID,myNodeID,myIpID,myGrainID], myIpCoordinates[n])  # generates a unique key for a group of separated data based on the separation criterium for the location
+      grp = substituteLocation('#'.join(options.sep), [myElemID,myNodeID,myIpID,myGrainID], myIpCoordinates[n])  # generates a unique key for a group of separated data based on the separation criterium for the location
       if grp not in index:                              # create a new group if not yet present
         index[grp] = groupCount
@@ -961,7 +961,7 @@ where = {
 }

 sortProperties = []
-for item in options.separation:
+for item in options.sep:
   if item not in options.sort:
     sortProperties.append(item)
@@ -975,9 +975,9 @@ bg.set_message('sorting groups...')
 groups.sort(key = sortKeys)                     # in-place sorting to save mem

-# --------------------------- create output directory --------------------------------
+# --------------------------- create output dir --------------------------------

-dirname = os.path.abspath(os.path.dirname(filename))+os.sep+options.directory
+dirname = os.path.abspath(os.path.dirname(filename))+os.sep+options.dir
 if not os.path.isdir(dirname):
   os.mkdir(dirname,0755)
@@ -993,16 +993,37 @@ standard = ['inc'] + \

 # --------------------------- loop over positions --------------------------------

-bg.set_message('getting increments...')
-increments = [None]*len(positions)
-for incCount,position in enumerate(positions):
+bg.set_message('getting map between positions and increments...')
+incAtPosition = {}
+positionOfInc = {}
+for position in range(stat['NumberOfIncrements']):
   p.moveto(position+offset_pos)
-  increments[incCount] = p.increment            # remember "real" increment at this position
+  incAtPosition[position] = p.increment         # remember "real" increment at this position
+  positionOfInc[p.increment] = position         # remember position of "real" increment
+
+if options.range:
+  options.range = list(options.range)
+  if options.sloppy:
+    locations = range(options.range[0],options.range[1]+1,options.range[2])
+  else:
+    locations = range( max(0,options.range[0]),
+                       min({False:stat['NumberOfIncrements'],
+                            True :incAtPosition[stat['NumberOfIncrements']-1]+1}[options.getIncrements],
+                           options.range[1]+1),
+                       options.range[2] )

 time_start = time.time()

-for incCount,position in enumerate(positions):
+for incCount,location in enumerate(locations):
+  if options.getIncrements:
+    position = positionOfInc[location]
+  else:
+    position = location
   p.moveto(position+offset_pos)

 # --------------------------- file management --------------------------------
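
The two dictionaries form a bijection between file positions and the increments actually written, which is what lets an increment range work even when output of some increments was skipped. A minimal sketch with invented data:

  # Suppose positions 0..3 carry increments 0, 10, 20, 40 (30 was never written).
  incAtPosition = {0:0, 1:10, 2:20, 3:40}
  positionOfInc = dict((inc,pos) for pos,inc in incAtPosition.items())

  for inc in range(10,41,10):                  # --range 10 40 10 taken as increments
    if inc in positionOfInc:
      position = positionOfInc[inc]            # the file position to move to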
@@ -1051,16 +1072,16 @@ for incCount,position in enumerate(positions):
                         'len':length,
                         'content':content })

-        if options.elementalScalar:
-          for label in options.elementalScalar:
+        if options.elemScalar:
+          for label in options.elemScalar:
             if assembleHeader:
               header += [label.replace(' ','')]
             newby.append({'label':label,
                           'len':1,
                           'content':[ p.element_scalar(p.element_sequence(e),stat['IndexOfLabel'][label])[n_local].value ]})

-        if options.elementalTensor:
-          for label in options.elementalTensor:
+        if options.elemTensor:
+          for label in options.elemTensor:
             if assembleHeader:
               header += heading('.',[[label.replace(' ',''),component] for component in ['intensity','t11','t22','t33','t12','t23','t13']])
             myTensor = p.element_tensor(p.element_sequence(e),stat['IndexOfLabel'][label])[n_local]