Deleted useless storage of data, resulting in a partial speed-up.
Large datasets (>1 million elements) cannot be processed at the moment; investigation required: why does each element eat about 20 kB of memory?
parent 97a0146672
commit 6ac2b4cf88
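
The core of the change: instead of building the theNodes and theElements lists up front in __init__, the accessors node() and element() now reconstruct the requested object from the grid geometry on every call. A minimal sketch of the pattern, with hypothetical names (Grid, make_node) that are not part of the script:

    # Sketch only: replace an eagerly built list by a per-request constructor.
    # make_node stands in for the vector(...) construction in the diff below.
    class Grid(object):
        def __init__(self, n_nodes):
            self.n_nodes = n_nodes        # store only the count
        def node(self, n):
            return make_node(n)           # rebuild node n on demand

Storing one wrapper object per node or element keeps O(N) objects alive for the whole run; rebuilding on demand keeps memory flat at the cost of one constructor call per access.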
@@ -61,8 +61,6 @@ class MPIEspectral_result: # mimic py_post result object
   N_elements = 0
   N_element_scalars = 0
   N_element_tensors = 0
-  theNodes = []
-  theElements = []
 
   def __init__(self,filename):
 
@ -76,23 +74,7 @@ class MPIEspectral_result: # mimic py_post result object
|
||||||
self.resolution = self._keyedPackedArray('resolution',3,'i')
|
self.resolution = self._keyedPackedArray('resolution',3,'i')
|
||||||
self.N_nodes = (self.resolution[0]+1)*(self.resolution[1]+1)*(self.resolution[2]+1)
|
self.N_nodes = (self.resolution[0]+1)*(self.resolution[1]+1)*(self.resolution[2]+1)
|
||||||
self.N_elements = self.resolution[0]*self.resolution[1]*self.resolution[2]
|
self.N_elements = self.resolution[0]*self.resolution[1]*self.resolution[2]
|
||||||
|
|
||||||
self.dimension = self._keyedPackedArray('dimension',3,'d')
|
self.dimension = self._keyedPackedArray('dimension',3,'d')
|
||||||
a = self.resolution[0]+1
|
|
||||||
b = self.resolution[1]+1
|
|
||||||
c = self.resolution[2]+1
|
|
||||||
for n in range(self.N_nodes):
|
|
||||||
self.theNodes.append(vector([self.dimension[0] * (n%a) / self.resolution[0],
|
|
||||||
self.dimension[1] * ((n/a)%b) / self.resolution[1],
|
|
||||||
self.dimension[2] * ((n/a/b)%c) / self.resolution[2],
|
|
||||||
]))
|
|
||||||
|
|
||||||
for e in range(self.N_elements):
|
|
||||||
basenode = e+e/self.resolution[0] + e/self.resolution[0]/self.resolution[1]*a
|
|
||||||
basenode2 = basenode+a*b
|
|
||||||
self.theElements.append(element([basenode ,basenode+1 ,basenode+a+1 ,basenode+a,
|
|
||||||
basenode2,basenode2+1,basenode2+a+1,basenode2+a,
|
|
||||||
],117))
|
|
||||||
|
|
||||||
self.file.seek(0)
|
self.file.seek(0)
|
||||||
self.dataOffset = self.file.read(2048).find('eoh')+7
|
self.dataOffset = self.file.read(2048).find('eoh')+7
|
||||||
|
@ -150,20 +132,33 @@ class MPIEspectral_result: # mimic py_post result object
|
||||||
def extrapolation(self,value):
|
def extrapolation(self,value):
|
||||||
self.extrapolate = value
|
self.extrapolate = value
|
||||||
|
|
||||||
def node_sequence(self,node):
|
def node_sequence(self,n):
|
||||||
return node
|
return n
|
||||||
|
|
||||||
def node_id(self,node):
|
def node_id(self,n):
|
||||||
return node+1
|
return n+1
|
||||||
|
|
||||||
def node(self,node):
|
def node(self,n):
|
||||||
return self.theNodes[node]
|
a = self.resolution[0]+1
|
||||||
|
b = self.resolution[1]+1
|
||||||
|
c = self.resolution[2]+1
|
||||||
|
return vector([self.dimension[0] * (n%a) / self.resolution[0],
|
||||||
|
self.dimension[1] * ((n/a)%b) / self.resolution[1],
|
||||||
|
self.dimension[2] * ((n/a/b)%c) / self.resolution[2],
|
||||||
|
])
|
||||||
|
|
||||||
def element_id(self,elem):
|
def element_id(self,e):
|
||||||
return elem+1
|
return e+1
|
||||||
|
|
||||||
def element(self,elem):
|
def element(self,e):
|
||||||
return self.theElements[elem]
|
a = self.resolution[0]+1
|
||||||
|
b = self.resolution[1]+1
|
||||||
|
c = self.resolution[2]+1
|
||||||
|
basenode = e+e/self.resolution[0] + e/self.resolution[0]/self.resolution[1]*a
|
||||||
|
basenode2 = basenode+a*b
|
||||||
|
return (element([basenode ,basenode+1 ,basenode+a+1 ,basenode+a,
|
||||||
|
basenode2,basenode2+1,basenode2+a+1,basenode2+a,
|
||||||
|
],117))
|
||||||
|
|
||||||
def increments(self):
|
def increments(self):
|
||||||
return self.N_increments
|
return self.N_increments
|
||||||
|
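
The rewritten node() decodes a linear node index into (x,y,z) grid positions with modulo arithmetic over the per-direction node counts (resolution+1); the code relies on Python 2 integer division. A standalone illustration with explicit floor division (hypothetical helper, not part of the script):

    def node_position(n, res, dim):
        # res = (rx,ry,rz) cells per direction; dim = physical box size (floats)
        a, b, c = res[0]+1, res[1]+1, res[2]+1   # nodes per direction
        i = n % a                                # x index varies fastest
        j = (n // a) % b
        k = (n // a // b) % c
        return (dim[0]*i/res[0], dim[1]*j/res[1], dim[2]*k/res[2])

With res = (2,2,2), node 13 decodes to (i,j,k) = (1,1,1), the center of the box. element() applies the same idea to find the eight corner nodes of cell e.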
@ -180,10 +175,10 @@ class MPIEspectral_result: # mimic py_post result object
|
||||||
def element_scalars(self):
|
def element_scalars(self):
|
||||||
return self.N_element_scalars
|
return self.N_element_scalars
|
||||||
|
|
||||||
def element_scalar(self,elem,idx):
|
def element_scalar(self,e,idx):
|
||||||
self.file.seek(self.dataOffset+(self.increment*(4+self.N_elements*self.N_element_scalars*8+4) + 4+(elem*self.N_element_scalars + idx)*8))
|
self.file.seek(self.dataOffset+(self.increment*(4+self.N_elements*self.N_element_scalars*8+4) + 4+(e*self.N_element_scalars + idx)*8))
|
||||||
value = struct.unpack('d',self.file.read(8))[0]
|
value = struct.unpack('d',self.file.read(8))[0]
|
||||||
return [elemental_scalar(node,value) for node in self.theElements[elem].items]
|
return [elemental_scalar(node,value) for node in self.element(e).items]
|
||||||
|
|
||||||
def element_scalar_label(elem,idx):
|
def element_scalar_label(elem,idx):
|
||||||
return 'User Defined Variable %i'%(idx+1)
|
return 'User Defined Variable %i'%(idx+1)
|
||||||
|
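
element_scalar() now seeks directly into the result file instead of going through stored element objects. The offset arithmetic suggests one Fortran-unformatted-style record per increment: a 4-byte marker, N_elements*N_element_scalars doubles of 8 bytes each, and a 4-byte trailing marker; that is a reading of the formula, not something the source states. Spelled out as a hypothetical helper:

    def scalar_offset(dataOffset, increment, N_elements, N_scalars, e, idx):
        record = 4 + N_elements*N_scalars*8 + 4    # marker + payload + marker
        return dataOffset + increment*record + 4 + (e*N_scalars + idx)*8

Each call then reads a single 8-byte double via struct.unpack('d', ...), so no per-increment data has to stay resident in memory.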
@ -221,7 +216,7 @@ class backgroundMessage(threading.Thread):
|
||||||
self.message = ''
|
self.message = ''
|
||||||
self.new_message = ''
|
self.new_message = ''
|
||||||
self.counter = 0
|
self.counter = 0
|
||||||
self.symbols = ['- ', '\ ', '| ', '/ ']
|
self.symbols = ['- ', '\ ', '| ', '/ ',]
|
||||||
self.waittime = 0.5
|
self.waittime = 0.5
|
||||||
|
|
||||||
def __quit__(self):
|
def __quit__(self):
|
||||||
|
@ -529,9 +524,10 @@ def ParsePostfile(p,filename, outputFormat):
|
||||||
def SummarizePostfile(stat,where=sys.stdout):
|
def SummarizePostfile(stat,where=sys.stdout):
|
||||||
# -----------------------------
|
# -----------------------------
|
||||||
|
|
||||||
|
where.write('\n\n')
|
||||||
where.write('title:\t%s'%stat['Title'] + '\n\n')
|
where.write('title:\t%s'%stat['Title'] + '\n\n')
|
||||||
where.write('extraplation:\t%s'%stat['Extrapolation'] + '\n\n')
|
where.write('extraplation:\t%s'%stat['Extrapolation'] + '\n\n')
|
||||||
where.write('increments:\t%i+1'%(stat['NumberOfIncrements']-1) + '\n\n')
|
where.write('increments:\t%i'%(stat['NumberOfIncrements']) + '\n\n')
|
||||||
where.write('nodes:\t%i'%stat['NumberOfNodes'] + '\n\n')
|
where.write('nodes:\t%i'%stat['NumberOfNodes'] + '\n\n')
|
||||||
where.write('elements:\t%i'%stat['NumberOfElements'] + '\n\n')
|
where.write('elements:\t%i'%stat['NumberOfElements'] + '\n\n')
|
||||||
where.write('nodal scalars:\t%i'%stat['NumberOfNodalScalars'] + '\n\n ' + '\n '.join(stat['LabelOfNodalScalar']) + '\n\n')
|
where.write('nodal scalars:\t%i'%stat['NumberOfNodalScalars'] + '\n\n ' + '\n '.join(stat['LabelOfNodalScalar']) + '\n\n')
|
||||||
|
@ -625,7 +621,7 @@ parser.set_defaults(inc = False)
|
||||||
parser.set_defaults(time = False)
|
parser.set_defaults(time = False)
|
||||||
parser.set_defaults(separateFiles = False)
|
parser.set_defaults(separateFiles = False)
|
||||||
|
|
||||||
(options, file) = parser.parse_args()
|
(options, files) = parser.parse_args()
|
||||||
|
|
||||||
options.filetype = options.filetype.lower()
|
options.filetype = options.filetype.lower()
|
||||||
|
|
||||||
|
@ -658,19 +654,15 @@ else:
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
bg = backgroundMessage()
|
|
||||||
bg.start()
|
|
||||||
|
|
||||||
|
|
||||||
# --- sanity checks
|
# --- sanity checks
|
||||||
|
|
||||||
if file == []:
|
if files == []:
|
||||||
parser.print_help()
|
parser.print_help()
|
||||||
parser.error('no file specified...')
|
parser.error('no file specified...')
|
||||||
|
|
||||||
if not os.path.exists(file[0]):
|
if not os.path.exists(files[0]):
|
||||||
parser.print_help()
|
parser.print_help()
|
||||||
parser.error('invalid file "%s" specified...'%file[0])
|
parser.error('invalid file "%s" specified...'%files[0])
|
||||||
|
|
||||||
if options.filetype not in ['marc','spectral']:
|
if options.filetype not in ['marc','spectral']:
|
||||||
parser.print_help()
|
parser.print_help()
|
||||||
|
@ -686,11 +678,14 @@ if options.nodalScalar and ( options.elementalScalar or options.elementalTenso
|
||||||
parser.error('not allowed to mix nodal with elemental results...')
|
parser.error('not allowed to mix nodal with elemental results...')
|
||||||
|
|
||||||
|
|
||||||
|
# --- start background messaging
|
||||||
|
|
||||||
|
bg = backgroundMessage()
|
||||||
|
bg.start()
|
||||||
|
|
||||||
# --- parse .output and .t16 files
|
# --- parse .output and .t16 files
|
||||||
|
|
||||||
bg.set_message('parsing .output and result files...')
|
filename = os.path.splitext(files[0])[0]
|
||||||
|
|
||||||
filename = os.path.splitext(file[0])[0]
|
|
||||||
dirname = os.path.abspath(os.path.dirname(filename))+os.sep+options.directory
|
dirname = os.path.abspath(os.path.dirname(filename))+os.sep+options.directory
|
||||||
if not os.path.isdir(dirname):
|
if not os.path.isdir(dirname):
|
||||||
os.mkdir(dirname,0755)
|
os.mkdir(dirname,0755)
|
||||||
|
@ -701,6 +696,9 @@ me = {
|
||||||
'Crystallite': options.cryst,
|
'Crystallite': options.cryst,
|
||||||
'Constitutive': options.phase,
|
'Constitutive': options.phase,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bg.set_message('parsing .output files...')
|
||||||
|
|
||||||
for what in me:
|
for what in me:
|
||||||
outputFormat[what] = ParseOutputFormat(filename, what, me[what])
|
outputFormat[what] = ParseOutputFormat(filename, what, me[what])
|
||||||
if not '_id' in outputFormat[what]['specials']:
|
if not '_id' in outputFormat[what]['specials']:
|
||||||
|
@ -708,9 +706,12 @@ for what in me:
|
||||||
print '\n'.join(map(lambda x:' '+x, outputFormat[what]['specials']['brothers']))
|
print '\n'.join(map(lambda x:' '+x, outputFormat[what]['specials']['brothers']))
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
|
bg.set_message('opening result file...')
|
||||||
p = OpenPostfile(filename,options.filetype)
|
p = OpenPostfile(filename,options.filetype)
|
||||||
|
bg.set_message('parsing result file...')
|
||||||
stat = ParsePostfile(p, filename, outputFormat)
|
stat = ParsePostfile(p, filename, outputFormat)
|
||||||
|
if options.filetype == 'marc':
|
||||||
|
stat['NumberOfIncrements'] -= 1 # t16 contains one "virtual" increment (at 0)
|
||||||
|
|
||||||
# --- sanity check for output variables
|
# --- sanity check for output variables
|
||||||
# for mentat variables (nodalScalar,elementalScalar,elementalTensor) we simply have to check whether the label is found in the stat[indexOfLabel] dictionary
|
# for mentat variables (nodalScalar,elementalScalar,elementalTensor) we simply have to check whether the label is found in the stat[indexOfLabel] dictionary
|
||||||
|
@ -729,7 +730,7 @@ for opt in ['nodalScalar','elementalScalar','elementalTensor','homogenizationRes
|
||||||
|
|
||||||
if options.info:
|
if options.info:
|
||||||
if options.filetype == 'marc':
|
if options.filetype == 'marc':
|
||||||
print '\nMentat release %s\n'%release
|
print '\n\nMentat release %s'%release
|
||||||
|
|
||||||
SummarizePostfile(stat,sys.stderr)
|
SummarizePostfile(stat,sys.stderr)
|
||||||
|
|
||||||
|
@ -745,12 +746,19 @@ if options.info:
|
||||||
# --- get output data from .t16 file
|
# --- get output data from .t16 file
|
||||||
|
|
||||||
|
|
||||||
|
increments = range(stat['NumberOfIncrements'])
|
||||||
|
if options.filetype == 'marc':
|
||||||
|
offset_inc = 1
|
||||||
|
else:
|
||||||
|
offset_inc = 0
|
||||||
if options.range:
|
if options.range:
|
||||||
|
options.range = list(options.range)
|
||||||
|
if options.filetype == 'spectral':
|
||||||
|
options.range[0] -= 1
|
||||||
|
options.range[1] -= 1
|
||||||
increments = range( max(0,options.range[0]),
|
increments = range( max(0,options.range[0]),
|
||||||
min(stat['NumberOfIncrements'],options.range[1]+1),
|
min(stat['NumberOfIncrements'],options.range[1]+1),
|
||||||
options.range[2])
|
options.range[2])
|
||||||
else:
|
|
||||||
increments = range(stat['NumberOfIncrements']-1)
|
|
||||||
|
|
||||||
fileOpen = False
|
fileOpen = False
|
||||||
assembleHeader = True
|
assembleHeader = True
|
||||||
|
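
offset_inc separates the user-facing increment numbers from the positions in the file: Marc t16 files carry an extra virtual increment at 0, spectral files do not. A worked example of the range logic, assuming the names from the hunk above:

    # spectral file, stat['NumberOfIncrements'] == 10, --range 2 8 2
    # the zero-based shift turns options.range into [1, 7, 2], so
    # increments = range(max(0,1), min(10,7+1), 2)  ->  [1, 3, 5, 7]
    # and p.moveto(increment+offset_inc) visits file positions 1,3,5,7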
@ -759,40 +767,22 @@ header = []
|
||||||
element_scalar = {}
|
element_scalar = {}
|
||||||
element_tensor = {}
|
element_tensor = {}
|
||||||
|
|
||||||
# --- store geometry information
|
|
||||||
|
|
||||||
p.moveto(0)
|
|
||||||
|
|
||||||
nodeID = [ 0 for n in range(stat['NumberOfNodes'])]
|
|
||||||
nodeCoordinates = [[] for n in range(stat['NumberOfNodes'])]
|
|
||||||
|
|
||||||
elemID = [ 0 for e in range(stat['NumberOfElements'])]
|
|
||||||
elemNodeID = [[] for e in range(stat['NumberOfElements'])]
|
|
||||||
ipCoordinates = [[] for e in range(stat['NumberOfElements'])]
|
|
||||||
|
|
||||||
for n in range(stat['NumberOfNodes']):
|
|
||||||
nodeID[n] = p.node_id(n)
|
|
||||||
nodeCoordinates[n] = [p.node(n).x, p.node(n).y, p.node(n).z]
|
|
||||||
|
|
||||||
for e in range(stat['NumberOfElements']):
|
|
||||||
elemID[e] = p.element_id(e)
|
|
||||||
elemNodeID[e] = p.element(e).items
|
|
||||||
ipCoordinates[e] = ipCoords(p.element(e).type, map(lambda node: [node.x, node.y, node.z], map(p.node, map(p.node_sequence,p.element(e).items))))
|
|
||||||
|
|
||||||
# --- loop over increments
|
# --- loop over increments
|
||||||
|
|
||||||
time_start = time.time()
|
time_start = time.time()
|
||||||
|
|
||||||
for incCount,increment in enumerate(increments):
|
for incCount,increment in enumerate(increments):
|
||||||
p.moveto(increment+1)
|
p.moveto(increment+offset_inc)
|
||||||
time_delta = (len(increments)-incCount)*(time.time()-time_start)/max(1.0,incCount)
|
|
||||||
bg.set_message('(%02i:%02i:%02i) read data from increment %i...'%(time_delta//3600,time_delta%3600//60,time_delta%60,increment))
|
|
||||||
data = {}
|
data = {}
|
||||||
|
|
||||||
if options.nodalScalar:
|
if options.nodalScalar:
|
||||||
for n in range(stat['NumberOfNodes']):
|
for n in range(stat['NumberOfNodes']):
|
||||||
myNodeID = nodeID[n]
|
if n%100 == 0:
|
||||||
myNodeCoordinates = nodeCoordinates[n]
|
time_delta = (len(increments)-incCount)*(time.time()-time_start)/max(1.0,incCount)
|
||||||
|
bg.set_message('(%02i:%02i:%02i) read node %i from increment %i...'%(time_delta//3600,time_delta%3600//60,time_delta%60,n,increment))
|
||||||
|
myNodeID = p.node_id(n)
|
||||||
|
myNodeCoordinates = [p.node(n).x, p.node(n).y, p.node(n).z]
|
||||||
myElemID = 0
|
myElemID = 0
|
||||||
myGrainID = 0
|
myGrainID = 0
|
||||||
|
|
||||||
|
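
Geometry is no longer cached before the increment loop; node IDs and coordinates are fetched per node, and the progress message is throttled to every hundredth node. The remaining-time estimate is a plain linear extrapolation; in isolation (same variables as in the hunk):

    # increments finished so far predict the cost of those still to come
    time_delta = (len(increments)-incCount) * (time.time()-time_start) / max(1.0,incCount)
    # rendered as hh:mm:ss with integer division and modulo
    hh, mm, ss = time_delta//3600, time_delta%3600//60, time_delta%60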
@ -827,9 +817,12 @@ for incCount,increment in enumerate(increments):
|
||||||
|
|
||||||
else:
|
else:
|
||||||
for e in range(stat['NumberOfElements']):
|
for e in range(stat['NumberOfElements']):
|
||||||
myElemID = elemID[e]
|
if e%100 == 0:
|
||||||
myIpCoordinates = ipCoordinates[e]
|
time_delta = (len(increments)-incCount)*(time.time()-time_start)/max(1.0,incCount)
|
||||||
for n,myNodeID in enumerate(elemNodeID[e]):
|
bg.set_message('(%02i:%02i:%02i) read elem %i from increment %i...'%(time_delta//3600,time_delta%3600//60,time_delta%60,e,increment))
|
||||||
|
myElemID = p.element_id(e)
|
||||||
|
myIpCoordinates = ipCoords(p.element(e).type, map(lambda node: [node.x, node.y, node.z], map(p.node, map(p.node_sequence,p.element(e).items))))
|
||||||
|
for n,myNodeID in enumerate(p.element(e).items):
|
||||||
for g in range(('GrainCount' in stat['IndexOfLabel'] and int(p.element_scalar(e, stat['IndexOfLabel']['GrainCount'])[0].value))
|
for g in range(('GrainCount' in stat['IndexOfLabel'] and int(p.element_scalar(e, stat['IndexOfLabel']['GrainCount'])[0].value))
|
||||||
or 1):
|
or 1):
|
||||||
myGrainID = g + 1
|
myGrainID = g + 1
|
||||||