#!/usr/bin/env python

import pdb, os, sys, gc, math, re, threading, time, struct, string
import damask
from optparse import OptionParser, OptionGroup, Option, SUPPRESS_HELP
# file extensions of the supported result-file formats, keyed by solver type
fileExtensions = {
    'marc':     ['.t16'],
    'spectral': ['.spectralOut'],
}

# known MSC.Marc releases and their sub-version strings
releases = {
    '2010':   ['linux64', ''],
    '2008r1': [''],
    '2007r1': [''],
    '2005r3': [''],
}
2010-08-17 02:17:27 +05:30
2011-01-12 22:25:56 +05:30
# -----------------------------
class vector:     # mimic py_post node object
# -----------------------------
    """Minimal stand-in for the py_post node object: a 3D point with x/y/z components."""
    x, y, z = [None, None, None]

    def __init__(self, coords):
        # unpack the three Cartesian components of the coordinate triple
        self.x, self.y, self.z = coords[0], coords[1], coords[2]
2011-01-12 22:25:56 +05:30
# -----------------------------
class element:    # mimic py_post element object
# -----------------------------
    """Minimal stand-in for the py_post element object: node list plus element type id."""
    items = []
    type = None

    def __init__(self, nodes, type):
        self.items = nodes    # ids of the nodes making up this element
        self.type = type      # Marc element type number
2011-01-12 22:25:56 +05:30
# -----------------------------
class elemental_scalar:   # mimic py_post element_scalar object
# -----------------------------
    """Minimal stand-in for the py_post element_scalar object: a node id with a scalar value."""
    id = None
    value = None

    def __init__(self, node, value):
        self.id = node        # node id the scalar is attached to
        self.value = value    # the scalar value itself
2011-01-12 22:25:56 +05:30
# -----------------------------
class MPIEspectral_result:    # mimic py_post result object
# -----------------------------
    """
    Reader for DAMASK spectral solver binary result files (.spectralOut),
    exposing the same interface as MSC.Marc's py_post result object.

    The file consists of a keyed header terminated by an 'eoh' marker,
    followed by fixed-size binary records of elemental scalar data
    (one record per stored position).  Python 2 code.
    """

    file = None                     # open (binary) file handle on the result file
    dataOffset = 0                  # byte offset at which binary data starts (right after 'eoh')
    N_elemental_scalars = 0
    resolution = [0, 0, 0]          # grid points per direction
    dimension = [0.0, 0.0, 0.0]     # physical size per direction
    theTitle = ''
    wd = ''                         # working directory recorded in the header
    geometry = ''
    extrapolate = ''
    N_loadcases = 0
    N_increments = 0
    N_positions = 0
    _frequencies = []               # output frequency per loadcase
    _increments = []                # increment count per loadcase
    _times = []                     # time span per loadcase
    increment = 0
    position = 0
    time = 0.0                      # this is a dummy at the moment, we need to parse the load file and figure out what time a particular increment corresponds to
    N_nodes = 0
    N_node_scalars = 0
    N_elements = 0
    N_element_scalars = 0
    N_element_tensors = 0

    def __init__(self, filename):
        # open the result file and scan byte-by-byte for the end-of-header marker
        self.file = open(filename, 'rb')
        self.filesize = os.path.getsize(filename)
        self.dataOffset = 0
        while self.dataOffset < self.filesize:
            self.file.seek(self.dataOffset)
            if self.file.read(3) == 'eoh': break
            self.dataOffset += 1
        self.dataOffset += 7        # jump past the 3-byte 'eoh' marker plus 4 more bytes
                                    # (presumably a record delimiter -- see locateKeyValue tags)

        # --- keyed string values from the header
        self.theTitle = self._keyedString('load')
        self.wd       = self._keyedString('workingdir')
        self.geometry = self._keyedString('geometry')

        # --- keyed packed (struct) values from the header
        self.N_loadcases  = self._keyedPackedArray('loadcases', count=1, type='i', default=1)[0]
        self._frequencies = self._keyedPackedArray('frequencies', count=self.N_loadcases, type='i', default=1)
        self._increments  = self._keyedPackedArray('increments', count=self.N_loadcases, type='i')
        #self._increments[0] -= 1          # delete zero'th entry (might be needed for older spectralOuts
        self._times     = self._keyedPackedArray('times', count=self.N_loadcases, type='d', default=0.0)
        self._logscales = self._keyedPackedArray('logscales', count=self.N_loadcases, type='i', default=0)
        self.dimension  = self._keyedPackedArray('dimension', count=3, type='d')
        self.resolution = self._keyedPackedArray('resolution', count=3, type='i')

        # --- derived mesh sizes: regular grid of resolution^3 elements
        self.N_nodes    = (self.resolution[0]+1)*(self.resolution[1]+1)*(self.resolution[2]+1)
        self.N_elements =  self.resolution[0]   * self.resolution[1]   * self.resolution[2]

        self.N_element_scalars = self._keyedPackedArray('materialpoint_sizeResults', count=1, type='i', default=0)[0]

        # each stored position occupies 8 framing bytes plus one 8-byte double
        # per element scalar per element (Py2 integer division)
        self.N_positions = (self.filesize-self.dataOffset)/(8+self.N_elements*self.N_element_scalars*8)

        self.N_increments = 1                 # add zero'th entry
        for i in range(self.N_loadcases):
            self.N_increments += self._increments[i]//self._frequencies[i]

    def __str__(self):
        # human-readable summary of the parsed header, one item per line
        return '\n'.join([
            'workdir: %s' % self.wd,
            'geometry: %s' % self.geometry,
            'loadcases: %i' % self.N_loadcases,
            'resolution: %s' % (','.join(map(str, self.resolution))),
            'dimension: %s' % (','.join(map(str, self.dimension))),
            'header size: %i' % self.dataOffset,
            'actual file size: %i' % self.filesize,
            'expected file size: %i' % (self.dataOffset+self.N_increments*(8+self.N_elements*self.N_element_scalars*8)),
            'positions in file : %i' % self.N_positions,
            ]
        )

    def locateKeyValue(self, identifier):
        """Scan the header for the record named 'identifier'; return its name and payload position."""
        key = {'name': '', 'pos': 0}
        filepos = 0
        # stop on a hit, on the end-of-header record, or when the header is exhausted
        while key['name'] != identifier and key['name'] != 'eoh' and filepos < self.dataOffset:
            self.file.seek(filepos)
            tag = self.file.read(4)                           # read the starting/ending tag
            key['name'] = self.file.read(len(identifier))     # anticipate identifier
            key['pos']  = self.file.tell()                    # remember position right after identifier
            self.file.seek(filepos+4)                         # start looking after opening tag
            filepos += 4+self.file.read(self.dataOffset).find(tag)+4   # locate end of closing tag

        return key

    def _keyedPackedArray(self, identifier, count=3, type='d', default=None):
        """Read 'count' binary values of struct format 'type' stored under 'identifier' (or defaults)."""
        bytecount = {'d': 8, 'i': 4}                          # bytes per struct format character
        values = [default]*count
        key = self.locateKeyValue(identifier)
        if key['name'] == identifier:                         # only read when the key actually exists
            self.file.seek(key['pos'])
            for i in range(count):
                values[i] = struct.unpack(type, self.file.read(bytecount[type]))[0]

        return values

    def _keyedString(self, identifier, default=None):
        """Read a string value stored under 'identifier'; delimited by a repeated 4-byte tag."""
        value = default
        self.file.seek(0)
        # the 4-byte tag preceding the identifier also terminates the value (backreference \1)
        m = re.search(r'(.{4})%s(.*?)\1' % identifier, self.file.read(self.dataOffset), re.DOTALL)
        if m:
            value = m.group(2)
        return value

    def title(self):
        return self.theTitle

    def moveto(self, pos):
        """Select stored position 'pos'; update the corresponding increment number and time."""
        self.position = pos
        self.increment = 0
        self.time = 0.0
        p = pos
        for l in range(self.N_loadcases):
            if p <= self._increments[l]//self._frequencies[l]:    # position falls within loadcase l
                break
            else:                                                 # skip this whole loadcase
                self.increment += self._increments[l]
                self.time      += self._times[l]
                p -= self._increments[l]//self._frequencies[l]

        self.increment += self._frequencies[l]*p

        if self._logscales[l] > 0:                                                                                      # logarithmic time scale
            if l == 0: self.time = 2**(self._increments[l]-(1+self._frequencies[l]*p))*self._times[l]                   # first loadcase
            else:      self.time *= ((self.time+self._times[l])/self.time)**((1+self._frequencies[l]*p)/self._increments[l])   # any subsequent loadcase
        else:                                                                                                           # linear time scale
            self.time += self._times[l]/self._increments[l]*self._frequencies[l]*p

    def extrapolation(self, value):
        # kept for py_post interface compatibility; value is only recorded
        self.extrapolate = value

    def node_sequence(self, n):
        # node id -> zero-based sequence index
        return n-1

    def node_id(self, n):
        # zero-based sequence index -> node id
        return n+1

    def node(self, n):
        """Coordinates of node index n on the regular grid (Py2 integer division for the index math)."""
        a = self.resolution[0]+1
        b = self.resolution[1]+1
        c = self.resolution[2]+1
        return vector([self.dimension[0] *       (n%a) / self.resolution[0],
                       self.dimension[1] *   ((n/a)%b) / self.resolution[1],
                       self.dimension[2] * ((n/a/b)%c) / self.resolution[2],
                      ])

    def element_sequence(self, e):
        # element id -> zero-based sequence index
        return e-1

    def element_id(self, e):
        # zero-based sequence index -> element id
        return e+1

    def element(self, e):
        """Hexahedral element e as its eight corner node ids (Marc element type 117)."""
        a = self.resolution[0]+1
        b = self.resolution[1]+1
        c = self.resolution[2]+1
        # lowest-numbered corner node of element e (Py2 integer division)
        basenode = 1 + e+e/self.resolution[0] + e/self.resolution[0]/self.resolution[1]*a
        basenode2 = basenode+a*b                               # corresponding node one grid layer up
        return (element([basenode,  basenode+1,  basenode+a+1,  basenode+a,
                         basenode2, basenode2+1, basenode2+a+1, basenode2+a,
                        ], 117))

    def increments(self):
        return self.N_positions

    def nodes(self):
        return self.N_nodes

    def node_scalars(self):
        return self.N_node_scalars

    def elements(self):
        return self.N_elements

    def element_scalars(self):
        return self.N_element_scalars

    def element_scalar(self, e, idx):
        """Scalar idx of element e at the current position, replicated onto each of its nodes."""
        # record layout per position: 4-byte tag + payload + 4-byte tag; doubles are 8 bytes
        self.file.seek(self.dataOffset+(self.position*(4+self.N_elements*self.N_element_scalars*8+4)+4+(e*self.N_element_scalars+idx)*8))
        try:
            value = struct.unpack('d', self.file.read(8))[0]
        except:
            # diagnostic dump before bailing out (Python 2 print statements)
            print 'seeking', self.dataOffset+(self.position*(4+self.N_elements*self.N_element_scalars*8+4)+4+(e*self.N_element_scalars+idx)*8)
            print 'e', e, 'idx', idx
            sys.exit(1)

        return [elemental_scalar(node, value) for node in self.element(e).items]

    def element_scalar_label(elem, idx):
        # NOTE(review): first parameter takes the place of 'self' here (original signature kept)
        return 'User Defined Variable %i' % (idx+1)

    def element_tensors(self):
        return self.N_element_tensors
2011-01-12 22:25:56 +05:30
2010-08-17 02:17:27 +05:30
# -----------------------------
class MyOption(Option):
# -----------------------------
    """
    optparse Option subclass adding an 'extend' action, which lets an option
    collect multiple comma-separated (and repeated) arguments into one list.

    Taken from the online tutorial http://docs.python.org/library/optparse.html
    """
    ACTIONS = Option.ACTIONS + ("extend",)
    STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
    TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
    ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)

    def take_action(self, action, dest, opt, value, values, parser):
        if action != "extend":                       # everything but 'extend' is stock optparse
            Option.take_action(self, action, dest, opt, value, values, parser)
            return
        # split the comma-separated argument and append the pieces to the destination list
        values.ensure_value(dest, []).extend(value.split(","))
2010-08-17 02:17:27 +05:30
2011-04-12 23:16:35 +05:30
2010-08-17 02:17:27 +05:30
# -----------------------------
class backgroundMessage(threading.Thread):
# -----------------------------
    """Background thread showing a spinner plus a status message on stderr while the main thread works."""

    def __init__(self):
        threading.Thread.__init__(self)
        self.message = ''               # message currently shown on the terminal
        self.new_message = ''           # message to show on the next repaint
        self.counter = 0                # index into the spinner symbol cycle
        self.symbols = ['-', '\\', '|', '/',]
        self.waittime = 0.5             # seconds between spinner updates

    def __quit__(self):
        # erase whatever is currently on the terminal line: backspace over it,
        # overwrite with blanks, backspace again
        length = len(self.message) + len(self.symbols[self.counter])
        sys.stderr.write(chr(8)*length + ' '*length + chr(8)*length)
        sys.stderr.write('')

    def run(self):
        # spin until the main thread has finished, then clean up the terminal line.
        # NOTE(review): relies on the Python 2 private attribute _Thread__stopped of
        # the main thread (threading.enumerate()[0]); not portable to Python 3.
        while not threading.enumerate()[0]._Thread__stopped:
            time.sleep(self.waittime)
            self.update_message()
        self.__quit__()

    def set_message(self, new_message):
        # change the status text and repaint immediately
        self.new_message = new_message
        self.print_message()

    def print_message(self):
        length = len(self.message) + len(self.symbols[self.counter])
        sys.stderr.write(chr(8)*length + ' '*length + chr(8)*length)       # delete former message
        sys.stderr.write(self.symbols[self.counter] + self.new_message)    # print new message
        self.message = self.new_message

    def update_message(self):
        # advance the spinner and repaint with the current text
        self.counter = (self.counter + 1) % len(self.symbols)
        self.print_message()
2010-08-17 02:17:27 +05:30
# -----------------------------
def ipCoords(elemType, nodalCoordinates):
    """
    Return the integration-point coordinates of one element.

    Each IP coordinate is the weighted average of the element's nodal
    coordinates, with per-element-type weight tables (one row per IP,
    one weight per node).
    """
# -----------------------------
    nodeWeightsPerNode = {
         7:  [[27.0,  9.0,  3.0,  9.0,  9.0,  3.0,  1.0,  3.0],
              [ 9.0, 27.0,  9.0,  3.0,  3.0,  9.0,  3.0,  1.0],
              [ 3.0,  9.0, 27.0,  9.0,  1.0,  3.0,  9.0,  3.0],
              [ 9.0,  3.0,  9.0, 27.0,  3.0,  1.0,  3.0,  9.0],
              [ 9.0,  3.0,  1.0,  3.0, 27.0,  9.0,  3.0,  9.0],
              [ 3.0,  9.0,  3.0,  1.0,  9.0, 27.0,  9.0,  3.0],
              [ 1.0,  3.0,  9.0,  3.0,  3.0,  9.0, 27.0,  9.0],
              [ 3.0,  1.0,  3.0,  9.0,  9.0,  3.0,  9.0, 27.0]],
        57:  [[27.0,  9.0,  3.0,  9.0,  9.0,  3.0,  1.0,  3.0],
              [ 9.0, 27.0,  9.0,  3.0,  3.0,  9.0,  3.0,  1.0],
              [ 3.0,  9.0, 27.0,  9.0,  1.0,  3.0,  9.0,  3.0],
              [ 9.0,  3.0,  9.0, 27.0,  3.0,  1.0,  3.0,  9.0],
              [ 9.0,  3.0,  1.0,  3.0, 27.0,  9.0,  3.0,  9.0],
              [ 3.0,  9.0,  3.0,  1.0,  9.0, 27.0,  9.0,  3.0],
              [ 1.0,  3.0,  9.0,  3.0,  3.0,  9.0, 27.0,  9.0],
              [ 3.0,  1.0,  3.0,  9.0,  9.0,  3.0,  9.0, 27.0]],
       117:  [[ 1.0,  1.0,  1.0,  1.0,  1.0,  1.0,  1.0,  1.0]],
       125:  [[ 3.0,  0.0,  0.0,  4.0,  1.0,  4.0],
              [ 0.0,  3.0,  0.0,  4.0,  4.0,  1.0],
              [ 0.0,  0.0,  3.0,  1.0,  4.0,  4.0],],
       136:  [[42.0, 15.0, 15.0, 14.0,  5.0,  5.0],
              [15.0, 42.0, 15.0,  5.0, 14.0,  5.0],
              [15.0, 15.0, 42.0,  5.0,  5.0, 14.0],
              [14.0,  5.0,  5.0, 42.0, 15.0, 15.0],
              [ 5.0, 14.0,  5.0, 15.0, 42.0, 15.0],
              [ 5.0,  5.0, 14.0, 15.0, 15.0, 42.0]],
    }

    weights = nodeWeightsPerNode[elemType]
    ipCoordinates = []
    for ipWeights in weights:                           # one weight row per integration point
        accumulated = [0.0, 0.0, 0.0]
        for nodeIndex in range(len(ipWeights)):         # weighted sum over the element's nodes
            for i in range(3):
                accumulated[i] += ipWeights[nodeIndex]*nodalCoordinates[nodeIndex][i]
        norm = sum(ipWeights)                           # normalize by the total weight of this IP
        ipCoordinates.append([component/norm for component in accumulated])
    return ipCoordinates
2011-05-06 15:30:27 +05:30
# -----------------------------
def ipIDs(elemType):
    """Return the integration-point numbers, in node order, for the given element type."""
# -----------------------------
    ipPerNode = {
          7: [1, 2, 4, 3, 5, 6, 8, 7],
         57: [1, 2, 4, 3, 5, 6, 8, 7],
        117: [1],
        125: [1, 2, 3],
        136: [1, 2, 3, 4, 5, 6],
    }

    return ipPerNode[elemType]
2010-08-17 02:17:27 +05:30
# -----------------------------
def substituteLocation(string, mesh, coords):
    """
    Interpolate location key words in a group/filter string.

    'elem', 'node', 'ip', 'grain' are replaced by the respective entries of
    mesh; 'x', 'y', 'z' by the respective coords components ('%.6g' format).
    Replacement order matters and is fixed: mesh words first, then coordinates.
    """
# -----------------------------
    replacements = [
        ('elem',  str(mesh[0])),
        ('node',  str(mesh[1])),
        ('ip',    str(mesh[2])),
        ('grain', str(mesh[3])),
        ('x', '%.6g' % coords[0]),
        ('y', '%.6g' % coords[1]),
        ('z', '%.6g' % coords[2]),
    ]
    substitute = string
    for keyword, replacement in replacements:
        substitute = substitute.replace(keyword, replacement)
    return substitute
2011-04-12 23:16:35 +05:30
2010-08-17 02:17:27 +05:30
# -----------------------------
def heading(glue, parts):
    """
    Join the pieces of each entry in parts with glue.

    The second-to-last element of each pieces list encodes multiplicity:
    a value of 0 means "single instance" and is dropped before joining
    (note: this mutates the caller's list in place, as before).
    """
# -----------------------------
    header = []
    for pieces in parts:
        if pieces[-2] == 0:
            pieces.pop(-2)                      # drop the zero multiplicity marker (in place!)
        header.append(glue.join(str(piece) for piece in pieces))
    return header
2010-08-17 02:17:27 +05:30
2011-04-12 23:16:35 +05:30
# -----------------------------
def mapIncremental(label, mapping, N, base, new):
    """
    Update the running aggregate 'base' with the data chunk 'new' after N samples.

    mapping: one of the predefined reducers ('min','max','avg','avgabs','sum',
             'sumabs','unique') or a user-supplied expression evaluating to a
             three-argument function f(n, b, a).
    label:   column label; 'orientation' triggers quaternion renormalization.
    Returns the element-wise mapped list (['n/a']*len(base) if a user mapping fails).
    """
# -----------------------------
    theMap = {'min':    lambda n, b, a: min(b, a),
              'max':    lambda n, b, a: max(b, a),
              'avg':    lambda n, b, a: (n*b+a)/(n+1),
              'avgabs': lambda n, b, a: (n*b+abs(a))/(n+1),
              'sum':    lambda n, b, a: b+a,
              'sumabs': lambda n, b, a: b+abs(a),
              'unique': lambda n, b, a: {True: a, False: 'n/a'}[n == 0 or b == a],
             }
    if mapping in theMap:
        mapped = list(map(theMap[mapping], [N]*len(base), base, new))        # map one of the standard functions to data
                                                                             # (list() is a no-op on Py2, correct on Py3)
        if label.lower() == 'orientation':                                   # orientation is special case:...
            orientationNorm = math.sqrt(sum([q*q for q in mapped]))          # ...calc norm of average quaternion
            mapped = [q/orientationNorm for q in mapped]                     # ...renormalize quaternion
    else:
        try:
            # SECURITY NOTE: 'mapping' is evaluated as Python code -- only pass trusted expressions
            mapped = list(eval('map(%s,[N]*len(base),base,new)' % mapping))  # map user-defined function to columns in chunks
        except Exception:                                                    # narrowed from bare except: keep best-effort fallback
            mapped = ['n/a']*len(base)

    return mapped
2011-04-12 23:16:35 +05:30
2010-08-17 02:17:27 +05:30
# -----------------------------
def OpenPostfile(name, type):
    """
    Open the result file 'name' of the given solver type ('spectral' or 'marc')
    with extrapolation mode "translate" and position it at the first increment.
    """
# -----------------------------
    constructors = {
        'spectral': MPIEspectral_result,
        'marc':     post_open,
    }
    p = constructors[type](name)        # dispatch on the solver type
    p.extrapolation('translate')
    p.moveto(1)

    return p
2010-08-17 02:17:27 +05:30
# -----------------------------
def ParseOutputFormat(filename, what, me):
    """
    Parse the .output* files belonging to 'filename' to get the list of outputs.

    what: suffix of the .output file, e.g. 'Homogenization', 'Crystallite', 'Constitutive'
    me:   section name (or 1-based section number given as digit string) to extract
    Returns {'outputs': [[name, length], ...],
             'specials': {'brothers': [all section names], '_id': sectionID,
                          '(special)': length, ...}}.
    """
# -----------------------------
    format = {'outputs': {}, 'specials': {'brothers': []}}

    content = []
    for prefix in [''] + [str(i) for i in range(1, 17)]:    # try the plain and the job-number-prefixed file names
        candidate = prefix + filename + '.output' + what
        if os.path.exists(candidate):
            try:
                with open(candidate) as f:                  # 'with' guarantees the handle is closed (was leaked on error)
                    content = f.readlines()
                break
            except IOError:                                 # unreadable candidate: keep trying the next prefix
                pass

    if content == []: return format                         # nothing found...

    tag = ''
    tagID = 0
    for line in content:
        if re.match(r'\s*$', line) or re.match(r'#', line): # skip blank lines and comments
            continue
        m = re.match(r'\[(.+)\]', line)                     # look for block indicator
        if m:                                               # next section
            tag = m.group(1)
            tagID += 1
            format['specials']['brothers'].append(tag)
            if tag == me or (me.isdigit() and tagID == int(me)):
                format['specials']['_id'] = tagID
                format['outputs'] = []
                tag = me
        else:                                               # data from section
            if tag == me:
                (output, length) = line.split()
                # (removed a no-op 'output.lower()' whose result was discarded)
                if length.isdigit():
                    length = int(length)
                    if re.match(r'\((.+)\)', output):       # special data, e.g. (Ngrains)
                        format['specials'][output] = length
                    elif length > 0:
                        format['outputs'].append([output, length])
    return format
2010-08-17 02:17:27 +05:30
# -----------------------------
def ParsePostfile(p, filename, outputFormat, legacyFormat):
    """
    Parse postfile in order to get position and labels of outputs.

    Needs "outputFormat" for mapping of output names to postfile output indices.
    p:            opened result object (py_post or MPIEspectral_result)
    legacyFormat: True when the result file starts its user data with GrainCount
                  (old layout) rather than HomogenizationCount.
    Returns the 'stat' dictionary with counts, labels and a label->index map.
    """
# -----------------------------
    # the label that marks the start of the user-defined output block
    startVar = {True:  'GrainCount',
                False: 'HomogenizationCount'}

    # --- build statistics

    stat = {
        'IndexOfLabel': {},
        'Title': p.title(),
        'Extrapolation': p.extrapolate,
        'NumberOfIncrements': p.increments(),
        'NumberOfNodes': p.nodes(),
        'NumberOfNodalScalars': p.node_scalars(),
        'LabelOfNodalScalar': [None]*p.node_scalars(),
        'NumberOfElements': p.elements(),
        'NumberOfElementalScalars': p.element_scalars(),
        'LabelOfElementalScalar': [None]*p.element_scalars(),
        'NumberOfElementalTensors': p.element_tensors(),
        'LabelOfElementalTensor': [None]*p.element_tensors(),
    }

    # --- find labels

    for labelIndex in range(stat['NumberOfNodalScalars']):
        label = p.node_scalar_label(labelIndex)
        stat['IndexOfLabel'][label] = labelIndex
        stat['LabelOfNodalScalar'][labelIndex] = label

    for labelIndex in range(stat['NumberOfElementalScalars']):
        label = p.element_scalar_label(labelIndex)
        stat['IndexOfLabel'][label] = labelIndex
        stat['LabelOfElementalScalar'][labelIndex] = label

    for labelIndex in range(stat['NumberOfElementalTensors']):
        label = p.element_tensor_label(labelIndex)
        stat['IndexOfLabel'][label] = labelIndex
        stat['LabelOfElementalTensor'][labelIndex] = label

    if 'User Defined Variable 1' in stat['IndexOfLabel']:                         # output format without dedicated names?
        stat['IndexOfLabel'][startVar[legacyFormat]] = stat['IndexOfLabel']['User Defined Variable 1']   # adjust first named entry

    if startVar[legacyFormat] in stat['IndexOfLabel']:                            # does the result file contain relevant user defined output at all?
        startIndex = stat['IndexOfLabel'][startVar[legacyFormat]]
        stat['LabelOfElementalScalar'][startIndex] = startVar[legacyFormat]

    # We now have to find a mapping for each output label as defined in the .output* files to the output position in the post file
    # Since we know where the user defined outputs start ("startIndex"), we can simply assign increasing indices to the labels
    # given in the .output* file
    # NOTE(review): if the startVar label is absent, startIndex is unbound below --
    # presumably result files processed here always contain it; confirm against callers.

    offset = 1
    if legacyFormat:
        stat['LabelOfElementalScalar'][startIndex + offset] = startVar[not legacyFormat]   # add HomogenizationCount as second
        offset += 1

    for (name, N) in outputFormat['Homogenization']['outputs']:
        for i in range(N):
            label = {False:    '%s' % (     name),
                     True:  '%i_%s' % (i+1, name)}[N > 1]
            stat['IndexOfLabel'][label] = startIndex + offset
            stat['LabelOfElementalScalar'][startIndex + offset] = label
            offset += 1

    if not legacyFormat:
        stat['IndexOfLabel'][startVar[not legacyFormat]] = startIndex + offset
        stat['LabelOfElementalScalar'][startIndex + offset] = startVar[not legacyFormat]   # add GrainCount
        offset += 1

    if '(ngrains)' in outputFormat['Homogenization']['specials']:
        for grain in range(outputFormat['Homogenization']['specials']['(ngrains)']):

            stat['IndexOfLabel']['%i_CrystalliteCount' % (grain+1)] = startIndex + offset  # report crystallite count
            stat['LabelOfElementalScalar'][startIndex + offset] = '%i_CrystalliteCount' % (grain+1)
            offset += 1

            for (name, N) in outputFormat['Crystallite']['outputs']:                       # add crystallite outputs
                for i in range(N):
                    label = {False:    '%i_%s' % (grain+1,      name),
                             True:  '%i_%i_%s' % (grain+1, i+1, name)}[N > 1]
                    stat['IndexOfLabel'][label] = startIndex + offset
                    stat['LabelOfElementalScalar'][startIndex + offset] = label
                    offset += 1

            stat['IndexOfLabel']['%i_ConstitutiveCount' % (grain+1)] = startIndex + offset # report constitutive count
            stat['LabelOfElementalScalar'][startIndex + offset] = '%i_ConstitutiveCount' % (grain+1)
            offset += 1

            for (name, N) in outputFormat['Constitutive']['outputs']:                      # add constitutive outputs
                for i in range(N):
                    label = {False:    '%i_%s' % (grain+1,      name),
                             True:  '%i_%i_%s' % (grain+1, i+1, name)}[N > 1]
                    stat['IndexOfLabel'][label] = startIndex + offset
                    stat['LabelOfElementalScalar'][startIndex + offset] = label
                    offset += 1

    return stat
2010-08-17 02:17:27 +05:30
# -----------------------------
2011-06-21 18:08:58 +05:30
def SummarizePostfile(stat, where=sys.stdout, format='marc'):
# -----------------------------
  """Write a human-readable summary of a parsed result file to a stream.

  Parameters
  ----------
  stat : dict
      statistics dictionary as produced by ParsePostfile; must contain the
      keys 'Title', 'Extrapolation', 'NumberOfIncrements', 'NumberOfNodes',
      'NumberOfElements', 'NumberOfNodalScalars', 'NumberOfElementalScalars',
      'NumberOfElementalTensors' and the matching 'LabelOf...' label lists.
  where : file-like
      output stream with a write() method (defaults to sys.stdout).
  format : str
      kept for backward compatibility of the call signature; currently unused.

  Returns
  -------
  True, unconditionally (allows use in boolean chains by callers).
  """
  where.write('\n\n')
  where.write('title:\t%s' % stat['Title'] + '\n\n')
  where.write('extrapolation:\t%s' % stat['Extrapolation'] + '\n\n')          # fixed typo: was "extraplation"
  where.write('increments:\t%i' % (stat['NumberOfIncrements']) + '\n\n')
  where.write('nodes:\t%i' % stat['NumberOfNodes'] + '\n\n')
  where.write('elements:\t%i' % stat['NumberOfElements'] + '\n\n')
  where.write('nodal scalars:\t%i' % stat['NumberOfNodalScalars'] + '\n\n' + '\n'.join(stat['LabelOfNodalScalar']) + '\n\n')
  where.write('elemental scalars:\t%i' % stat['NumberOfElementalScalars'] + '\n\n' + '\n'.join(stat['LabelOfElementalScalar']) + '\n\n')
  where.write('elemental tensors:\t%i' % stat['NumberOfElementalTensors'] + '\n\n' + '\n'.join(stat['LabelOfElementalTensor']) + '\n\n')

  return True
2010-08-17 02:17:27 +05:30
# -----------------------------
# MAIN FUNCTION STARTS HERE
# -----------------------------
# --- input parsing
# --- command line interface -------------------------------------------------
# Builds the optparse parser (MyOption is a project extension providing the
# 'extend' action for comma-separated lists), registers all options/defaults,
# then parses argv.  NOTE(review): the interleaved date lines appear to be
# VCS-annotation residue from an extraction, not executable code — confirm.
parser = OptionParser ( option_class = MyOption , usage = ' % prog [options] resultfile ' , description = """
2011-02-01 18:43:05 +05:30
Extract data from a . t16 ( MSC . Marc ) or . spectralOut results file .
2010-08-17 02:17:27 +05:30
List of output variables is given by options ' --ns ' , ' --es ' , ' --et ' , ' --ho ' , ' --cr ' , ' --co ' .
2011-05-26 15:11:53 +05:30
Filters and separations use ' elem ' , ' node ' , ' ip ' , ' grain ' , and ' x ' , ' y ' , ' z ' as key words .
2010-08-17 02:17:27 +05:30
Example :
2011-05-26 15:11:53 +05:30
1 ) get averaged results in slices perpendicular to x for all negative y coordinates
- - filter ' y < 0.0 ' - - separation x - - map ' avg '
2010-08-17 02:17:27 +05:30
2 ) global sum of squared data falling into first quadrant arc between R1 and R2
2011-05-26 15:11:53 +05:30
- - filter ' x >= 0.0 and y >= 0.0 and x*x + y*y >= R1*R1 and x*x + y*y <= R2*R2 '
- - map ' lambda n,b,a: n*b+a*a '
User mappings need to be formulated in an incremental fashion for each new data point , a ( dd ) ,
and may use the current ( incremental ) result , b ( ase ) , as well as the number , n ( umber ) ,
of already processed data points for evaluation .
2010-08-17 02:17:27 +05:30
2011-08-18 13:30:19 +05:30
""" + string.replace( ' $Id$ ' , ' \n ' , ' \\ n ' )
)
2010-08-17 02:17:27 +05:30
# general behavior switches
parser . add_option ( ' -i ' , ' --info ' , action = ' store_true ' , dest = ' info ' , \
2011-07-21 21:15:41 +05:30
help = ' list contents of resultfile [ %d efault] ' )
2011-11-23 14:39:00 +05:30
parser . add_option ( ' -l ' , ' --legacy ' , action = ' store_true ' , dest = ' legacy ' , \
help = ' legacy user result block (starts with GrainCount) [ %d efault] ' )
2011-06-08 22:24:46 +05:30
parser . add_option ( ' --prefix ' , dest = ' prefix ' , \
2011-07-21 21:15:41 +05:30
help = ' prefix to result file name [ %d efault] ' )
2012-02-28 19:54:45 +05:30
parser . add_option ( ' --suffix ' , dest = ' suffix ' , \
help = ' suffix to result file name [ %d efault] ' )
2011-07-21 21:15:41 +05:30
parser . add_option ( ' -d ' , ' --dir ' , dest = ' dir ' , \
help = ' name of subdirectory to hold output [ %d efault] ' )
2011-01-12 22:25:56 +05:30
parser . add_option ( ' -s ' , ' --split ' , action = ' store_true ' , dest = ' separateFiles ' , \
2011-07-21 21:15:41 +05:30
help = ' split output per increment [ %d efault] ' )
2010-08-17 02:17:27 +05:30
parser . add_option ( ' -r ' , ' --range ' , dest = ' range ' , type = ' int ' , nargs = 3 , \
2011-07-21 21:15:41 +05:30
help = ' range of positions (or increments) to output (start, end, step) [all] ' )
parser . add_option ( ' --increments ' , action = ' store_true ' , dest = ' getIncrements ' , \
help = ' switch to increment range [ %d efault] ' )
2011-05-02 21:40:18 +05:30
parser . add_option ( ' --sloppy ' , action = ' store_true ' , dest = ' sloppy ' , \
2011-07-21 21:15:41 +05:30
help = ' do not pre-check validity of increment range ' )
2010-08-17 02:17:27 +05:30
parser . add_option ( ' -m ' , ' --map ' , dest = ' func ' , type = ' string ' , \
2011-07-21 21:15:41 +05:30
help = ' data reduction mapping [ " %d efault " ] out of min, max, avg, avgabs, sum, sumabs or user-lambda ' )
2011-01-12 22:25:56 +05:30
parser . add_option ( ' -p ' , ' --type ' , dest = ' filetype ' , type = ' string ' , \
2011-07-21 21:15:41 +05:30
help = ' type of result file [auto] ' )
2011-06-21 18:08:58 +05:30
2010-08-17 02:17:27 +05:30
# options selecting which material section of the .output files to use
group_material = OptionGroup ( parser , ' Material identifier ' )
group_material . add_option ( ' --homogenization ' , dest = ' homog ' , type = ' string ' , \
2011-07-21 21:15:41 +05:30
help = ' homogenization identifier (as string or integer [ %d efault]) ' , metavar = ' <ID> ' )
2010-08-17 02:17:27 +05:30
group_material . add_option ( ' --crystallite ' , dest = ' cryst ' , type = ' string ' , \
2011-07-21 21:15:41 +05:30
help = ' crystallite identifier (as string or integer [ %d efault]) ' , metavar = ' <ID> ' )
2010-08-17 02:17:27 +05:30
group_material . add_option ( ' --phase ' , dest = ' phase ' , type = ' string ' , \
2011-07-21 21:15:41 +05:30
help = ' phase identifier (as string or integer [ %d efault]) ' , metavar = ' <ID> ' )
2010-08-17 02:17:27 +05:30
2011-06-21 18:08:58 +05:30
# options controlling filtering/separation/sorting of extracted data
group_special = OptionGroup ( parser , ' Special outputs ' )
2010-08-17 02:17:27 +05:30
group_special . add_option ( ' -t ' , ' --time ' , action = ' store_true ' , dest = ' time ' , \
2011-07-21 21:15:41 +05:30
help = ' output time of increment [ %d efault] ' )
2010-08-17 02:17:27 +05:30
group_special . add_option ( ' -f ' , ' --filter ' , dest = ' filter ' , type = ' string ' , \
2011-07-21 21:15:41 +05:30
help = ' condition(s) to filter results [ %d efault] ' , metavar = ' <CODE> ' )
group_special . add_option ( ' --separation ' , action = ' extend ' , dest = ' sep ' , type = ' string ' , \
help = ' properties to separate results [ %d efault] ' , metavar = ' <LIST> ' )
2011-02-01 18:54:19 +05:30
group_special . add_option ( ' --sort ' , action = ' extend ' , dest = ' sort ' , type = ' string ' , \
2011-07-21 21:15:41 +05:30
help = ' properties to sort results [ %d efault] ' , metavar = ' <LIST> ' )
2010-08-17 02:17:27 +05:30
2011-06-21 18:08:58 +05:30
# options selecting which result quantities to extract
group_general = OptionGroup ( parser , ' General outputs ' )
2010-08-17 02:17:27 +05:30
group_general . add_option ( ' --ns ' , action = ' extend ' , dest = ' nodalScalar ' , type = ' string ' , \
2011-07-21 21:15:41 +05:30
help = ' nodal scalars to extract ' , metavar = ' <LIST> ' )
group_general . add_option ( ' --es ' , action = ' extend ' , dest = ' elemScalar ' , type = ' string ' , \
help = ' elemental scalars to extract ' , metavar = ' <LIST> ' )
group_general . add_option ( ' --et ' , action = ' extend ' , dest = ' elemTensor ' , type = ' string ' , \
help = ' elemental tensors to extract ' , metavar = ' <LIST> ' )
2010-08-17 02:17:27 +05:30
group_general . add_option ( ' --ho ' , action = ' extend ' , dest = ' homogenizationResult ' , type = ' string ' , \
2011-07-21 21:15:41 +05:30
help = ' homogenization results to extract ' , metavar = ' <LIST> ' )
2010-08-17 02:17:27 +05:30
group_general . add_option ( ' --cr ' , action = ' extend ' , dest = ' crystalliteResult ' , type = ' string ' , \
2011-07-21 21:15:41 +05:30
help = ' crystallite results to extract ' , metavar = ' <LIST> ' )
2010-08-17 02:17:27 +05:30
group_general . add_option ( ' --co ' , action = ' extend ' , dest = ' constitutiveResult ' , type = ' string ' , \
2011-07-21 21:15:41 +05:30
help = ' constitutive results to extract ' , metavar = ' <LIST> ' )
2010-08-17 02:17:27 +05:30
# register groups and set every option's default value
parser . add_option_group ( group_material )
parser . add_option_group ( group_general )
parser . add_option_group ( group_special )
parser . set_defaults ( info = False )
2011-11-23 14:39:00 +05:30
parser . set_defaults ( legacy = False )
2011-05-02 21:40:18 +05:30
parser . set_defaults ( sloppy = False )
2011-06-08 22:24:46 +05:30
parser . set_defaults ( prefix = ' ' )
2012-02-28 19:54:45 +05:30
parser . set_defaults ( suffix = ' ' )
2011-07-21 21:15:41 +05:30
parser . set_defaults ( dir = ' postProc ' )
2011-06-21 18:08:58 +05:30
parser . set_defaults ( filetype = None )
2010-08-17 02:17:27 +05:30
parser . set_defaults ( func = ' avg ' )
parser . set_defaults ( homog = ' 1 ' )
parser . set_defaults ( cryst = ' 1 ' )
parser . set_defaults ( phase = ' 1 ' )
parser . set_defaults ( filter = ' ' )
2011-07-21 21:15:41 +05:30
parser . set_defaults ( sep = [ ] )
2011-02-01 18:54:19 +05:30
parser . set_defaults ( sort = [ ] )
2010-08-17 02:17:27 +05:30
parser . set_defaults ( inc = False )
parser . set_defaults ( time = False )
parser . set_defaults ( separateFiles = False )
2011-07-21 21:15:41 +05:30
parser . set_defaults ( getIncrements = False )
2010-08-17 02:17:27 +05:30
2011-03-11 22:02:01 +05:30
# parse command line: options carry switches, files the positional arguments
( options , files ) = parser . parse_args ( )
2010-08-17 02:17:27 +05:30
2011-06-21 18:08:58 +05:30
# --- basic sanity checks
# abort early when no (or a non-existing) result file was given
if files == [ ] :
parser . print_help ( )
parser . error ( ' no file specified... ' )
if not os . path . exists ( files [ 0 ] ) :
parser . print_help ( )
parser . error ( ' invalid file " %s " specified... ' % files [ 0 ] )
# --- figure out filetype
# auto-detect result-file type from the file extension when -p was not given
if options . filetype == None :
ext = os . path . splitext ( files [ 0 ] ) [ 1 ]
for theType in fileExtensions . keys ( ) :
if ext in fileExtensions [ theType ] :
options . filetype = theType
break
2011-03-10 15:15:57 +05:30
options . filetype = options . filetype . lower ( )
2011-07-22 17:29:03 +05:30
# Marc result files carry an extra leading (zeroth) position in the file
if options . filetype == ' marc ' : offset_pos = 1
else : offset_pos = 0
2011-06-21 18:08:58 +05:30
# --- more sanity checks
if options . filetype not in [ ' marc ' , ' spectral ' ] :
parser . print_help ( )
parser . error ( ' file type " %s " not supported... ' % options . filetype )
2011-03-10 15:15:57 +05:30
# Marc results need the vendor py_post module from the Mentat installation
if options . filetype == ' marc ' :
2012-01-16 15:02:36 +05:30
sys . path . append ( damask . solver . Marc ( ) . libraryPath ( ' ../../ ' ) )
2011-04-12 23:16:35 +05:30
try :
from py_post import *
except :
2011-11-23 14:39:00 +05:30
print ( ' error: no valid Mentat release found ' )
2011-04-12 23:16:35 +05:30
sys . exit ( - 1 )
2011-03-10 15:15:57 +05:30
else :
# spectral files need no vendor module; provide a no-op placeholder
def post_open ( ) :
return
2011-02-22 21:27:27 +05:30
2010-08-17 02:17:27 +05:30
if options . constitutiveResult and not options . phase :
2011-04-12 23:16:35 +05:30
parser . print_help ( )
parser . error ( ' constitutive results require phase... ' )
2010-08-17 02:17:27 +05:30
2011-07-21 21:15:41 +05:30
# nodal and elemental extraction are mutually exclusive modes
if options . nodalScalar and ( options . elemScalar or options . elemTensor
2010-08-17 02:17:27 +05:30
or options . homogenizationResult or options . crystalliteResult or options . constitutiveResult ) :
2011-04-12 23:16:35 +05:30
parser . print_help ( )
parser . error ( ' not allowed to mix nodal with elemental results... ' )
2010-08-17 02:17:27 +05:30
2011-07-21 21:15:41 +05:30
# normalize unset list options (None) to empty lists for uniform handling
if not options . nodalScalar : options . nodalScalar = [ ]
if not options . elemScalar : options . elemScalar = [ ]
if not options . elemTensor : options . elemTensor = [ ]
if not options . homogenizationResult : options . homogenizationResult = [ ]
if not options . crystalliteResult : options . crystalliteResult = [ ]
if not options . constitutiveResult : options . constitutiveResult = [ ]
2010-08-17 02:17:27 +05:30
2011-04-13 22:01:44 +05:30
options . sort . reverse ( )
2011-07-21 21:15:41 +05:30
options . sep . reverse ( )
2011-04-13 22:01:44 +05:30
2011-03-11 22:02:01 +05:30
# --- start background messaging
# backgroundMessage is a project-defined thread used for progress reporting
bg = backgroundMessage ( )
bg . start ( )
2010-08-17 02:17:27 +05:30
2011-03-11 22:02:01 +05:30
# --- parse .output and .t16 files
2010-08-17 02:17:27 +05:30
2011-06-21 18:08:58 +05:30
# split the given result file into base name and extension
if os . path . splitext ( files [ 0 ] ) [ 1 ] == ' ' :
filename = files [ 0 ]
extension = fileExtensions [ options . filetype ]
else :
filename = os . path . splitext ( files [ 0 ] ) [ 0 ]
extension = os . path . splitext ( files [ 0 ] ) [ 1 ]
2010-08-17 02:17:27 +05:30
# map each .output section to the identifier chosen on the command line
outputFormat = { }
me = {
2011-04-12 23:16:35 +05:30
' Homogenization ' : options . homog ,
' Crystallite ' : options . cryst ,
' Constitutive ' : options . phase ,
2010-08-17 02:17:27 +05:30
}
2011-03-11 22:02:01 +05:30
bg . set_message ( ' parsing .output files... ' )
2010-08-17 02:17:27 +05:30
for what in me :
2011-04-12 23:16:35 +05:30
outputFormat [ what ] = ParseOutputFormat ( filename , what , me [ what ] )
# warn (but continue) when the requested section is absent; list siblings
if not ' _id ' in outputFormat [ what ] [ ' specials ' ] :
2011-07-21 21:15:41 +05:30
print " \n section ' %s ' not found in < %s > " % ( me [ what ] , what )
print ' \n ' . join ( map ( lambda x : ' [ %s ] ' % x , outputFormat [ what ] [ ' specials ' ] [ ' brothers ' ] ) )
2011-04-12 23:16:35 +05:30
2011-03-11 22:02:01 +05:30
bg . set_message ( ' opening result file... ' )
2011-06-21 18:08:58 +05:30
p = OpenPostfile ( filename + extension , options . filetype )
2011-03-11 22:02:01 +05:30
bg . set_message ( ' parsing result file... ' )
2011-11-23 14:39:00 +05:30
stat = ParsePostfile ( p , filename , outputFormat , options . legacy )
2011-03-11 22:02:01 +05:30
if options . filetype == ' marc ' :
2011-04-12 23:16:35 +05:30
stat [ ' NumberOfIncrements ' ] - = 1 # t16 contains one "virtual" increment (at 0)
2010-08-17 02:17:27 +05:30
# --- sanity check for output variables
2011-07-21 21:15:41 +05:30
# for mentat variables (nodalScalar,elemScalar,elemTensor) we simply have to check whether the label is found in the stat[indexOfLabel] dictionary
2010-08-17 02:17:27 +05:30
# for user defined variables (homogenizationResult,crystalliteResult,constitutiveResult) we have to check the corresponding outputFormat, since the namescheme in stat['IndexOfLabel'] is different
2011-07-21 21:15:41 +05:30
for opt in [ ' nodalScalar ' , ' elemScalar ' , ' elemTensor ' , ' homogenizationResult ' , ' crystalliteResult ' , ' constitutiveResult ' ] :
2011-04-12 23:16:35 +05:30
if eval ( ' options. %s ' % opt ) :
for label in eval ( ' options. %s ' % opt ) :
2011-07-21 21:15:41 +05:30
if ( opt in [ ' nodalScalar ' , ' elemScalar ' , ' elemTensor ' ] and label not in stat [ ' IndexOfLabel ' ] and label not in [ ' elements ' , ] ) \
2011-04-12 23:16:35 +05:30
or ( opt in [ ' homogenizationResult ' , ' crystalliteResult ' , ' constitutiveResult ' ] \
and ( not outputFormat [ opt [ : - 6 ] . capitalize ( ) ] [ ' outputs ' ] or not label in zip ( * outputFormat [ opt [ : - 6 ] . capitalize ( ) ] [ ' outputs ' ] ) [ 0 ] ) ) :
parser . error ( ' %s " %s " unknown... ' % ( opt , label ) )
2010-08-17 02:17:27 +05:30
# --- output info
# with -i/--info: dump a summary of the result file and exit without extracting
if options . info :
2011-04-12 23:16:35 +05:30
if options . filetype == ' marc ' :
2012-01-16 15:02:36 +05:30
print ' \n \n Mentat release %s ' % damask . solver . Marc ( ) . version ( ' ../../ ' )
2011-06-21 18:08:58 +05:30
if options . filetype == ' spectral ' :
print ' \n \n ' , p
2011-03-10 15:15:57 +05:30
2012-02-28 18:51:59 +05:30
SummarizePostfile ( stat )
2011-04-12 23:16:35 +05:30
print ' \n User Defined Outputs '
for what in me :
print ' \n ' , what , ' : '
for output in outputFormat [ what ] [ ' outputs ' ] :
print ' ' , output
sys . exit ( 0 )
2010-08-17 02:17:27 +05:30
2011-06-08 22:24:46 +05:30
# --- build connectivity maps
# elementsOfNode maps a node sequence number to the ids of all elements
# that reference it; used later by the nodal 'elements' pseudo-scalar.
elementsOfNode = { }
for e in xrange ( stat [ ' NumberOfElements ' ] ) :
if e % 1000 == 0 :
bg . set_message ( ' connect elem %i ... ' % e )
for n in map ( p . node_sequence , p . element ( e ) . items ) :
if n not in elementsOfNode :
2011-07-21 21:15:41 +05:30
elementsOfNode [ n ] = [ p . element_id ( e ) ]
2011-06-08 22:24:46 +05:30
else :
2011-07-21 21:15:41 +05:30
elementsOfNode [ n ] + = [ p . element_id ( e ) ]
2011-06-08 22:24:46 +05:30
# widest connectivity found; fixes the column count of the 'elements' output
maxCountElementsOfNode = 0
for l in elementsOfNode . values ( ) :
maxCountElementsOfNode = max ( maxCountElementsOfNode , len ( l ) )
2010-11-02 21:15:23 +05:30
2011-04-12 23:16:35 +05:30
# --------------------------- build group membership --------------------------------
# Scans all nodes (nodal mode) or all element ips/grains (elemental mode),
# applies the user filter, and collects surviving data points into groups
# keyed by the --separation criteria.  Each group is a list whose first entry
# holds [elem,node,ip,grain, x,y,z] (unique ids / average location) and whose
# remaining entries identify the individual members.
2010-11-02 21:15:23 +05:30
2011-07-22 17:29:03 +05:30
p . moveto ( offset_pos )
2011-04-12 23:16:35 +05:30
index = { }
groups = [ ]
groupCount = 0
memberCount = 0
2010-11-02 21:15:23 +05:30
2011-04-12 23:16:35 +05:30
if options . nodalScalar :
for n in xrange ( stat [ ' NumberOfNodes ' ] ) :
if n % 1000 == 0 :
bg . set_message ( ' scan node %i ... ' % n )
myNodeID = p . node_id ( n )
myNodeCoordinates = [ p . node ( n ) . x , p . node ( n ) . y , p . node ( n ) . z ]
myElemID = 0
2011-05-06 15:30:27 +05:30
myIpID = 0
2011-04-12 23:16:35 +05:30
myGrainID = 0
2010-08-17 02:17:27 +05:30
2011-04-12 23:16:35 +05:30
# --- filter valid locations
2010-08-17 02:17:27 +05:30
2011-05-06 15:30:27 +05:30
filter = substituteLocation ( options . filter , [ myElemID , myNodeID , myIpID , myGrainID ] , myNodeCoordinates ) # generates an expression that is only true for the locations specified by options.filter
2011-04-12 23:16:35 +05:30
if filter != ' ' and not eval ( filter ) : # for all filter expressions that are not true:...
continue # ... ignore this data point and continue with next
2010-08-17 02:17:27 +05:30
2011-04-12 23:16:35 +05:30
# --- group data locations
2010-08-17 02:17:27 +05:30
2011-07-21 21:15:41 +05:30
grp = substituteLocation ( ' # ' . join ( options . sep ) , [ myElemID , myNodeID , myIpID , myGrainID ] , myNodeCoordinates ) # generates a unique key for a group of separated data based on the separation criterium for the location
2011-04-12 23:16:35 +05:30
if grp not in index : # create a new group if not yet present
index [ grp ] = groupCount
2011-06-08 22:24:46 +05:30
groups . append ( [ [ 0 , 0 , 0 , 0 , 0.0 , 0.0 , 0.0 ] ] ) # initialize with avg location
2011-04-12 23:16:35 +05:30
groupCount + = 1
2011-05-06 15:30:27 +05:30
groups [ index [ grp ] ] [ 0 ] [ : 4 ] = mapIncremental ( ' ' , ' unique ' ,
2011-04-13 22:01:44 +05:30
len ( groups [ index [ grp ] ] ) - 1 ,
2011-05-06 15:30:27 +05:30
groups [ index [ grp ] ] [ 0 ] [ : 4 ] ,
[ myElemID , myNodeID , myIpID , myGrainID ] ) # keep only if unique average location
groups [ index [ grp ] ] [ 0 ] [ 4 : ] = mapIncremental ( ' ' , ' avg ' ,
2011-04-13 22:01:44 +05:30
len ( groups [ index [ grp ] ] ) - 1 ,
2011-05-06 15:30:27 +05:30
groups [ index [ grp ] ] [ 0 ] [ 4 : ] ,
myNodeCoordinates ) # incrementally update average location
groups [ index [ grp ] ] . append ( [ myElemID , myNodeID , myIpID , myGrainID , 0 ] ) # append a new list defining each group member
2011-04-12 23:16:35 +05:30
memberCount + = 1
2010-08-17 02:17:27 +05:30
2011-04-12 23:16:35 +05:30
else :
# elemental mode: iterate elements, their integration points, and grains
for e in xrange ( stat [ ' NumberOfElements ' ] ) :
if e % 1000 == 0 :
bg . set_message ( ' scan elem %i ... ' % e )
myElemID = p . element_id ( e )
2011-05-06 15:30:27 +05:30
myIpCoordinates = ipCoords ( p . element ( e ) . type , map ( lambda node : [ node . x , node . y , node . z ] , map ( p . node , map ( p . node_sequence , p . element ( e ) . items ) ) ) )
myIpIDs = ipIDs ( p . element ( e ) . type )
Nips = len ( myIpIDs )
myNodeIDs = p . element ( e ) . items [ : Nips ]
for n in range ( Nips ) :
myIpID = myIpIDs [ n ]
myNodeID = myNodeIDs [ n ]
2011-04-12 23:16:35 +05:30
# one pass per grain when a GrainCount scalar exists, otherwise one pass
for g in range ( ( ' GrainCount ' in stat [ ' IndexOfLabel ' ] and int ( p . element_scalar ( e , stat [ ' IndexOfLabel ' ] [ ' GrainCount ' ] ) [ 0 ] . value ) )
or 1 ) :
myGrainID = g + 1
2011-02-02 00:02:20 +05:30
2011-04-12 23:16:35 +05:30
# --- filter valid locations
2011-05-06 15:30:27 +05:30
filter = substituteLocation ( options . filter , [ myElemID , myNodeID , myIpID , myGrainID ] , myIpCoordinates [ n ] ) # generates an expression that is only true for the locations specified by options.filter
2011-04-12 23:16:35 +05:30
if filter != ' ' and not eval ( filter ) : # for all filter expressions that are not true:...
continue # ... ignore this data point and continue with next
# --- group data locations
2011-07-21 21:15:41 +05:30
grp = substituteLocation ( ' # ' . join ( options . sep ) , [ myElemID , myNodeID , myIpID , myGrainID ] , myIpCoordinates [ n ] ) # generates a unique key for a group of separated data based on the separation criterium for the location
2011-04-12 23:16:35 +05:30
if grp not in index : # create a new group if not yet present
index [ grp ] = groupCount
2011-05-06 15:30:27 +05:30
groups . append ( [ [ 0 , 0 , 0 , 0 , 0.0 , 0.0 , 0.0 ] ] ) # initialize with avg location
2011-04-12 23:16:35 +05:30
groupCount + = 1
2011-05-06 15:30:27 +05:30
groups [ index [ grp ] ] [ 0 ] [ : 4 ] = mapIncremental ( ' ' , ' unique ' ,
2011-04-13 22:01:44 +05:30
len ( groups [ index [ grp ] ] ) - 1 ,
2011-05-06 15:30:27 +05:30
groups [ index [ grp ] ] [ 0 ] [ : 4 ] ,
[ myElemID , myNodeID , myIpID , myGrainID ] ) # keep only if unique average location
groups [ index [ grp ] ] [ 0 ] [ 4 : ] = mapIncremental ( ' ' , ' avg ' ,
2011-04-13 22:01:44 +05:30
len ( groups [ index [ grp ] ] ) - 1 ,
2011-05-06 15:30:27 +05:30
groups [ index [ grp ] ] [ 0 ] [ 4 : ] ,
myIpCoordinates [ n ] ) # incrementally update average location
groups [ index [ grp ] ] . append ( [ myElemID , myNodeID , myIpID , myGrainID , n ] ) # append a new list defining each group member
2011-04-12 23:16:35 +05:30
memberCount + = 1
2011-05-06 15:30:27 +05:30
2011-04-12 23:16:35 +05:30
# --------------------------- sort groups --------------------------------
# column index of each sortable property inside a group's leading record
where = {
2011-05-06 15:30:27 +05:30
' elem ' : 0 ,
' node ' : 1 ,
' ip ' : 2 ,
' grain ' : 3 ,
' x ' : 4 ,
' y ' : 5 ,
' z ' : 6 ,
2011-04-12 23:16:35 +05:30
}
# separation criteria not explicitly sorted on are appended as tie-breakers
sortProperties = [ ]
2011-07-21 21:15:41 +05:30
for item in options . sep :
2011-04-13 22:01:44 +05:30
if item not in options . sort :
2011-04-12 23:16:35 +05:30
sortProperties . append ( item )
theKeys = [ ]
2011-04-13 22:01:44 +05:30
for criterium in options . sort + sortProperties :
2011-04-12 23:16:35 +05:30
if criterium in where :
theKeys . append ( ' x[0][ %i ] ' % where [ criterium ] )
# build the composite sort key as a lambda over the group's leading record
sortKeys = eval ( ' lambda x:( %s ) ' % ( ' , ' . join ( theKeys ) ) )
bg . set_message ( ' sorting groups... ' )
groups . sort ( key = sortKeys ) # in-place sorting to save mem
2011-07-21 21:15:41 +05:30
# --------------------------- create output dir --------------------------------
2011-06-15 23:19:59 +05:30
2012-01-21 01:23:56 +05:30
dirname = os . path . abspath ( os . path . join ( os . path . dirname ( filename ) , options . dir ) )
2011-06-15 23:19:59 +05:30
if not os . path . isdir ( dirname ) :
os . mkdir ( dirname , 0755 )
2011-04-12 23:16:35 +05:30
# bookkeeping for the output table: 'standard' lists the fixed leading
# columns, 'header' is filled lazily on the first processed member
fileOpen = False
assembleHeader = True
header = [ ]
standard = [ ' inc ' ] + \
{ True : [ ' time ' ] ,
False : [ ] } [ options . time ] + \
2011-05-06 15:30:27 +05:30
[ ' elem ' , ' node ' , ' ip ' , ' grain ' ] + \
2011-04-12 23:16:35 +05:30
{ True : [ ' node.x ' , ' node.y ' , ' node.z ' ] ,
False : [ ' ip.x ' , ' ip.y ' , ' ip.z ' ] } [ options . nodalScalar != [ ] ]
2011-06-21 18:08:58 +05:30
# --------------------------- loop over positions --------------------------------
2011-07-21 21:15:41 +05:30
# positions in the result file and "real" increment numbers need not agree;
# build both directions of the mapping before selecting what to process
bg . set_message ( ' getting map between positions and increments... ' )
incAtPosition = { }
positionOfInc = { }
2011-06-21 18:08:58 +05:30
2011-07-21 21:15:41 +05:30
for position in range ( stat [ ' NumberOfIncrements ' ] ) :
2011-06-21 18:08:58 +05:30
p . moveto ( position + offset_pos )
2011-07-21 21:15:41 +05:30
incAtPosition [ position ] = p . increment # remember "real" increment at this position
positionOfInc [ p . increment ] = position # remember position of "real" increment
2011-07-22 17:29:03 +05:30
if not options . range :
options . getIncrements = False
locations = range ( stat [ ' NumberOfIncrements ' ] ) # process all positions
else :
options . range = list ( options . range ) # convert to list
2011-07-21 21:15:41 +05:30
if options . sloppy :
locations = range ( options . range [ 0 ] , options . range [ 1 ] + 1 , options . range [ 2 ] )
else :
# clamp the requested range to what the file actually contains
locations = range ( max ( 0 , options . range [ 0 ] ) ,
min ( { False : stat [ ' NumberOfIncrements ' ] ,
True : incAtPosition [ stat [ ' NumberOfIncrements ' ] - 1 ] + 1 } [ options . getIncrements ] ,
options . range [ 1 ] + 1 ) ,
options . range [ 2 ] )
2011-07-25 18:23:54 +05:30
if options . getIncrements : # build list of increments to process
increments = locations # from increment range
else :
increments = [ incAtPosition [ x ] for x in locations ] # from position range
2011-04-12 23:16:35 +05:30
# --------------------------- main extraction loop --------------------------------
# For every selected position/increment: (re)open the output file, then for
# each group read the requested quantities per member and fold them together
# with the --map reduction before writing one row per group.
time_start = time . time ( )
2011-07-22 17:29:03 +05:30
for incCount , location in enumerate ( locations ) : # walk through locations
if options . getIncrements : # we talk in increments
position = positionOfInc [ location ] # map back the actual position in the result file
else : # we talk positions anyway
position = location # just take it then
p . moveto ( position + offset_pos ) # wind to correct position
2011-07-21 21:15:41 +05:30
2011-04-12 23:16:35 +05:30
# --------------------------- file management --------------------------------
2010-08-17 02:17:27 +05:30
2011-04-12 23:16:35 +05:30
if options . separateFiles :
if fileOpen :
file . close ( )
fileOpen = False
2012-02-28 19:54:45 +05:30
# nested eval builds a zero-padded per-increment file name; pad width
# is derived from the largest increment number
outFilename = eval ( ' " ' + eval ( " ' %% s_inc %% 0 %i i %% s.txt ' % (math.log10(max(increments+[1]))+1) " ) + ' " % (dirname + os.sep + options.prefix + os.path.split(filename)[1],increments[incCount],options.suffix) ' )
2011-04-12 23:16:35 +05:30
else :
2012-02-28 19:54:45 +05:30
outFilename = ' %s .txt ' % ( dirname + os . sep + options . prefix + os . path . split ( filename ) [ 1 ] + options . suffix )
2011-04-12 23:16:35 +05:30
if not fileOpen :
file = open ( outFilename , ' w ' )
fileOpen = True
2011-05-23 12:43:28 +05:30
file . write ( ' 2 \t header \n ' )
2011-05-24 22:53:22 +05:30
file . write ( string . replace ( ' $Id$ ' , ' \n ' , ' \\ n ' ) +
' \t ' + ' ' . join ( sys . argv [ 1 : ] ) + ' \n ' )
2011-04-12 23:16:35 +05:30
headerWritten = False
file . flush ( )
# --------------------------- read and map data per group --------------------------------
member = 0
2011-05-06 15:30:27 +05:30
for group in groups :
2011-04-12 23:16:35 +05:30
N = 0 # group member counter
2011-05-06 15:30:27 +05:30
for ( e , n , i , g , n_local ) in group [ 1 : ] : # loop over group members
2011-04-12 23:16:35 +05:30
member + = 1
if member % 1000 == 0 :
2011-07-22 17:29:03 +05:30
time_delta = ( ( len ( locations ) * memberCount ) / float ( member + incCount * memberCount ) - 1.0 ) * ( time . time ( ) - time_start )
bg . set_message ( ' ( %02i : %02i : %02i ) processing point %i of %i from %s %i ... ' % ( time_delta / / 3600 , time_delta % 3600 / / 60 , time_delta % 60 , member , memberCount , { True : ' increment ' , False : ' position ' } [ options . getIncrements ] , position ) )
2011-04-13 22:01:44 +05:30
2011-04-12 23:16:35 +05:30
newby = [ ] # current member's data
2011-04-13 22:01:44 +05:30
2011-06-08 22:24:46 +05:30
if options . nodalScalar :
for label in options . nodalScalar :
if label == ' elements ' :
# pseudo-scalar: connected element ids, zero-padded to common width
length = maxCountElementsOfNode
content = elementsOfNode [ p . node_sequence ( n ) ] + [ 0 ] * ( length - len ( elementsOfNode [ p . node_sequence ( n ) ] ) )
else :
length = 1
content = [ p . node_scalar ( p . node_sequence ( n ) , stat [ ' IndexOfLabel ' ] [ label ] ) ]
if assembleHeader : header + = heading ( ' _ ' , [ [ component , label ] for component in range ( int ( length > 1 ) , length + int ( length > 1 ) ) ] )
newby . append ( { ' label ' : label ,
' len ' : length ,
' content ' : content } )
2011-07-21 21:15:41 +05:30
if options . elemScalar :
for label in options . elemScalar :
2011-04-12 23:16:35 +05:30
if assembleHeader :
header + = [ label . replace ( ' ' , ' ' ) ]
newby . append ( { ' label ' : label ,
' len ' : 1 ,
2011-04-13 22:01:44 +05:30
' content ' : [ p . element_scalar ( p . element_sequence ( e ) , stat [ ' IndexOfLabel ' ] [ label ] ) [ n_local ] . value ] } )
2011-04-12 23:16:35 +05:30
2011-07-21 21:15:41 +05:30
if options . elemTensor :
for label in options . elemTensor :
2011-04-12 23:16:35 +05:30
if assembleHeader :
header + = heading ( ' . ' , [ [ label . replace ( ' ' , ' ' ) , component ] for component in [ ' intensity ' , ' t11 ' , ' t22 ' , ' t33 ' , ' t12 ' , ' t23 ' , ' t13 ' ] ] )
2011-04-13 22:01:44 +05:30
myTensor = p . element_tensor ( p . element_sequence ( e ) , stat [ ' IndexOfLabel ' ] [ label ] ) [ n_local ]
2011-04-12 23:16:35 +05:30
newby . append ( { ' label ' : label ,
2011-05-05 14:46:29 +05:30
' len ' : 7 ,
2011-04-12 23:16:35 +05:30
' content ' : [ myTensor . intensity ,
myTensor . t11 , myTensor . t22 , myTensor . t33 ,
myTensor . t12 , myTensor . t23 , myTensor . t13 ,
] } )
if options . homogenizationResult or \
options . crystalliteResult or \
options . constitutiveResult :
# user-defined outputs: pair each requested label with its section type
for ( label , resultType ) in zip ( options . homogenizationResult +
2011-07-21 21:15:41 +05:30
options . crystalliteResult +
options . constitutiveResult ,
[ ' Homogenization ' ] * len ( options . homogenizationResult ) +
[ ' Crystallite ' ] * len ( options . crystalliteResult ) +
[ ' Constitutive ' ] * len ( options . constitutiveResult )
) :
2011-04-12 23:16:35 +05:30
outputIndex = list ( zip ( * outputFormat [ resultType ] [ ' outputs ' ] ) [ 0 ] ) . index ( label ) # find the position of this output in the outputFormat
length = int ( outputFormat [ resultType ] [ ' outputs ' ] [ outputIndex ] [ 1 ] )
2011-05-24 22:53:22 +05:30
thisHead = heading ( ' _ ' , [ [ component , label ] for component in range ( int ( length > 1 ) , length + int ( length > 1 ) ) ] )
2011-04-12 23:16:35 +05:30
if assembleHeader : header + = thisHead
2011-05-24 22:53:22 +05:30
# grain-resolved quantities carry the grain id in their stat label
if resultType != ' Homogenization ' :
thisHead = heading ( ' _ ' , [ [ g , component , label ] for component in range ( int ( length > 1 ) , length + int ( length > 1 ) ) ] )
2011-04-12 23:16:35 +05:30
newby . append ( { ' label ' : label ,
' len ' : length ,
2011-04-13 22:01:44 +05:30
' content ' : [ p . element_scalar ( p . element_sequence ( e ) , stat [ ' IndexOfLabel ' ] [ head ] ) [ n_local ] . value
2011-04-12 23:16:35 +05:30
for head in thisHead ] } )
assembleHeader = False
2011-04-13 22:01:44 +05:30
if N == 0 :
2011-05-24 22:53:22 +05:30
mappedResult = [ float ( x ) for x in xrange ( len ( header ) ) ] # initialize with debug data (should get deleted by *N at N=0)
2011-04-12 23:16:35 +05:30
pos = 0
# fold this member's values into the running group reduction (--map)
for chunk in newby :
mappedResult [ pos : pos + chunk [ ' len ' ] ] = mapIncremental ( chunk [ ' label ' ] , options . func ,
N , mappedResult [ pos : pos + chunk [ ' len ' ] ] , chunk [ ' content ' ] )
pos + = chunk [ ' len ' ]
N + = 1
# --- write data row to file ---
if not headerWritten :
file . write ( ' \t ' . join ( standard + header ) + ' \n ' )
headerWritten = True
2011-06-15 23:19:59 +05:30
file . write ( ' \t ' . join ( map ( str , [ p . increment ] + \
2011-04-12 23:16:35 +05:30
{ True : [ p . time ] , False : [ ] } [ options . time ] + \
group [ 0 ] + \
mappedResult )
) + ' \n ' )
2010-08-17 02:17:27 +05:30
if fileOpen :
2011-04-12 23:16:35 +05:30
file . close ( )
2010-08-17 02:17:27 +05:30
# --------------------------- DONE --------------------------------