#!/usr/bin/env python3
# -*- coding: UTF-8 no BOM -*-

import os,sys,math,re,time,struct

import damask

from optparse import OptionParser, OptionGroup

scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID   = ' '.join([scriptName,damask.version])


fileExtensions = { \
                   'marc':     ['.t16',],
                   'spectral': ['.spectralOut',],
                 }

# -----------------------------
class vector:   # mimic py_post node object

  x,y,z = [None,None,None]

  def __init__(self,coords):
    self.x = coords[0]
    self.y = coords[1]
    self.z = coords[2]

# -----------------------------
class element:     # mimic py_post element object

  items = []
  type = None

  def __init__(self,nodes,type):
    self.items = nodes
    self.type = type

# -----------------------------
class elemental_scalar:   # mimic py_post element_scalar object

  id = None
  value = None

  def __init__(self,node,value):
    self.id = node
    self.value = value

# -----------------------------
class MPIEspectral_result:    # mimic py_post result object

  file = None
  dataOffset = 0
  N_elemental_scalars = 0
  grid = [0,0,0]
  size = [0.0,0.0,0.0]
  theTitle = ''
  wd = ''
  geometry = ''
  extrapolate = ''
  N_loadcases = 0
  N_increments = 0
  N_positions = 0
  _frequencies = []
  _increments = []
  _times = []
  increment = 0
  startingIncrement = 0
  position = 0
# this is a dummy at the moment, we need to parse the load file and figure out what time a particular increment corresponds to
  time = 0.0
  N_nodes = 0
  N_node_scalars = 0
  N_elements = 0
  N_element_scalars = 0
  N_element_tensors = 0

  def __init__(self,filename):
    self.file = open(filename,'rb')
    self.filesize = os.path.getsize(filename)
    self.dataOffset = 0
    while self.dataOffset < self.filesize:
      self.file.seek(self.dataOffset)
      if self.file.read(3) == b'eoh': break
      self.dataOffset += 1
    self.dataOffset += 7
# search first for the new keywords with ':', if not found try to find the old ones
    self.theTitle = self._keyedString('load:')
    if self.theTitle is None:
      self.theTitle = self._keyedString('load')

    self.wd = self._keyedString('workingdir:')
    if self.wd is None:
      self.wd = self._keyedString('workingdir')

    self.geometry = self._keyedString('geometry:')
    if self.geometry is None:
      self.geometry = self._keyedString('geometry')

    self.N_loadcases = self._keyedPackedArray('loadcases:',count=1,type='i')[0]
    if self.N_loadcases is None:
      self.N_loadcases = self._keyedPackedArray('loadcases',count=1,type='i')[0]

    self._frequencies = self._keyedPackedArray('frequencies:',count=self.N_loadcases,type='i')
    if all(i is None for i in self._frequencies):
      self._frequencies = self._keyedPackedArray('frequencies',count=self.N_loadcases,type='i')

    self._increments = self._keyedPackedArray('increments:',count=self.N_loadcases,type='i')
    if all(i is None for i in self._increments):
      self._increments = self._keyedPackedArray('increments',count=self.N_loadcases,type='i')

    self.startingIncrement = self._keyedPackedArray('startingIncrement:',count=1,type='i')[0]
    if self.startingIncrement is None:
      self.startingIncrement = self._keyedPackedArray('startingIncrement',count=1,type='i')[0]

    self._times = self._keyedPackedArray('times:',count=self.N_loadcases,type='d')
    if all(i is None for i in self._times):
      self._times = self._keyedPackedArray('times',count=self.N_loadcases,type='d')

    self._logscales = self._keyedPackedArray('logscales:',count=self.N_loadcases,type='i')
    if all(i is None for i in self._logscales):
      self._logscales = self._keyedPackedArray('logscales',count=self.N_loadcases,type='i')

    self.size = self._keyedPackedArray('size:',count=3,type='d')
    if self.size == [None,None,None]:                                     # no 'size' found, try legacy alias 'dimension'
      self.size = self._keyedPackedArray('dimension',count=3,type='d')

    self.grid = self._keyedPackedArray('grid:',count=3,type='i')
    if self.grid == [None,None,None]:                                     # no 'grid' found, try legacy alias 'resolution'
      self.grid = self._keyedPackedArray('resolution',count=3,type='i')

    self.N_nodes    = (self.grid[0]+1)*(self.grid[1]+1)*(self.grid[2]+1)
    self.N_elements =  self.grid[0]   * self.grid[1]   * self.grid[2]

    self.N_element_scalars = self._keyedPackedArray('materialpoint_sizeResults:',count=1,type='i')[0]
    if self.N_element_scalars is None:
      self.N_element_scalars = self._keyedPackedArray('materialpoint_sizeResults',count=1,type='i')[0]

    self.N_positions = (self.filesize-self.dataOffset)//(self.N_elements*self.N_element_scalars*8)

    self.N_increments = 1                                                 # add zero'th entry
    for i in range(self.N_loadcases):
      self.N_increments += self._increments[i]//self._frequencies[i]

# parameters for file handling depending on output format

    if options.legacy:
      self.tagLen = 8
      self.fourByteLimit = 2**31 - 1 - 8
    else:
      self.tagLen = 0
    self.expectedFileSize = self.dataOffset + self.N_increments*(self.tagLen + self.N_elements*self.N_element_scalars*8)
    if options.legacy: self.expectedFileSize += self.expectedFileSize//self.fourByteLimit*8         # add extra 8 bytes for additional headers at 4 GB limits
    if self.expectedFileSize != self.filesize:
      print('\n**\n* Unexpected file size. Incomplete simulation or file corrupted!\n**')

  def __str__(self):
    """Summary of results file."""
    return '\n'.join([
      'workdir: %s'%self.wd,
      'geometry: %s'%self.geometry,
      'loadcases: %i'%self.N_loadcases,
      'grid: %s'%(','.join(map(str,self.grid))),
      'size: %s'%(','.join(map(str,self.size))),
      'header size: %i'%self.dataOffset,
      'actual file size: %i'%self.filesize,
      'expected file size: %i'%self.expectedFileSize,
      'positions in file : %i'%self.N_positions,
      'starting increment: %i'%self.startingIncrement,
      ]
    )

  def locateKeyValue(self,identifier):

    key = {'name':None,'pos':None}

    name = ''
    filepos = 0                                                           # start at the beginning
    while name != identifier and filepos < self.dataOffset:               # stop searching when found or when reached end of header
      self.file.seek(filepos)
# read the starting tag in front of the keyword (Fortran indicates start and end of writing by a 4 byte tag indicating the length of the following data)
      dataLen = struct.unpack('i',self.file.read(4))[0]
      name = self.file.read(len(identifier)).decode(errors='ignore')      # anticipate identifier
      start = filepos + (4 + len(identifier))                             # position of the values for the found key
      filepos = filepos + (4 + dataLen + 4)                               # forward to next keyword

    if name == identifier:                                                # found the correct name
      key['pos']  = start                                                 # save position
      key['name'] = name
    return key
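
# Illustrative sketch of the header layout assumed above (values made up, not
# read from any particular file): each Fortran record is bracketed by 4-byte
# length tags, so the key 'grid:' holding three 4-byte integers looks like
#
#   [int32: 17]['g','r','i','d',':'][int32 a][int32 b][int32 c][int32: 17]
#
# and locateKeyValue('grid:') returns 'pos' = record start + 4 + len('grid:'),
# i.e. the offset of the first packed value.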

  def _keyedPackedArray(self,identifier,count=3,type='d',default=None):
    bytecount = {'d': 8, 'i': 4}
    values = [default]*count
    key = self.locateKeyValue(identifier)
    if key['name'] == identifier and key['pos'] is not None:
      self.file.seek(key['pos'])
      for i in range(count):
        values[i] = struct.unpack(type,self.file.read(bytecount[type]))[0]
    return values

  def _keyedString(self,identifier,default=None):
    value = default
    self.file.seek(0)
    m = re.search(r'(.{4})%s(.*?)\1'%identifier,self.file.read(self.dataOffset).decode(errors='ignore'),re.DOTALL)
    if m:
      value = m.group(2)
    return value

  def title(self):
    return self.theTitle

  def moveto(self,pos):
    self.position = pos
    self.increment = 0
    self.time = 0.0
    p = pos
    for l in range(self.N_loadcases):
      if p <= self._increments[l]//self._frequencies[l]:
        break
      else:
        self.increment += self._increments[l]
        self.time      += self._times[l]
        p -= self._increments[l]//self._frequencies[l]

    self.increment += self._frequencies[l] * p

    if self._logscales[l] > 0:                                            # logarithmic time scale
      if l == 0: self.time  = 2**(self._increments[l] - (1+self._frequencies[l]*p)) * self._times[l]                       # first loadcase
      else:      self.time *= ((self.time + self._times[l])/self.time)**((1+self._frequencies[l]*p)/self._increments[l])   # any subsequent loadcase
    else:                                                                 # linear time scale
      self.time += self._times[l]/self._increments[l] * self._frequencies[l] * p
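
# Worked example (illustrative, linear time scale): a single loadcase with
# _increments=[10], _frequencies=[2], _times=[1.0], _logscales=[0] stores one
# result every 2nd increment, so moveto(3) maps position 3 to increment
# 2*3 = 6 and time 1.0/10 * 2*3 = 0.6.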

  def extrapolation(self,value):
    self.extrapolate = value

  def node_sequence(self,n):
    return n-1

  def node_id(self,n):
    return n+1

  def node(self,n):
    a = self.grid[0]+1
    b = self.grid[1]+1
    c = self.grid[2]+1
    return vector([self.size[0] *       (n%a)   / self.grid[0],
                   self.size[1] *    ((n//a)%b) / self.grid[1],
                   self.size[2] * ((n//a//b)%c) / self.grid[2],
                  ])
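
# Illustrative example: for grid=[2,2,2] and size=[1.0,1.0,1.0] there are
# a=b=c=3 nodes per direction; node n=13 (0-based) gives
#   x = 1.0*(13%3)/2 = 0.5,  y = 1.0*((13//3)%3)/2 = 0.5,  z = 1.0*((13//9)%3)/2 = 0.5,
# i.e. the center node of the 3 x 3 x 3 node lattice.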

  def element_sequence(self,e):
    return e-1

  def element_id(self,e):
    return e+1

  def element(self,e):
    a = self.grid[0]+1
    b = self.grid[1]+1
    basenode = 1 + e + e//self.grid[0] + e//self.grid[0]//self.grid[1]*a
    basenode2 = basenode + a*b
    return (element([basenode ,basenode +1,basenode +a+1,basenode +a,
                     basenode2,basenode2+1,basenode2+a+1,basenode2+a,
                    ],117))

  def increments(self):
    return self.N_positions

  def nodes(self):
    return self.N_nodes

  def node_scalars(self):
    return self.N_node_scalars

  def elements(self):
    return self.N_elements

  def element_scalars(self):
    return self.N_element_scalars

  def element_scalar(self,e,idx):
    if not options.legacy:
      incStart = self.dataOffset \
               + self.position*8*self.N_elements*self.N_element_scalars
      where = (e*self.N_element_scalars + idx)*8
      try:
        self.file.seek(incStart+where)
        value = struct.unpack('d',self.file.read(8))[0]
      except:
        print('seeking {}'.format(incStart+where))
        print('e {} idx {}'.format(e,idx))
        sys.exit(1)

    else:
      self.fourByteLimit = 2**31 - 1 - 8
#  header & footer + extra header and footer for 4 byte int range (Fortran)
#  values
      incStart = self.dataOffset \
               + self.position*8*(1 + self.N_elements*self.N_element_scalars*8//self.fourByteLimit \
                                    + self.N_elements*self.N_element_scalars)

      where = (e*self.N_element_scalars + idx)*8
      try:
        if where%self.fourByteLimit + 8 >= self.fourByteLimit:            # danger of reading into fortran record footer at 4 byte limit
          data = b''                                                      # assemble the double byte-wise, skipping the record tags in between
          for i in range(8):
            self.file.seek(incStart+where+(where//self.fourByteLimit)*8+4)
            data += self.file.read(1)
            where += 1
          value = struct.unpack('d',data)[0]
        else:
          self.file.seek(incStart+where+(where//self.fourByteLimit)*8+4)
          value = struct.unpack('d',self.file.read(8))[0]
      except:
        print('seeking {}'.format(incStart+where+(where//self.fourByteLimit)*8+4))
        print('e {} idx {}'.format(e,idx))
        sys.exit(1)

    return [elemental_scalar(node,value) for node in self.element(e).items]
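
# Offset arithmetic, illustrated for the non-legacy layout (values are plain
# 8-byte doubles, one block of N_elements*N_element_scalars per position):
# with N_elements=8 and N_element_scalars=5, the scalar idx=1 of element e=3
# at position 2 sits at
#   dataOffset + 2*8*8*5 + (3*5+1)*8  =  dataOffset + 640 + 128.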

  def element_scalar_label(elem,idx):
    return 'User Defined Variable %i'%(idx+1)

  def element_tensors(self):
    return self.N_element_tensors

# -----------------------------
def ipCoords(elemType, nodalCoordinates):
  """Returns IP coordinates for a given element."""
  nodeWeightsPerNode = {
              7:    [[27.0,  9.0,  3.0,  9.0,  9.0,  3.0,  1.0,  3.0],
                     [ 9.0, 27.0,  9.0,  3.0,  3.0,  9.0,  3.0,  1.0],
                     [ 3.0,  9.0, 27.0,  9.0,  1.0,  3.0,  9.0,  3.0],
                     [ 9.0,  3.0,  9.0, 27.0,  3.0,  1.0,  3.0,  9.0],
                     [ 9.0,  3.0,  1.0,  3.0, 27.0,  9.0,  3.0,  9.0],
                     [ 3.0,  9.0,  3.0,  1.0,  9.0, 27.0,  9.0,  3.0],
                     [ 1.0,  3.0,  9.0,  3.0,  3.0,  9.0, 27.0,  9.0],
                     [ 3.0,  1.0,  3.0,  9.0,  9.0,  3.0,  9.0, 27.0]],
             57:    [[27.0,  9.0,  3.0,  9.0,  9.0,  3.0,  1.0,  3.0],
                     [ 9.0, 27.0,  9.0,  3.0,  3.0,  9.0,  3.0,  1.0],
                     [ 3.0,  9.0, 27.0,  9.0,  1.0,  3.0,  9.0,  3.0],
                     [ 9.0,  3.0,  9.0, 27.0,  3.0,  1.0,  3.0,  9.0],
                     [ 9.0,  3.0,  1.0,  3.0, 27.0,  9.0,  3.0,  9.0],
                     [ 3.0,  9.0,  3.0,  1.0,  9.0, 27.0,  9.0,  3.0],
                     [ 1.0,  3.0,  9.0,  3.0,  3.0,  9.0, 27.0,  9.0],
                     [ 3.0,  1.0,  3.0,  9.0,  9.0,  3.0,  9.0, 27.0]],
            117:    [[ 1.0,  1.0,  1.0,  1.0,  1.0,  1.0,  1.0,  1.0]],
            125:    [[ 3.0,  0.0,  0.0,  4.0,  1.0,  4.0],
                     [ 0.0,  3.0,  0.0,  4.0,  4.0,  1.0],
                     [ 0.0,  0.0,  3.0,  1.0,  4.0,  4.0],],
            127:    [[45.0, 17.0, 17.0, 17.0],
                     [17.0, 45.0, 17.0, 17.0],
                     [17.0, 17.0, 45.0, 17.0],
                     [17.0, 17.0, 17.0, 45.0],],
            136:    [[42.0, 15.0, 15.0, 14.0,  5.0,  5.0],
                     [15.0, 42.0, 15.0,  5.0, 14.0,  5.0],
                     [15.0, 15.0, 42.0,  5.0,  5.0, 14.0],
                     [14.0,  5.0,  5.0, 42.0, 15.0, 15.0],
                     [ 5.0, 14.0,  5.0, 15.0, 42.0, 15.0],
                     [ 5.0,  5.0, 14.0, 15.0, 15.0, 42.0]],
    }

  Nips = len(nodeWeightsPerNode[elemType])
  ipCoordinates = [[0.0,0.0,0.0] for i in range(Nips)]
  for ip in range(Nips):
    for node in range(len(nodeWeightsPerNode[elemType][ip])):
      for i in range(3):
        ipCoordinates[ip][i] += nodeWeightsPerNode[elemType][ip][node] * nodalCoordinates[node][i]
    for i in range(3):
      ipCoordinates[ip][i] /= sum(nodeWeightsPerNode[elemType][ip])

  return ipCoordinates
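
# Illustrative check: the spectral brick (type 117) has a single IP with all
# node weights equal to 1.0, so its IP coordinate is simply the centroid
# (arithmetic mean) of the eight corner nodes. For type 7 the dominant 27.0
# weight in each row pulls every one of the eight IPs toward "its" corner.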

# -----------------------------
def ipIDs(elemType):
  """Returns IP numbers for given element type."""
  ipPerNode = {
              7: [1, 2, 4, 3, 5, 6, 8, 7],
             57: [1, 2, 4, 3, 5, 6, 8, 7],
            117: [1],
            125: [1, 2, 3],
            127: [1, 2, 3, 4],
            136: [1, 2, 3, 4, 5, 6],
    }

  return ipPerNode[elemType]

# -----------------------------
def substituteLocation(string, mesh, coords):
  """Do variable interpolation in group and filter strings."""
  substitute = string
  substitute = substitute.replace('elem', str(mesh[0]))
  substitute = substitute.replace('node', str(mesh[1]))
  substitute = substitute.replace('ip', str(mesh[2]))
  substitute = substitute.replace('grain', str(mesh[3]))
  substitute = substitute.replace('x', '%.6g'%coords[0])
  substitute = substitute.replace('y', '%.6g'%coords[1])
  substitute = substitute.replace('z', '%.6g'%coords[2])
  return substitute
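
# Example (illustrative): with mesh=[5,12,1,2] and coords=[0.25,-0.5,0.0] the
# filter string 'y < 0.0 and ip == 1' becomes '-0.5 < 0.0 and 1 == 1', which
# eval()s to True. Note that substitution is plain text replacement, so key
# words hidden inside other identifiers (the 'x' in 'max', say) are replaced
# as well.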

# -----------------------------
def heading(glue, parts):
  """Joins pieces from parts by glue. Second-to-last entry in pieces tells multiplicity."""
  header = []
  for pieces in parts:
    if pieces[-2] == 0:                                                   # multiplicity of zero: drop the component index
      del pieces[-2]
    header.append(glue.join(map(str, pieces)))
  return header
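
# Example (illustrative): heading('_',[[0,'f'],[2,'f']]) yields ['f', '2_f'],
# i.e. a leading 0 marks a scalar (no component prefix) while any other
# number becomes the component index.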

# -----------------------------
def mapIncremental(label, mapping, N, base, new):
  """
  Applies the function defined by "mapping"

  (either 'min', 'max', 'avg', 'avgabs', 'sum', 'sumabs', 'unique', or user-specified)
  to a list of data.
  """
  theMap = { 'min':    lambda n,b,a: a if n == 0 else min(b,a),
             'max':    lambda n,b,a: a if n == 0 else max(b,a),
             'avg':    lambda n,b,a: (n*b+a)/(n+1),
             'avgabs': lambda n,b,a: (n*b+abs(a))/(n+1),
             'sum':    lambda n,b,a: a if n == 0 else b+a,
             'sumabs': lambda n,b,a: abs(a) if n == 0 else b+abs(a),
             'unique': lambda n,b,a: a if n == 0 or b == a else 'nan'
           }
  if mapping in theMap:
    mapped = list(map(theMap[mapping],[N for i in range(len(base))],base,new))                      # map one of the standard functions to data
    if label.lower() == 'orientation':                                                              # orientation is special case:...
      orientationNorm = math.sqrt(sum([q*q for q in mapped]))                                       # ...calc norm of average quaternion
      mapped = list(map(lambda x: x/orientationNorm, mapped))                                       # ...renormalize quaternion
  else:
    try:
      mapped = list(eval('map(%s,[N for i in range(len(base))],base,new)'%mapping))                 # map user defined function to columns in chunks
    except:
      mapped = ['nan' for i in range(len(base))]

  return list(mapped)
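
# Example (illustrative): folding a new sample a=2.0 into a running average
# b=1.0 over n=1 previous samples,
#   mapIncremental('x','avg',1,[1.0],[2.0])  ->  [(1*1.0+2.0)/2] == [1.5]
# A user lambda passed via --map is applied the same way, column by column.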

# -----------------------------
def OpenPostfile(name, type, nodal=False):
  """Open postfile with extrapolation mode 'translate'."""
  p = {\
       'spectral': MPIEspectral_result,
       'marc':     post_open,
      }[type](name)
  p.extrapolation({True: 'linear', False: 'translate'}[nodal])
  p.moveto(1)

  return p

# -----------------------------
def ParseOutputFormat(filename, what, me):
  """Parse .output* files in order to get a list of outputs."""
  content = []
  format = {'outputs': {}, 'specials': {'brothers': []}}
  for prefix in ['']+list(map(str, range(1,17))):
    if os.path.exists(prefix+filename+'.output'+what):
      try:
        file = open(prefix+filename+'.output'+what)
        content = file.readlines()
        file.close()
        break
      except:
        pass

  if content == []: return format                                                                  # nothing found...

  tag = ''
  tagID = 0
  for line in content:
    if re.match(r"\s*$", line) or re.match(r"#", line):                                            # skip blank lines and comments
      continue
    m = re.match(r"\[(.+)\]", line)                                                                # look for block indicator
    if m:                                                                                          # next section
      tag = m.group(1)
      tagID += 1
      format['specials']['brothers'].append(tag)
      if tag == me or (me.isdigit() and tagID == int(me)):
        format['specials']['_id'] = tagID
        format['outputs'] = []
        tag = me
    else:                                                                                          # data from section
      if tag == me:
        (output, length) = line.split()
        output = output.lower()                                                                    # labels are matched in lower case
        if length.isdigit():
          length = int(length)
        if re.match(r"\((.+)\)", output):                                                          # special data, e.g. (Ngrains)
          format['specials'][output] = length
        elif length > 0:
          format['outputs'].append([output, length])
  return format
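
# Illustrative sketch of a '.outputHomogenization' file this parser accepts
# (section name and numbers are made up):
#
#   [SX]
#   (ngrains)   1
#   orientation 4
#
# ParseOutputFormat(filename,'Homogenization','SX') would then return
#   {'outputs': [['orientation', 4]],
#    'specials': {'brothers': ['SX'], '_id': 1, '(ngrains)': 1}}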

# -----------------------------
def ParsePostfile(p, filename, outputFormat):
  """
  Parse postfile in order to get position and labels of outputs

  needs "outputFormat" for mapping of output names to postfile output indices
  """
  stat = {\
          'IndexOfLabel': {},
          'Title': p.title(),
          'Extrapolation': p.extrapolate,
          'NumberOfIncrements': p.increments(),
          'NumberOfNodes': p.nodes(),
          'NumberOfNodalScalars': p.node_scalars(),
          'LabelOfNodalScalar': [None]*p.node_scalars(),
          'NumberOfElements': p.elements(),
          'NumberOfElementalScalars': p.element_scalars(),
          'LabelOfElementalScalar': [None]*p.element_scalars(),
          'NumberOfElementalTensors': p.element_tensors(),
          'LabelOfElementalTensor': [None]*p.element_tensors(),
         }

# --- find labels

  for labelIndex in range(stat['NumberOfNodalScalars']):
    label = p.node_scalar_label(labelIndex)
    stat['IndexOfLabel'][label] = labelIndex
    stat['LabelOfNodalScalar'][labelIndex] = label

  for labelIndex in range(stat['NumberOfElementalScalars']):
    label = p.element_scalar_label(labelIndex)
    stat['IndexOfLabel'][label] = labelIndex
    stat['LabelOfElementalScalar'][labelIndex] = label

  for labelIndex in range(stat['NumberOfElementalTensors']):
    label = p.element_tensor_label(labelIndex)
    stat['IndexOfLabel'][label] = labelIndex
    stat['LabelOfElementalTensor'][labelIndex] = label

  if 'User Defined Variable 1' in stat['IndexOfLabel']:                                             # output format without dedicated names?
    stat['IndexOfLabel']['HomogenizationCount'] = stat['IndexOfLabel']['User Defined Variable 1']   # adjust first named entry

  if 'HomogenizationCount' in stat['IndexOfLabel']:                                                 # does the result file contain relevant user defined output at all?
    startIndex = stat['IndexOfLabel']['HomogenizationCount']
    stat['LabelOfElementalScalar'][startIndex] = 'HomogenizationCount'

# We now have to find a mapping for each output label as defined in the .output* files to the output position in the post file
# Since we know where the user defined outputs start ("startIndex"), we can simply assign increasing indices to the labels
# given in the .output* file

    offset = 1
    for (name,N) in outputFormat['Homogenization']['outputs']:
      for i in range(N):
        label = {False:   '%s'%(    name),
                  True: '%i_%s'%(i+1,name)}[N > 1]
        stat['IndexOfLabel'][label] = startIndex + offset
        stat['LabelOfElementalScalar'][startIndex + offset] = label
        offset += 1

    stat['IndexOfLabel']['GrainCount'] = startIndex + offset
    stat['LabelOfElementalScalar'][startIndex + offset] = 'GrainCount'                              # add GrainCount
    offset += 1

    if '(ngrains)' in outputFormat['Homogenization']['specials']:
      for grain in range(outputFormat['Homogenization']['specials']['(ngrains)']):

        stat['IndexOfLabel']['%i_CrystalliteCount'%(grain+1)] = startIndex + offset                 # report crystallite count
        stat['LabelOfElementalScalar'][startIndex + offset] = '%i_CrystalliteCount'%(grain+1)       # add CrystalliteCount
        offset += 1

        for (name,N) in outputFormat['Crystallite']['outputs']:                                     # add crystallite outputs
          for i in range(N):
            label = '%i_'%(grain+1) + ('%i_'%(i+1) if N > 1 else '') + name
            stat['IndexOfLabel'][label] = startIndex + offset
            stat['LabelOfElementalScalar'][startIndex + offset] = label
            offset += 1

        stat['IndexOfLabel']['%i_ConstitutiveCount'%(grain+1)] = startIndex + offset                # report constitutive count
        stat['LabelOfElementalScalar'][startIndex + offset] = '%i_ConstitutiveCount'%(grain+1)      # add ConstitutiveCount
        offset += 1

        for (name,N) in outputFormat['Constitutive']['outputs']:                                    # add constitutive outputs
          for i in range(N):
            label = '%i_'%(grain+1) + ('%i_'%(i+1) if N > 1 else '') + name
            stat['IndexOfLabel'][label] = startIndex + offset
            try:
              stat['LabelOfElementalScalar'][startIndex + offset] = label
            except IndexError:
              print('trying to assign {} at position {}+{}'.format(label,startIndex,offset))
              sys.exit(1)
            offset += 1

  return stat
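
# Resulting label scheme (illustrative): a crystallite output ('orientation',4)
# of grain 2 is addressed as '2_1_orientation' ... '2_4_orientation', while a
# scalar output (N == 1) drops the component index, e.g. '1_phase'.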

# -----------------------------
def SummarizePostfile(stat, where=sys.stdout, format='marc'):

  where.write('\n\n')
  where.write('title:\t%s'%stat['Title'] + '\n\n')
  where.write('extrapolation:\t%s'%stat['Extrapolation'] + '\n\n')
  where.write('increments:\t%i'%(stat['NumberOfIncrements']) + '\n\n')
  where.write('nodes:\t%i'%stat['NumberOfNodes'] + '\n\n')
  where.write('elements:\t%i'%stat['NumberOfElements'] + '\n\n')
  where.write('nodal scalars:\t%i'%stat['NumberOfNodalScalars'] + '\n\n'\
              + '\n'.join(stat['LabelOfNodalScalar']) + '\n\n')
  where.write('elemental scalars:\t%i'%stat['NumberOfElementalScalars'] + '\n\n'\
              + '\n'.join(stat['LabelOfElementalScalar']) + '\n\n')
  where.write('elemental tensors:\t%i'%stat['NumberOfElementalTensors'] + '\n\n'\
              + '\n'.join(stat['LabelOfElementalTensor']) + '\n\n')

  return True

# -----------------------------
# MAIN FUNCTION STARTS HERE
# -----------------------------

# --- input parsing

parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
Extract data from a .t16 (MSC.Marc) or .spectralOut results file.

List of output variables is given by options '--ns','--es','--et','--ho','--cr','--co'.

Filters and separations use 'elem','node','ip','grain', and 'x','y','z' as key words.

Example:
1) get averaged results in slices perpendicular to x for all negative y coordinates
--filter 'y < 0.0' --separation x --map 'avg'
2) global sum of squared data falling into first quadrant arc between R1 and R2
--filter 'x >= 0.0 and y >= 0.0 and x*x + y*y >= R1*R1 and x*x + y*y <= R2*R2'
--map 'lambda n,b,a: n*b+a*a'

User mappings need to be formulated in an incremental fashion for each new data point, a(dd),
and may use the current (incremental) result, b(ase), as well as the number, n(umber),
of already processed data points for evaluation.

""", version = scriptID)

parser.add_option('-i','--info', action='store_true', dest='info',
                  help='list contents of resultfile')
parser.add_option('-l','--legacy', action='store_true', dest='legacy',
                  help='data format of spectral solver is in legacy format (no MPI out)')
parser.add_option('-n','--nodal', action='store_true', dest='nodal',
                  help='data is extrapolated to nodal value')
parser.add_option(    '--prefix', dest='prefix',
                  metavar='string',
                  help='prefix to result file name')
parser.add_option(    '--suffix', dest='suffix',
                  metavar='string',
                  help='suffix to result file name')
parser.add_option('-d','--dir', dest='dir',
                  metavar='string',
                  help='name of subdirectory to hold output [%default]')
parser.add_option('-s','--split', action='store_true', dest='separateFiles',
                  help='split output per increment')
parser.add_option('-r','--range', dest='range', type='int', nargs=3,
                  metavar='int int int',
                  help='range of positions (or increments) to output (start, end, step) [all]')
parser.add_option(    '--increments', action='store_true', dest='getIncrements',
                  help='switch to increment range')
parser.add_option('-m','--map', dest='func',
                  metavar='string',
                  help='data reduction mapping [%default] out of min, max, avg, avgabs, sum, sumabs or user-lambda')
parser.add_option('-p','--type', dest='filetype',
                  metavar='string',
                  help='type of result file [auto]')
parser.add_option('-q','--quiet', dest='verbose',
                  action='store_false',
                  help='suppress verbose output')
parser.add_option(    '--verbose', dest='verbose',
                  action='store_true',
                  help='enable verbose output')

group_material = OptionGroup(parser,'Material identifier')

group_material.add_option('--homogenization', dest='homog',
                          help='homogenization identifier (as string or integer [%default])', metavar='string')
group_material.add_option('--crystallite', dest='cryst',
                          help='crystallite identifier (as string or integer [%default])', metavar='string')
group_material.add_option('--phase', dest='phase',
                          help='phase identifier (as string or integer [%default])', metavar='string')

group_special = OptionGroup(parser,'Special outputs')

group_special.add_option('-t','--time', action='store_true', dest='time',
                         help='output time of increment [%default]')
group_special.add_option('-f','--filter', dest='filter',
                         help='condition(s) to filter results [%default]', metavar='string')
group_special.add_option('--separation', action='extend', dest='sep',
                         help='properties to separate results [%default]', metavar='<string LIST>')
group_special.add_option('--sort', action='extend', dest='sort',
                         help='properties to sort results [%default]', metavar='<string LIST>')

group_general = OptionGroup(parser,'General outputs')

group_general.add_option('--ns', action='extend', dest='nodalScalar',
                         help='nodal scalars to extract', metavar='<string LIST>')
group_general.add_option('--es', action='extend', dest='elemScalar',
                         help='elemental scalars to extract', metavar='<string LIST>')
group_general.add_option('--et', action='extend', dest='elemTensor',
                         help='elemental tensors to extract', metavar='<string LIST>')
group_general.add_option('--ho', action='extend', dest='homogenizationResult',
                         help='homogenization results to extract', metavar='<string LIST>')
group_general.add_option('--cr', action='extend', dest='crystalliteResult',
                         help='crystallite results to extract', metavar='<string LIST>')
group_general.add_option('--co', action='extend', dest='constitutiveResult',
                         help='constitutive results to extract', metavar='<string LIST>')

parser.add_option_group(group_material)
parser.add_option_group(group_general)
parser.add_option_group(group_special)

parser.set_defaults(info = False,
                    verbose = False,
                    legacy = False,
                    nodal = False,
                    prefix = '',
                    suffix = '',
                    dir = 'postProc',
                    filetype = None,
                    func = 'avg',
                    homog = '1',
                    cryst = '1',
                    phase = '1',
                    filter = '',
                    sep = [],
                    sort = [],
                    inc = False,
                    time = False,
                    separateFiles = False,
                    getIncrements = False,
                   )

(options, files) = parser.parse_args()

# --- basic sanity checks

if files == []:
  parser.print_help()
  parser.error('no file specified...')

if not os.path.exists(files[0]):
  parser.print_help()
  parser.error('invalid file "%s" specified...'%files[0])

# --- figure out filetype

if options.filetype is None:
  ext = os.path.splitext(files[0])[1]
  for theType in fileExtensions.keys():
    if ext in fileExtensions[theType]:
      options.filetype = theType
      break

if options.filetype is not None: options.filetype = options.filetype.lower()

if options.filetype == 'marc':  offset_pos = 1
else:                           offset_pos = 0


# --- more sanity checks

if options.filetype not in ['marc','spectral']:
  parser.print_help()
  parser.error('file type "%s" not supported...'%options.filetype)

if options.filetype == 'marc':
  sys.path.append(damask.solver.Marc().libraryPath())

  try:
    from py_post import post_open
  except:
    print('error: no valid Mentat release found')
    sys.exit(-1)
else:
  def post_open():
    return
2010-08-17 02:17:27 +05:30
if options . constitutiveResult and not options . phase :
2011-04-12 23:16:35 +05:30
parser . print_help ( )
parser . error ( ' constitutive results require phase... ' )
2010-08-17 02:17:27 +05:30
2016-03-03 15:13:43 +05:30
if options . nodalScalar and ( options . elemScalar or options . elemTensor \
2010-08-17 02:17:27 +05:30
or options . homogenizationResult or options . crystalliteResult or options . constitutiveResult ) :
2011-04-12 23:16:35 +05:30
parser . print_help ( )
parser . error ( ' not allowed to mix nodal with elemental results... ' )
2010-08-17 02:17:27 +05:30
2014-09-12 20:16:12 +05:30
if not options . nodalScalar : options . nodalScalar = [ ]
if not options . elemScalar : options . elemScalar = [ ]
if not options . elemTensor : options . elemTensor = [ ]
if not options . homogenizationResult : options . homogenizationResult = [ ]
2011-07-21 21:15:41 +05:30
if not options . crystalliteResult : options . crystalliteResult = [ ]
2014-09-12 20:16:12 +05:30
if not options . constitutiveResult : options . constitutiveResult = [ ]
2010-08-17 02:17:27 +05:30
2011-04-13 22:01:44 +05:30
options . sort . reverse ( )
2011-07-21 21:15:41 +05:30
options . sep . reverse ( )

# --- start background messaging

if options.verbose:
  bg = damask.util.backgroundMessage()
  bg.start()

# --- parse .output and .t16 files

if os.path.splitext(files[0])[1] == '':
  filename = files[0]
  extension = fileExtensions[options.filetype]
else:
  filename = os.path.splitext(files[0])[0]
  extension = os.path.splitext(files[0])[1]

outputFormat = {}
me = {
      'Homogenization': options.homog,
      'Crystallite':    options.cryst,
      'Constitutive':   options.phase,
     }

if options.verbose: bg.set_message('parsing .output files...')

for what in me:
  outputFormat[what] = ParseOutputFormat(filename, what, me[what])
  if '_id' not in outputFormat[what]['specials']:
    print("\nsection '{}' not found in <{}>".format(me[what], what))
    print('\n'.join(map(lambda x:'[%s]'%x, outputFormat[what]['specials']['brothers'])))

if options.verbose: bg.set_message('opening result file...')
p = OpenPostfile(filename+extension, options.filetype, options.nodal)
if options.verbose: bg.set_message('parsing result file...')
stat = ParsePostfile(p, filename, outputFormat)
if options.filetype == 'marc':
  stat['NumberOfIncrements'] -= 1                                        # t16 contains one "virtual" increment (at 0)

# --- sanity check for output variables
# for mentat variables (nodalScalar,elemScalar,elemTensor) we simply have to check whether the label
# is found in the stat['IndexOfLabel'] dictionary; for user defined variables (homogenizationResult,
# crystalliteResult,constitutiveResult) we have to check the corresponding outputFormat, since the
# name scheme in stat['IndexOfLabel'] is different

for opt in ['nodalScalar','elemScalar','elemTensor','homogenizationResult','crystalliteResult','constitutiveResult']:
  if eval('options.%s'%opt):
    for label in eval('options.%s'%opt):
      if (opt in ['nodalScalar','elemScalar','elemTensor'] and label not in stat['IndexOfLabel'] and label not in ['elements',]) \
         or (opt in ['homogenizationResult','crystalliteResult','constitutiveResult'] \
             and (not outputFormat[opt[:-6].capitalize()]['outputs'] \
                  or label not in list(zip(*outputFormat[opt[:-6].capitalize()]['outputs']))[0])):
        parser.error('%s "%s" unknown...'%(opt,label))

# --- output info

if options.info:
  if options.filetype == 'marc':
    print('\n\nMentat release {}'.format(damask.solver.Marc().version()))
  if options.filetype == 'spectral':
    print('\n\n{}'.format(p))

  SummarizePostfile(stat)

  print('\nUser Defined Outputs')
  for what in me:
    print('\n{}:'.format(what))
    for output in outputFormat[what]['outputs']:
      print('{}'.format(output))

  sys.exit(0)

# --- build connectivity maps

elementsOfNode = {}
for e in range(stat['NumberOfElements']):
  if options.verbose and e%1000 == 0: bg.set_message('connect elem %i...'%e)
  for n in map(p.node_sequence, p.element(e).items):
    if n not in elementsOfNode:
      elementsOfNode[n] = [p.element_id(e)]
    else:
      elementsOfNode[n] += [p.element_id(e)]

maxCountElementsOfNode = 0
for l in elementsOfNode.values():
  maxCountElementsOfNode = max(maxCountElementsOfNode, len(l))

# --------------------------- build group membership --------------------------------

p.moveto(offset_pos)
index = {}
groups = []
groupCount = 0
memberCount = 0

if options.nodalScalar:
  for n in range(stat['NumberOfNodes']):
    if options.verbose and n%1000 == 0: bg.set_message('scan node %i...'%n)
    myNodeID = p.node_id(n)
    myNodeCoordinates = [p.node(n).x, p.node(n).y, p.node(n).z]
    myElemID = 0
    myIpID = 0
    myGrainID = 0

    # generate an expression that is only true for the locations specified by options.filter
    filter = substituteLocation(options.filter, [myElemID,myNodeID,myIpID,myGrainID], myNodeCoordinates)
    if filter != '' and not eval(filter):                                 # for all filter expressions that are not true:...
      continue                                                            # ... ignore this data point and continue with next

    # --- group data locations
    # generate a unique key for a group of separated data based on the separation criterium for the location
    grp = substituteLocation('#'.join(options.sep), [myElemID,myNodeID,myIpID,myGrainID], myNodeCoordinates)

    if grp not in index:                                                  # create a new group if not yet present
      index[grp] = groupCount
      groups.append([[0,0,0,0,0.0,0.0,0.0]])                              # initialize with avg location
      groupCount += 1

    groups[index[grp]][0][:4] = mapIncremental('','unique',
                                               len(groups[index[grp]])-1,
                                               groups[index[grp]][0][:4],
                                               [myElemID,myNodeID,myIpID,myGrainID])   # keep only if unique average location
    groups[index[grp]][0][4:] = mapIncremental('','avg',
                                               len(groups[index[grp]])-1,
                                               groups[index[grp]][0][4:],
                                               myNodeCoordinates)                      # incrementally update average location
    groups[index[grp]].append([myElemID,myNodeID,myIpID,myGrainID,0])                  # append a new list defining each group member
    memberCount += 1

else:
  for e in range(stat['NumberOfElements']):
    if options.verbose and e%1000 == 0: bg.set_message('scan elem %i...'%e)
    myElemID = p.element_id(e)
    myIpCoordinates = ipCoords(p.element(e).type, list(map(lambda node: [node.x, node.y, node.z],
                                                           list(map(p.node, map(p.node_sequence, p.element(e).items))))))
    myIpIDs = ipIDs(p.element(e).type)
    Nips = len(myIpIDs)
    myNodeIDs = p.element(e).items[:Nips]
    for n in range(Nips):
      myIpID = myIpIDs[n]
      myNodeID = myNodeIDs[n]
      for g in range(('GrainCount' in stat['IndexOfLabel'] \
                      and int(p.element_scalar(e, stat['IndexOfLabel']['GrainCount'])[0].value)) \
                     or 1):
        myGrainID = g + 1

        # --- filter valid locations
        # generate an expression that is only true for the locations specified by options.filter
        filter = substituteLocation(options.filter, [myElemID,myNodeID,myIpID,myGrainID], myIpCoordinates[n])
        if filter != '' and not eval(filter):                             # for all filter expressions that are not true:...
          continue                                                        # ... ignore this data point and continue with next

        # --- group data locations
        # generate a unique key for a group of separated data based on the separation criterium for the location
        grp = substituteLocation('#'.join(options.sep), [myElemID,myNodeID,myIpID,myGrainID], myIpCoordinates[n])

        if grp not in index:                                              # create a new group if not yet present
          index[grp] = groupCount
          groups.append([[0,0,0,0,0.0,0.0,0.0]])                          # initialize with avg location
          groupCount += 1

        groups[index[grp]][0][:4] = mapIncremental('','unique',
                                                   len(groups[index[grp]])-1,
                                                   groups[index[grp]][0][:4],
                                                   [myElemID,myNodeID,myIpID,myGrainID])   # keep only if unique average location
        groups[index[grp]][0][4:] = mapIncremental('','avg',
                                                   len(groups[index[grp]])-1,
                                                   groups[index[grp]][0][4:],
                                                   myIpCoordinates[n])                     # incrementally update average location
        groups[index[grp]].append([myElemID,myNodeID,myIpID,myGrainID,n])                  # append a new list defining each group member
        memberCount += 1
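
# Resulting layout of "groups" (illustrative): the first entry of each group
# carries the group-wide ids and running average location, followed by one
# entry per member,
#   groups[g] == [[elem,node,ip,grain, avgX,avgY,avgZ],   # ids ('nan' when mixed) + average position
#                 [e,n,i,g,n_local], ...]                 # members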

# --------------------------- sort groups --------------------------------

where = {
         'elem': 0,
         'node': 1,
         'ip': 2,
         'grain': 3,
         'x': 4,
         'y': 5,
         'z': 6,
        }

sortProperties = []
for item in options.sep:
  if item not in options.sort:
    sortProperties.append(item)

theKeys = []
if 'none' not in map(str.lower, options.sort):
  for criterium in options.sort + sortProperties:
    if criterium in where:
      theKeys.append('x[0][%i]'%where[criterium])

sortKeys = eval('lambda x:(%s)'%(','.join(theKeys)))
if options.verbose: bg.set_message('sorting groups...')
groups.sort(key = sortKeys)                                               # in-place sorting to save mem

# --------------------------- create output dir --------------------------------

dirname = os.path.abspath(os.path.join(os.path.dirname(filename), options.dir))
if not os.path.isdir(dirname):
  os.mkdir(dirname, 0o755)

fileOpen = False
assembleHeader = True
header = []
standard = ['inc'] + \
           (['time'] if options.time else []) + \
           ['elem','node','ip','grain','1_pos','2_pos','3_pos']

# --------------------------- loop over positions --------------------------------

if options.verbose: bg.set_message('getting map between positions and increments...')

incAtPosition = {}
positionOfInc = {}

for position in range(int(stat['NumberOfIncrements'])):
  p.moveto(position+offset_pos)
  incAtPosition[position] = p.increment                                   # remember "real" increment at this position
  positionOfInc[p.increment] = position                                   # remember position of "real" increment

if not options.range:
  options.getIncrements = False
  locations = range(stat['NumberOfIncrements'])                           # process all positions
else:
  options.range = list(options.range)                                     # convert to list
  if options.getIncrements:
    locations = [positionOfInc[x] for x in range(options.range[0],options.range[1]+1,options.range[2])
                 if x in positionOfInc]
  else:
    locations = range(max(0,options.range[0]),
                      min(stat['NumberOfIncrements'],options.range[1]+1),
                      options.range[2])

increments = [incAtPosition[x] for x in locations]                        # build list of increments to process

time_start = time.time()

for incCount,position in enumerate(locations):                            # walk through locations
  p.moveto(position+offset_pos)                                           # wind to correct position

# --------------------------- file management --------------------------------

  if options.separateFiles:
    if fileOpen:
      file.close()                                                        # noqa
      fileOpen = False
    outFilename = eval('"'+eval("'%%s_inc%%0%ii%%s.txt'%(math.log10(max(increments+[1]))+1)")\
                          +'"%(dirname + os.sep + options.prefix + os.path.split(filename)[1],increments[incCount],options.suffix)')
  else:
    outFilename = '%s.txt'%(dirname + os.sep + options.prefix + os.path.split(filename)[1] + options.suffix)

  if not fileOpen:
    file = open(outFilename,'w')
    fileOpen = True
    file.write('2\theader\n')
    file.write(scriptID + '\t' + ' '.join(sys.argv[1:]) + '\n')
    headerWritten = False

  file.flush()

# --------------------------- read and map data per group --------------------------------

  member = 0
  for group in groups:

    N = 0                                                                 # group member counter
    for (e,n,i,g,n_local) in group[1:]:                                   # loop over group members
      member += 1
      if member%1000 == 0:
        time_delta = ((len(locations)*memberCount)/float(member+incCount*memberCount)-1.0)*(time.time()-time_start)
        if options.verbose: bg.set_message('(%02i:%02i:%02i) processing point %i of %i from increment %i (position %i)...'
                                           %(time_delta//3600,time_delta%3600//60,time_delta%60,member,memberCount,increments[incCount],position))

      newby = []                                                          # current member's data

      if options.nodalScalar:
        for label in options.nodalScalar:
          if label == 'elements':
            length = maxCountElementsOfNode
            content = elementsOfNode[p.node_sequence(n)] + [0]*(length-len(elementsOfNode[p.node_sequence(n)]))
          else:
            length = 1
            content = [p.node_scalar(p.node_sequence(n), stat['IndexOfLabel'][label])]
          if assembleHeader:
            header += heading('_', [[component, ''.join(label.split())]
                                    for component in range(int(length>1), length+int(length>1))])
          newby.append({'label': label,
                        'len': length,
                        'content': content})

      if options.elemScalar:
        for label in options.elemScalar:
          if assembleHeader:
            header += [''.join(label.split())]
          newby.append({'label': label,
                        'len': 1,
                        'content': [p.element_scalar(p.element_sequence(e), stat['IndexOfLabel'][label])[n_local].value]})

      if options.elemTensor:
        for label in options.elemTensor:
          if assembleHeader:
            header += heading('.', [[''.join(label.split()), component]
                                    for component in ['intensity','t11','t22','t33','t12','t23','t13']])
          myTensor = p.element_tensor(p.element_sequence(e), stat['IndexOfLabel'][label])[n_local]
          newby.append({'label': label,
                        'len': 7,
                        'content': [myTensor.intensity,
                                    myTensor.t11, myTensor.t22, myTensor.t33,
                                    myTensor.t12, myTensor.t23, myTensor.t13,
                                   ]})

      if options.homogenizationResult or \
         options.crystalliteResult or \
         options.constitutiveResult:
        for (label,resultType) in zip(options.homogenizationResult +
                                      options.crystalliteResult +
                                      options.constitutiveResult,
                                      ['Homogenization']*len(options.homogenizationResult) +
                                      ['Crystallite']*len(options.crystalliteResult) +
                                      ['Constitutive']*len(options.constitutiveResult)
                                     ):
          outputIndex = (list(zip(*outputFormat[resultType]['outputs']))[0]).index(label)          # find the position of this output in the outputFormat
          length = int(outputFormat[resultType]['outputs'][outputIndex][1])
          thisHead = heading('_', [[component, ''.join(label.split())] for component in range(int(length>1), length+int(length>1))])
          if assembleHeader: header += thisHead
          if resultType != 'Homogenization':
            thisHead = heading('_', [[g, component, label] for component in range(int(length>1), length+int(length>1))])
          try:
            newby.append({'label': label,
                          'len': length,
                          'content': [p.element_scalar(p.element_sequence(e), stat['IndexOfLabel'][head])[n_local].value
                                      for head in thisHead]})
          except KeyError:
            print('\nDAMASK outputs seem missing from "post" section of the *.dat file!')
            sys.exit()

      assembleHeader = False

      if N == 0:
        mappedResult = [float(x) for x in range(len(header))]             # init with debug data (should get deleted by *N at N=0)

      pos = 0
      for chunk in newby:
        mappedResult[pos:pos+chunk['len']] = mapIncremental(chunk['label'], options.func,
                                                            N, mappedResult[pos:pos+chunk['len']], chunk['content'])
        pos += chunk['len']

      N += 1

    # --- write data row to file ---

    if not headerWritten:
      file.write('\t'.join(standard + header) + '\n')
      headerWritten = True

    file.write('\t'.join(list(map(str, [p.increment] + \
                                       {True: [p.time], False: []}[options.time] + \
                                       group[0] + \
                                       mappedResult)
                             )) + '\n')


if fileOpen:
  file.close()