DAMASK_EICMD/processing/pre/geom_pack.py


#!/usr/bin/env python
# -*- coding: UTF-8 no BOM -*-
import os,sys,string,re,math,numpy
import damask
from optparse import OptionParser, OptionGroup, Option, SUPPRESS_HELP
scriptID   = '$Id$'                                                                                 # filled in by SVN keyword expansion
scriptName = os.path.basename(sys.argv[0])                                                          # derive the script name robustly, even when $Id$ is not expanded
#--------------------------------------------------------------------------------------------------
class extendedOption(Option):
#--------------------------------------------------------------------------------------------------
# used for definition of new option parser action 'extend', which enables to take multiple option arguments
# taken from online tutorial http://docs.python.org/library/optparse.html

  ACTIONS = Option.ACTIONS + ("extend",)
  STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
  TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
  ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)

  def take_action(self, action, dest, opt, value, values, parser):
    if action == "extend":
      lvalue = value.split(",")
      values.ensure_value(dest, []).extend(lvalue)
    else:
      Option.take_action(self, action, dest, opt, value, values, parser)
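
# Usage sketch (not part of this script, option names are hypothetical): with this option class,
# an option declared as parser.add_option('--label', action='extend', dest='labels') accepts both
# repeated and comma-separated values, so "--label a,b --label c" yields options.labels == ['a','b','c'].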
#--------------------------------------------------------------------------------------------------
# MAIN
#--------------------------------------------------------------------------------------------------
synonyms = {
  'grid':   ['resolution'],
  'size':   ['dimension'],
}
identifiers = {
  'grid':   ['a','b','c'],
  'size':   ['x','y','z'],
  'origin': ['x','y','z'],
}
mappings = {
  'grid':            lambda x: int(x),
  'size':            lambda x: float(x),
  'origin':          lambda x: float(x),
  'homogenization':  lambda x: int(x),
  'microstructures': lambda x: int(x),
}
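
# Illustration (values assumed, format matching the header written back further below): these tables
# let the header parser map lines such as
#   grid    a 16    b 16    c 16
#   size    x 1.0   y 1.0   z 1.0
#   homogenization  1
# onto info['grid'], info['size'], ... using the per-keyword converters in 'mappings', while
# 'synonyms' also accepts the alternative keywords 'resolution' and 'dimension'.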
parser = OptionParser(option_class=extendedOption, usage='%prog options [file[s]]', description = """
Compress geometry files by replacing runs of identical or consecutive microstructure indices
with multiples "n of x" and ranges "a to b".
""" + string.replace(scriptID,'\n','\\n')
)
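
# Example of the compression performed below (values are illustrative): a run of identical
# microstructure indices such as "3 3 3 3" is packed as "4 of 3", a strictly consecutive run
# such as "1 2 3 4" is packed as "1 to 4", and isolated values are written out unchanged.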
(options, filenames) = parser.parse_args()
#--- setup file handles ---------------------------------------------------------------------------
files = []
if filenames == []:
  files.append({'name':   'STDIN',
                'input':  sys.stdin,
                'output': sys.stdout,
                'croak':  sys.stderr,
               })
else:
  for name in filenames:
    if os.path.exists(name):
      files.append({'name':   name,
                    'input':  open(name),
                    'output': open(name+'_tmp','w'),
                    'croak':  sys.stdout,
                   })
#--- loop over input files ------------------------------------------------------------------------
for file in files:
  if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
  else:                       file['croak'].write('\033[1m'+scriptName+'\033[0m\n')

  theTable = damask.ASCIItable(file['input'],file['output'],labels=False)
  theTable.head_read()
#--- interpret header ----------------------------------------------------------------------------
  info = {
          'grid':    numpy.zeros(3,'i'),
          'size':    numpy.zeros(3,'d'),
          'origin':  numpy.zeros(3,'d'),
          'homogenization':  0,
          'microstructures': 0,
         }
  extra_header = []

  for header in theTable.info:
    headitems = map(str.lower,header.split())
    if len(headitems) == 0: continue
    for synonym,alternatives in synonyms.iteritems():
      if headitems[0] in alternatives: headitems[0] = synonym
    if headitems[0] in mappings.keys():
      if headitems[0] in identifiers.keys():
        for i in xrange(len(identifiers[headitems[0]])):
          info[headitems[0]][i] = \
            mappings[headitems[0]](headitems[headitems.index(identifiers[headitems[0]][i])+1])
      else:
        info[headitems[0]] = mappings[headitems[0]](headitems[1])
    else:
      extra_header.append(header)
  file['croak'].write('grid a b c: %s\n'%(' x '.join(map(str,info['grid']))) + \
                      'size x y z: %s\n'%(' x '.join(map(str,info['size']))) + \
                      'origin x y z: %s\n'%(' : '.join(map(str,info['origin']))) + \
                      'homogenization: %i\n'%info['homogenization'] + \
                      'microstructures: %i\n'%info['microstructures'])

  if numpy.any(info['grid'] < 1):
    file['croak'].write('invalid grid a b c.\n')
    continue
  if numpy.any(info['size'] <= 0.0):
    file['croak'].write('invalid size x y z.\n')
    continue
#--- write header ---------------------------------------------------------------------------------
  theTable.labels_clear()
  theTable.info_clear()
  theTable.info_append(extra_header+[
    scriptID + ' ' + ' '.join(sys.argv[1:]),
    "grid\ta %i\tb %i\tc %i"%(info['grid'][0],info['grid'][1],info['grid'][2],),
    "size\tx %e\ty %e\tz %e"%(info['size'][0],info['size'][1],info['size'][2],),
    "origin\tx %e\ty %e\tz %e"%(info['origin'][0],info['origin'][1],info['origin'][2],),
    "homogenization\t%i"%info['homogenization'],
"microstructures\t%i"%(info['microstructures']),
])
theTable.head_write()
theTable.output_flush()
# --- write packed microstructure information -----------------------------------------------------
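# The packing below is a small run-length encoder: 'former' holds the previously seen index,
# 'start' the first index of the current run, 'reps' the run length, and 'type' the pending output
# form ('' nothing yet, '.' single value, 'to' consecutive range, 'of' repeated value).
# A buffered run is flushed to the output table whenever the incoming value no longer extends it.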
  type   = ''
  former = -1
  start  = -1
  reps   = 0
  theTable.data_rewind()
  while theTable.data_read():
    items = theTable.data
    if len(items) > 2:
      if   items[1].lower() == 'of': items = [int(items[2])]*int(items[0])
      elif items[1].lower() == 'to': items = xrange(int(items[0]),1+int(items[2]))
      else:                          items = map(int,items)
    else:                            items = map(int,items)

    for current in items:
      if current == former+1 and start+reps == former+1:
        type = 'to'
        reps += 1
      elif current == former and start == former:
        type = 'of'
        reps += 1
      else:
        theTable.data = {
                         ''  : [],
                         '.' : [str(former)],
                         'to': ['%i to %i'%(former-reps+1,former)],
                         'of': ['%i of %i'%(reps,former)],
                        }[type]
        theTable.data_write(delimiter=' ')
        type  = '.'
        start = current
        reps  = 1

      former = current

  theTable.data = {
                   ''  : [],                                                                        # guard added here: avoids a KeyError when the data section is empty
                   '.' : [str(former)],
                   'to': ['%i to %i'%(former-reps+1,former)],
                   'of': ['%i of %i'%(reps,former)],
                  }[type]
  theTable.data_write(delimiter=' ')
  theTable.output_flush()
#--- output finalization --------------------------------------------------------------------------
  if file['name'] != 'STDIN':
    file['input'].close()
    file['output'].close()
    os.rename(file['name']+'_tmp',file['name'])