added test for converter from Ricardo's files
parent 885a71c0b7
commit 46d67f63d8
@@ -1,40 +1,21 @@
 #!/usr/bin/env python
 # -*- coding: UTF-8 no BOM -*-
 
-import os,sys,math,string,numpy
-from optparse import OptionParser, OptionGroup, Option, SUPPRESS_HELP
+import os,sys,math,string
+import numpy as np
+from optparse import OptionParser
+import damask
 
 scriptID = '$Id$'
 scriptName = scriptID.split()[1]
 
 
-#--------------------------------------------------------------------------------------------------
-class extendableOption(Option):
-#--------------------------------------------------------------------------------------------------
-# used for definition of new option parser action 'extend', which enables to take multiple option arguments
-# taken from online tutorial http://docs.python.org/library/optparse.html
-
-  ACTIONS = Option.ACTIONS + ("extend",)
-  STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
-  TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
-  ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)
-
-  def take_action(self, action, dest, opt, value, values, parser):
-    if action == "extend":
-      lvalue = value.split(",")
-      values.ensure_value(dest, []).extend(lvalue)
-    else:
-      Option.take_action(self, action, dest, opt, value, values, parser)
-
-
 #--------------------------------------------------------------------------------------------------
 # MAIN
 #--------------------------------------------------------------------------------------------------
-parser = OptionParser(option_class=extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
 Generate geometry description and material configuration from input files used by R.A. Lebensohn
-""" + string.replace('$Id$','\n','\\n')
-)
-
+""", version = scriptID)
 
 parser.add_option('--column', dest='column', type='int', metavar = 'int', \
                   help='data column to discriminate phase 1 from 2 [%default]')
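
For reference, the class deleted above is optparse's well-known 'extend' action recipe, which the script now takes from damask.extendableOption instead of redefining it locally. A minimal, runnable sketch of that pattern (the --labels option and the sample arguments are illustrative, not taken from the script):

    from optparse import Option, OptionParser

    class ExtendableOption(Option):
      # illustrative re-creation of the removed class: an extra 'extend' action
      # that splits comma-separated values and accumulates them in a list
      ACTIONS = Option.ACTIONS + ("extend",)
      STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
      TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
      ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)

      def take_action(self, action, dest, opt, value, values, parser):
        if action == "extend":
          values.ensure_value(dest, []).extend(value.split(","))    # append comma-separated pieces
        else:
          Option.take_action(self, action, dest, opt, value, values, parser)

    parser = OptionParser(option_class=ExtendableOption)
    parser.add_option('--labels', dest='labels', action='extend', type='string')   # hypothetical option
    options, args = parser.parse_args(['--labels', 'phi1,Phi,phi2', '--labels', 'x'])
    print(options.labels)                                           # ['phi1', 'Phi', 'phi2', 'x']

Centralizing the class in the damask package removes this boilerplate from each pre-processing script.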
@@ -81,9 +62,9 @@ for file in files:
   if file['name'] != 'STDIN': file['croak'].write(file['name']+'\n')
 
   info = {
-        'grid': numpy.zeros(3,'i'),
-        'size': numpy.zeros(3,'d'),
-        'origin': numpy.zeros(3,'d'),
+        'grid': np.zeros(3,'i'),
+        'size': np.zeros(3,'d'),
+        'origin': np.zeros(3,'d'),
         'microstructures': 0,
         'homogenization': options.homogenization
        }
@@ -120,10 +101,10 @@ for file in files:
                       'homogenization: %i\n'%info['homogenization'] + \
                       'microstructures: %i\n\n'%info['microstructures'])
 
-  if numpy.any(info['grid'] < 1):
+  if np.any(info['grid'] < 1):
     file['croak'].write('invalid grid a b c.\n')
     sys.exit()
-  if numpy.any(info['size'] <= 0.0):
+  if np.any(info['size'] <= 0.0):
     file['croak'].write('invalid size x y z.\n')
     sys.exit()
 
@@ -143,7 +124,7 @@ for file in files:
 
 #--- output finalization --------------------------------------------------------------------------
   if file['name'] != 'STDIN':
-    table.output_close()
+    file['output'].close()
     os.rename(file['name']+'_tmp',os.path.splitext(file['name'])[0] + \
                                   {True: '_material.config',
                                    False:'.geom'}[options.config])
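
The finalization above renames the temporary output to either <name>_material.config or <name>.geom, picking the suffix by indexing a two-entry dict with the boolean config flag. A small sketch of that idiom (the helper name is hypothetical):

    import os.path

    def final_name(input_name, as_config):
      # dict indexed by a boolean acts as a compact if/else for the suffix
      return os.path.splitext(input_name)[0] + {True: '_material.config',
                                                False: '.geom'}[as_config]

    print(final_name('sample.txt', True))    # sample_material.config
    print(final_name('sample.txt', False))   # sample.geom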
@@ -20,14 +20,14 @@ def meshgrid2(*arrs):
   dim = len(arrs)
   ans = []
   for i, arr in enumerate(arrs):
     slc = np.ones(dim,'i')
     slc[i] = lens[i]
     arr2 = np.asarray(arr).reshape(slc)
     for j, sz in enumerate(lens):
       if j != i:
         arr2 = arr2.repeat(sz, axis=j)
 
     ans.insert(0,arr2)
   return tuple(ans)
 
 
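
meshgrid2 expands each 1-D coordinate array to the full N-dimensional grid by reshaping it to a singleton shape and repeating it along every other axis. A self-contained sketch, assuming lens (computed above the hunk and not shown here) holds the length of each input array; note that ans.insert(0, ...) returns the grids in reverse argument order:

    import numpy as np

    def meshgrid2(*arrs):
      lens = [len(a) for a in arrs]          # assumption: matches the unshown lines above the hunk
      dim = len(arrs)
      ans = []
      for i, arr in enumerate(arrs):
        slc = np.ones(dim, 'i')              # e.g. [1, 1, 1]
        slc[i] = lens[i]                     # keep full length only along axis i
        arr2 = np.asarray(arr).reshape(slc)
        for j, sz in enumerate(lens):
          if j != i:
            arr2 = arr2.repeat(sz, axis=j)   # broadcast by explicit repetition
        ans.insert(0, arr2)
      return tuple(ans)

    x, y, z = np.arange(2), np.arange(3), np.arange(4)
    Z, Y, X = meshgrid2(x, y, z)             # reversed order because of insert(0, ...)
    print(X.shape, Y.shape, Z.shape)         # (2, 3, 4) for all three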
@@ -54,6 +54,7 @@ mappings = {
 
 parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
 Generate geometry description and material configuration by standard Voronoi tessellation of given seeds file.
 
 """, version = scriptID)
 
 parser.add_option('-g', '--grid', dest='grid', type='int', nargs = 3, metavar = 'int int int', \

@@ -36,7 +36,7 @@ compress geometry files with ranges "a to b" and/or multiples "n of x".
 
 (options, filenames) = parser.parse_args()
 
-# ------------------------------------------ setup file handles ------------------------------------
+# ------------------------------------------ setup file handles -----------------------------------
 files = []
 if filenames == []:
   files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
@@ -45,7 +45,7 @@ else:
     if os.path.exists(name):
       files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
 
-# ------------------------------------------ loop over input files ---------------------------------
+# ------------------------------------------ loop over input files --------------------------------
 for file in files:
   if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
   else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
@@ -53,7 +53,7 @@ for file in files:
   table = damask.ASCIItable(file['input'],file['output'],labels = False,buffered = False) # make unbuffered ASCII_table
   table.head_read()                                                                       # read ASCII header info
 
-#--- interpret header ----------------------------------------------------------------------------
+#--- interpret header -----------------------------------------------------------------------------
   info = {
         'grid': np.zeros(3,'i'),
         'size': np.zeros(3,'d'),
@@ -111,7 +111,7 @@ for file in files:
   reps = 0
 
   outputAlive = True
   while outputAlive and table.data_read():                                                # read next data line of ASCII table
     items = table.data
     if len(items) > 2:
       if items[1].lower() == 'of': items = [int(items[2])]*int(items[0])
@@ -136,7 +136,7 @@ for file in files:
       elif type == 'of':
         table.data = ['%i of %i'%(reps,former)]
 
       outputAlive = table.data_write(delimiter = ' ')                                     # output processed line
       type = '.'
       start = current
       reps = 1
@@ -148,14 +148,14 @@ for file in files:
                  'to': ['%i to %i'%(former-reps+1,former)],
                  'of': ['%i of %i'%(reps,former)],
                 }[type]
   outputAlive = table.data_write(delimiter = ' ')                                         # output processed line
 
 
 # ------------------------------------------ output result ---------------------------------------
   outputAlive and table.output_flush()                                                    # just in case of buffered ASCII table
 
 #--- output finalization --------------------------------------------------------------------------
   if file['name'] != 'STDIN':
     table.input_close()                                                                   # close input ASCII table
     table.output_close()                                                                  # close input ASCII table
     os.rename(file['name']+'_tmp',file['name'])                                           # overwrite old one with tmp new
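
The packer above walks the per-voxel microstructure indices and replaces runs with the compact "n of x" (repeated value) and "a to b" (consecutive values) notations named in the script description. A simplified sketch of that idea (not the script's exact state machine, which tracks former/current/reps across table lines):

    def pack_run(values):
      # compress a flat list of microstructure indices:
      # repeated values  -> 'n of x', ascending runs -> 'a to b', otherwise plain numbers
      out, i = [], 0
      while i < len(values):
        j = i
        while j + 1 < len(values) and values[j+1] == values[j]:      # repeated value
          j += 1
        if j > i:
          out.append('%i of %i' % (j - i + 1, values[i]))
          i = j + 1
          continue
        while j + 1 < len(values) and values[j+1] == values[j] + 1:  # ascending run
          j += 1
        if j > i + 1:
          out.append('%i to %i' % (values[i], values[j]))
        else:
          out.extend(str(v) for v in values[i:j+1])
        i = j + 1
      return out

    print(pack_run([7, 7, 7, 2, 3, 4, 5, 9]))   # ['3 of 7', '2 to 5', '9']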
@@ -31,7 +31,6 @@ mappings = {
 
 parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
 Unpack geometry files containing ranges "a to b" and/or "n of x" multiples (exclusively in one line).
 
 """, version = scriptID)
 
 parser.add_option('-1', '--onedimensional', dest='oneD', action='store_true', \
@@ -58,6 +57,7 @@ for file in files:
   table = damask.ASCIItable(file['input'],file['output'],labels = False,buffered = False) # make unbuffered ASCII_table
   table.head_read()                                                                       # read ASCII header info
 
 
 #--- interpret header ----------------------------------------------------------------------------
   info = {
         'grid': np.zeros(3,'i'),
@@ -100,7 +100,7 @@ for file in files:
   microstructure = np.zeros(info['grid'].prod(),'i')
   i = 0
 
   while table.data_read():                                                                # read next data line of ASCII table
     items = table.data
     if len(items) > 2:
       if items[1].lower() == 'of': items = [int(items[2])]*int(items[0])
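
On the unpacking side, each data line is expanded back into explicit indices before filling the flat microstructure array; the hunk above shows the "n of x" case, while the "a to b" case is assumed from the script description since that branch lies outside the hunk. A minimal sketch:

    import numpy as np

    def unpack_line(tokens):
      # expand one whitespace-split geometry line into explicit indices
      if len(tokens) > 2 and tokens[1].lower() == 'of':
        return [int(tokens[2])] * int(tokens[0])                 # '3 of 7' -> [7, 7, 7]
      if len(tokens) > 2 and tokens[1].lower() == 'to':
        return list(range(int(tokens[0]), int(tokens[2]) + 1))   # '2 to 5' -> [2, 3, 4, 5]  (assumed branch)
      return [int(t) for t in tokens]

    grid = np.array([2, 2, 2])
    microstructure = np.zeros(grid.prod(), 'i')                  # one entry per voxel
    i = 0
    for line in ['3 of 7', '2 to 5', '9']:
      items = unpack_line(line.split())
      microstructure[i:i+len(items)] = items
      i += len(items)
    print(microstructure)                                        # [7 7 7 2 3 4 5 9]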