made spectral preprocessing scripts more verbose (now reporting changes to the header) and aware of the new keywords; added coding information below the shebang

Martin Diehl 2013-05-13 11:27:59 +00:00
parent deb2bd7dca
commit 52af9aaabf
7 changed files with 228 additions and 218 deletions

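In essence, the changed scripts now treat the legacy 'resolution'/'dimension' header keywords as aliases for the new 'grid'/'size' keywords and report what they parsed. A minimal, self-contained sketch of that pattern (hypothetical parse_header helper and illustrative header lines, not code from this commit):

  import sys
  import numpy

  aliases  = {'resolution': 'grid', 'dimension': 'size'}     # legacy -> new keyword
  mappings = {'grid': int, 'size': float, 'origin': float,
              'homogenization': int, 'microstructures': int}
  vectors  = ('grid', 'size', 'origin')                      # keywords followed by a/b/c or x/y/z value pairs

  def parse_header(lines, croak=sys.stdout.write):
      info = {'grid': numpy.zeros(3, 'i'), 'size': numpy.zeros(3, 'd'),
              'origin': numpy.zeros(3, 'd'), 'homogenization': 0, 'microstructures': 0}
      for line in lines:
          items = line.lower().split()
          if not items: continue
          key = aliases.get(items[0], items[0])              # accept both old and new names
          if key not in mappings: continue
          if key in vectors:
              info[key] = numpy.array([mappings[key](v) for v in items[2:8:2]])
          else:
              info[key] = mappings[key](items[1])
      croak('grid a b c: %s\n' % ' x '.join(map(str, info['grid'])))
      croak('size x y z: %s\n' % ' x '.join(map(str, info['size'])))
      return info

  parse_header(['resolution a 16 b 16 c 16',                 # legacy keywords still understood
                'dimension x 1.0 y 1.0 z 1.0',
                'homogenization 1'])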
View File

@@ -4,9 +4,9 @@
 import string,os,sys
 from optparse import OptionParser, Option
-# -----------------------------
+#-------------------------------------------------------------------------------------------------
 class extendableOption(Option):
-# -----------------------------
+#-------------------------------------------------------------------------------------------------
 # used for definition of new option parser action 'extend', which enables to take multiple option arguments
 # taken from online tutorial http://docs.python.org/library/optparse.html
@@ -23,10 +23,9 @@ class extendableOption(Option):
 Option.take_action(self, action, dest, opt, value, values, parser)
-# --------------------------------------------------------------------
+#--------------------------------------------------------------------------------------------------
 # MAIN
-# --------------------------------------------------------------------
+#--------------------------------------------------------------------------------------------------
 parser = OptionParser(option_class=extendableOption, usage='%prog options [file[s]]', description = """
 Converts ang files (EBSD Data) from hexagonal grid to a pixel grid
@@ -42,10 +41,9 @@ parser.set_defaults(columnX = 3)
 counterX = 0
 counterY = 0
 addPoints = -1 # No of doubled points (must be the same for each odd/even line, initializing with -1 make countin easy!)
-# ------------------------------------------ setup file handles ---------------------------------------
+#--- setup file handles ---------------------------------------------------------------------------
 files = []
 if filenames == []:
 files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout})
@@ -55,8 +53,7 @@ else:
 files.append( {'name':name, 'input':open(name),'output':open(os.path.splitext(name)[0]\
 +'_cub'+os.path.splitext(name)[1], 'w')})
-# ------------------------------------------ loop over input files ---------------------------------------
+#--- loop over input files ------------------------------------------------------------------------
 for file in files:
 print file['name']
 x = 0

View File

@@ -1,12 +1,13 @@
 #!/usr/bin/env python
+# -*- coding: UTF-8 no BOM -*-
 import os,re,sys,math,numpy,string,damask
 from scipy import ndimage
 from optparse import OptionParser, Option
-# -----------------------------
+#--------------------------------------------------------------------------------------------------
 class extendableOption(Option):
-# -----------------------------
+#--------------------------------------------------------------------------------------------------
 # used for definition of new option parser action 'extend', which enables to take multiple option arguments
 # taken from online tutorial http://docs.python.org/library/optparse.html
@@ -44,23 +45,25 @@ def periodic_3Dpad(array, rimdim=(1,1,1)):
 padded[p[0],p[1],p[2]] = array[spot[0],spot[1],spot[2]]
 return padded
-# --------------------------------------------------------------------
+#--------------------------------------------------------------------------------------------------
 # MAIN
-# --------------------------------------------------------------------
+#--------------------------------------------------------------------------------------------------
 identifiers = {
-'resolution': ['a','b','c'],
-'dimension': ['x','y','z'],
+'grid': ['a','b','c'],
+'size': ['x','y','z'],
 'origin': ['x','y','z'],
-}
-mappings = {
-'resolution': lambda x: int(x),
-'dimension': lambda x: float(x),
-'origin': lambda x: float(x),
 }
-features = [ \
-{'aliens': 1, 'names': ['boundary','biplane'],},
+mappings = {
+'grid': lambda x: int(x),
+'size': lambda x: float(x),
+'origin': lambda x: float(x),
+'homogenization': lambda x: int(x),
+'microstructures': lambda x: int(x),
+}
+features = [
+{'aliens': 1, 'names': ['boundary, biplane'],},
 {'aliens': 2, 'names': ['tripleline',],},
 {'aliens': 3, 'names': ['quadruplepoint',],}
 ]
@@ -113,12 +116,11 @@ boundaries, triple lines, and quadruple points.
 )
 parser.add_option('-t','--type', dest='type', action='extend', type='string', \
 help='feature type (%s)'%(', '.join(map(lambda x:', '.join(x['names']),features))))
 parser.add_option('-n','--neighborhood', dest='neigborhood', action='store', type='string', \
-help='type of neighborhood (%s)'%(', '.join(neighborhoods.keys())), \
-metavar='<int>')
+help='type of neighborhood (%s) [neumann]'%(', '.join(neighborhoods.keys())))
 parser.add_option('-2', '--twodimensional', dest='twoD', action='store_true', \
-help='output geom file with two-dimensional data arrangement')
+help='output geom file with two-dimensional data arrangement [%default]')
 parser.set_defaults(type = [])
 parser.set_defaults(neighborhood = 'neumann')
@@ -133,14 +135,13 @@ if options.neighborhood not in neighborhoods:
 feature_list = []
 for i,feature in enumerate(features):
 for name in feature['names']:
-for type in options.type:
-if name.startswith(type):
+for myType in options.type:
+if name.startswith(myType):
 feature_list.append(i) # remember valid features
 break
-print feature_list
-# ------------------------------------------ setup file handles ---------------------------------------
+#--- setup file handles ---------------------------------------------------------------------------
 files = []
 if filenames == []:
 files.append({'name':'STDIN',
@@ -157,18 +158,15 @@ else:
 'croak':sys.stdout,
 })
-# ------------------------------------------ loop over input files ---------------------------------------
+#--- loop over input files ------------------------------------------------------------------------
 for file in files:
 if file['name'] != 'STDIN': file['croak'].write(file['name']+'\n')
-# get labels by either read the first row, or - if keyword header is present - the last line of the header
 firstline = file['input'].readline()
 m = re.search('(\d+)\s*head', firstline.lower())
 if m:
 headerlines = int(m.group(1))
-headers = [firstline]+[file['input'].readline() for i in range(headerlines)]
+headers = [file['input'].readline() for i in range(headerlines)]
 else:
 headerlines = 1
 headers = firstline
@@ -176,15 +174,25 @@ for file in files:
 content = file['input'].readlines()
 file['input'].close()
-info = {'resolution': numpy.array([0,0,0]),
-'dimension': numpy.array([0.0,0.0,0.0]),
-'origin': numpy.array([0.0,0.0,0.0]),
-'homogenization': 1,
+#--- interpretate header --------------------------------------------------------------------------
+info = {
+'grid': numpy.array([0,0,0]),
+'size': numpy.array([0.0,0.0,0.0]),
+'origin': numpy.array([0.0,0.0,0.0]),
+'microstructures': 0,
+'homogenization': 0
 }
+newInfo = {
+'microstructures': 0,
+}
 new_header = []
+new_header.append('$Id$\n')
 for header in headers:
 headitems = map(str.lower,header.split())
+if headitems[0] == 'resolution': headitems[0] = 'grid'
+if headitems[0] == 'dimension': headitems[0] = 'size'
 if headitems[0] in mappings.keys():
 if headitems[0] in identifiers.keys():
 for i in xrange(len(identifiers[headitems[0]])):
@@ -192,44 +200,39 @@ for file in files:
 mappings[headitems[0]](headitems[headitems.index(identifiers[headitems[0]][i])+1])
 else:
 info[headitems[0]] = mappings[headitems[0]](headitems[1])
+else:
+new_header.append(header)
-if numpy.all(info['resolution'] == 0):
-file['croak'].write('no resolution info found.\n')
+if numpy.all(info['grid'] == 0):
+file['croak'].write('no grid info found.\n')
 continue
-if numpy.all(info['dimension'] == 0.0):
-file['croak'].write('no dimension info found.\n')
+if numpy.all(info['size'] == 0.0):
+file['croak'].write('no size info found.\n')
 continue
-file['croak'].write('resolution: %s\n'%(' x '.join(map(str,info['resolution']))) + \
-'dimension: %s\n'%(' x '.join(map(str,info['dimension']))) + \
-'origin: %s\n'%(' : '.join(map(str,info['origin']))) + \
-'homogenization: %i\n'%info['homogenization'])
-new_header.append("resolution\ta %i\tb %i\tc %i\n"%(
-info['resolution'][0],
-info['resolution'][1],
-info['resolution'][2],))
-new_header.append("dimension\tx %f\ty %f\tz %f\n"%(
-info['dimension'][0],
-info['dimension'][1],
-info['dimension'][2],))
-new_header.append("origin\tx %f\ty %f\tz %f\n"%(
-info['origin'][0],
-info['origin'][1],
-info['origin'][2],))
+file['croak'].write('grid a b c: %s\n'%(' x '.join(map(str,info['grid']))) + \
+'size x y z: %s\n'%(' x '.join(map(str,info['size']))) + \
+'origin x y z: %s\n'%(' : '.join(map(str,info['origin']))) + \
+'homogenization: %i\n'%info['homogenization'] + \
+'microstructures: %i\n'%info['microstructures'])
+new_header.append("grid\ta %i\tb %i\tc %i\n"%(info['grid'][0],info['grid'][1],info['grid'][2],))
+new_header.append("size\tx %f\ty %f\tz %f\n"%(info['size'][0],info['size'][1],info['size'][2],))
+new_header.append("origin\tx %f\ty %f\tz %f\n"%(info['origin'][0],info['origin'][1],info['origin'][2],))
 new_header.append("homogenization\t%i\n"%info['homogenization'])
-structure = numpy.zeros(info['resolution'],'i')
+#--- process input --------------------------------------------------------------------------------
+structure = numpy.zeros(info['grid'],'i')
 i = 0
 for line in content:
 for item in map(int,line.split()):
-structure[i%info['resolution'][0],
-(i/info['resolution'][0])%info['resolution'][1],
-i/info['resolution'][0] /info['resolution'][1]] = item
+structure[i%info['grid'][0],
+(i/info['grid'][0])%info['grid'][1],
+i/info['grid'][0] /info['grid'][1]] = item
 i += 1
 neighborhood = neighborhoods[options.neighborhood]
-convoluted = numpy.empty([len(neighborhood)]+list(info['resolution']+2),'i')
+convoluted = numpy.empty([len(neighborhood)]+list(info['grid']+2),'i')
 microstructure = periodic_3Dpad(structure)
 for i,p in enumerate(neighborhood):
@@ -241,11 +244,11 @@ for file in files:
 convoluted[i,:,:,:] = ndimage.convolve(microstructure,stencil)
-distance = numpy.ones((len(feature_list),info['resolution'][0],info['resolution'][1],info['resolution'][2]),'d')
+distance = numpy.ones((len(feature_list),info['grid'][0],info['grid'][1],info['grid'][2]),'d')
 convoluted = numpy.sort(convoluted,axis=0)
-uniques = numpy.zeros(info['resolution'])
-check = numpy.empty(info['resolution'])
+uniques = numpy.zeros(info['grid'])
+check = numpy.empty(info['grid'])
 check[:,:,:] = numpy.nan
 for i in xrange(len(neighborhood)):
 uniques += numpy.where(convoluted[i,1:-1,1:-1,1:-1] == check,0,1)
@@ -254,32 +257,30 @@ for file in files:
 distance[i,:,:,:] = numpy.where(uniques > features[feature_id]['aliens'],0.0,1.0)
 for i in xrange(len(feature_list)):
-distance[i,:,:,:] = ndimage.morphology.distance_transform_edt(distance[i,:,:,:])*[max(info['dimension']/info['resolution'])]*3
+distance[i,:,:,:] = ndimage.morphology.distance_transform_edt(distance[i,:,:,:])*\
+[max(info['size']/info['grid'])]*3
 for i,feature in enumerate(feature_list):
-formatwidth = int(math.floor(math.log10(distance[i,:,:,:].max())+1))
-# ------------------------------------------ assemble header ---------------------------------------
-output = '%i\theader\n'%(len(new_header))
-output += ''.join(new_header)
-# ------------------------------------- regenerate texture information ----------------------------------
-for z in xrange(info['resolution'][2]):
-for y in xrange(info['resolution'][1]):
-output += {True:' ',False:'\n'}[options.twoD].join(map(lambda x: ('%%%ii'%formatwidth)%(round(x)), distance[i,:,y,z])) + '\n'
-# ------------------------------------------ output result ---------------------------------------
-file['output'][i].write(output)
+newInfo['microstructures'] = int(math.ceil(distance[i,:,:,:].max()))
+formatwidth = int(math.floor(math.log10(distance[i,:,:,:].max())+1))
+#--- assemble header and report changes -----------------------------------------------------------
+output = '%i\theader\n'%(len(new_header)+1)
+output += ''.join(new_header)
+output += "microstructures\t%i\n"%newInfo['microstructures']
+file['croak'].write('\n'+features[i]['names'][0]+'\n')
+if (newInfo['microstructures'] != info['microstructures']):
+file['croak'].write('--> microstructures: %i\n'%newInfo['microstructures'])
+#--- write new data -------------------------------------------------------------------------------
+for z in xrange(info['grid'][2]):
+for y in xrange(info['grid'][1]):
+output += {True:' ',False:'\n'}[options.twoD].join(map(lambda x: \
+('%%%ii'%formatwidth)%(round(x)), distance[i,:,y,z])) + '\n'
+file['output'][i].write(output)
 if file['name'] != 'STDIN':
 file['output'][i].close()
-if file['name'] != 'STDIN':
-file['input'].close() # close input geom file
+#--- output finalization --------------------------------------------------------------------------
+if file['name'] != 'STDIN':
+file['input'].close()

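For orientation, a geom header assembled by the updated script above would look roughly like the following (illustrative values; the count on the first line includes the trailing microstructures entry, and the tokens are tab-separated in the actual output):

  6 header
  $Id$
  grid  a 16  b 16  c 16
  size  x 1.000000  y 1.000000  z 1.000000
  origin  x 0.000000  y 0.000000  z 0.000000
  homogenization  1
  microstructures  2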
View File

@@ -4,10 +4,9 @@
 import os,sys,string,re,math,numpy
 from optparse import OptionParser, OptionGroup, Option, SUPPRESS_HELP
-# -----------------------------
+#--------------------------------------------------------------------------------------------------
 class extendedOption(Option):
-# -----------------------------
+#--------------------------------------------------------------------------------------------------
 # used for definition of new option parser action 'extend', which enables to take multiple option arguments
 # taken from online tutorial http://docs.python.org/library/optparse.html
@@ -23,18 +22,18 @@ class extendedOption(Option):
 else:
 Option.take_action(self, action, dest, opt, value, values, parser)
-# ----------------------- MAIN -------------------------------
+#--------------------------------------------------------------------------------------------------
+# MAIN
+#--------------------------------------------------------------------------------------------------
 identifiers = {
-'resolution': ['a','b','c'],
-'dimension': ['x','y','z'],
+'grid': ['a','b','c'],
+'size': ['x','y','z'],
 'origin': ['x','y','z'],
 }
 mappings = {
-'resolution': lambda x: int(x),
-'dimension': lambda x: float(x),
+'grid': lambda x: int(x),
+'size': lambda x: float(x),
 'origin': lambda x: float(x),
 'homogenization': lambda x: int(x),
 }
@@ -61,11 +60,10 @@ parser.set_defaults(twoD = False)
 (options, filenames) = parser.parse_args()
 sub = {}
 for i in xrange(len(options.substitute)/2): # split substitution list into "from" -> "to"
 sub[int(options.substitute[i*2])] = int(options.substitute[i*2+1])
-# ------------------------------------------ setup file handles ---------------------------------------
+#--- setup file handles ---------------------------------------------------------------------------
 files = []
 if filenames == []:
 files.append({'name':'STDIN',
@@ -82,18 +80,15 @@ else:
 'croak':sys.stdout,
 })
-# ------------------------------------------ loop over input files ---------------------------------------
+#--- loop over input files ------------------------------------------------------------------------
 for file in files:
 if file['name'] != 'STDIN': file['croak'].write(file['name']+'\n')
-# get labels by either read the first row, or - if keyword header is present - the last line of the header
 firstline = file['input'].readline()
 m = re.search('(\d+)\s*head', firstline.lower())
 if m:
 headerlines = int(m.group(1))
-headers = [firstline]+[file['input'].readline() for i in range(headerlines)]
+headers = [file['input'].readline() for i in range(headerlines)]
 else:
 headerlines = 1
 headers = firstline
@@ -101,15 +96,25 @@ for file in files:
 content = file['input'].readlines()
 file['input'].close()
-info = {'resolution': numpy.array([0,0,0]),
-'dimension': numpy.array([0.0,0.0,0.0]),
-'origin': numpy.array([0.0,0.0,0.0]),
-'homogenization': 1,
+#--- interpretate header --------------------------------------------------------------------------
+info = {
+'grid': numpy.array([0,0,0]),
+'size': numpy.array([0.0,0.0,0.0]),
+'origin': numpy.zeros(3,'d'),
+'microstructures': 0,
+'homogenization': 0
 }
+newInfo = {
+'origin': numpy.zeros(3,'d'),
+'microstructures': 0,
+}
 new_header = []
+new_header.append('$Id$\n')
 for header in headers:
 headitems = map(str.lower,header.split())
+if headitems[0] == 'resolution': headitems[0] = 'grid'
+if headitems[0] == 'dimension': headitems[0] = 'size'
 if headitems[0] in mappings.keys():
 if headitems[0] in identifiers.keys():
 for i in xrange(len(identifiers[headitems[0]])):
@@ -117,67 +122,67 @@ for file in files:
 mappings[headitems[0]](headitems[headitems.index(identifiers[headitems[0]][i])+1])
 else:
 info[headitems[0]] = mappings[headitems[0]](headitems[1])
+else:
+new_header.append(header)
-if numpy.all(info['resolution'] == 0):
-file['croak'].write('no resolution info found.\n')
+if numpy.all(info['grid'] == 0):
+file['croak'].write('no grid info found.\n')
 continue
-if numpy.all(info['dimension'] == 0.0):
-file['croak'].write('no dimension info found.\n')
+if numpy.all(info['size'] == 0.0):
+file['croak'].write('no size info found.\n')
 continue
-file['croak'].write('resolution: %s\n'%(' x '.join(map(str,info['resolution']))) + \
-'dimension: %s\n'%(' x '.join(map(str,info['dimension']))) + \
-'origin: %s\n'%(' : '.join(map(str,info['origin']))) + \
-'homogenization: %i\n'%info['homogenization'])
-new_header.append("resolution\ta %i\tb %i\tc %i\n"%(
-info['resolution'][0],
-info['resolution'][1],
-info['resolution'][2],))
-new_header.append("dimension\tx %f\ty %f\tz %f\n"%(
-info['dimension'][0],
-info['dimension'][1],
-info['dimension'][2]))
-new_header.append("origin\tx %f\ty %f\tz %f\n"%(
-info['origin'][0]+options.origin[0],
-info['origin'][1]+options.origin[1],
-info['origin'][2]+options.origin[2]))
-new_header.append("homogenization\t%i\n"%info['homogenization'])
-# ------------------------------------------ assemble header ---------------------------------------
-output = '%i\theader\n'%(len(new_header))
-output += ''.join(new_header)
-file['output'].write(output)
-# ------------------------------------------ process input ---------------------------------------
-N = info['resolution'][0]*info['resolution'][1]*info['resolution'][2]
+file['croak'].write('grid a b c: %s\n'%(' x '.join(map(str,info['grid']))) + \
+'size x y z: %s\n'%(' x '.join(map(str,info['size']))) + \
+'origin x y z: %s\n'%(' : '.join(map(str,info['origin']))) + \
+'homogenization: %i\n'%info['homogenization'] + \
+'microstructures: %i\n\n'%info['microstructures'])
+#--- process input --------------------------------------------------------------------------------
+N = info['grid'][0]*info['grid'][1]*info['grid'][2]
 microstructure = numpy.zeros(N,'i')
 i = 0
 for line in content:
 d = map(int,line.split())
 s = len(d)
 microstructure[i:i+s] = d # read microstructure indices
 i += s
 for i in xrange(N):
 if microstructure[i] in sub: microstructure[i] = sub[microstructure[i]] # substitute microstructure indices
 microstructure += options.microstructure # shift microstructure indices
-formatwidth = int(math.floor(math.log10(microstructure.max())+1))
-i = 0
-for z in xrange(info['resolution'][2]):
-for y in xrange(info['resolution'][1]):
-output = {True:' ',False:'\n'}[options.twoD].join(map(lambda x: ('%%%ii'%formatwidth)%x, microstructure[i:i+info['resolution'][0]])) + '\n'
-file['output'].write(output)
-i += info['resolution'][0]
-# ------------------------------------------ output finalization ---------------------------------------
+formatwidth = int(math.floor(math.log10(microstructure.max())+1))
+#--- assemble header and report changes -----------------------------------------------------------
+newInfo['origin'] = info['origin'] + options.origin
+newInfo['microstructures'] = microstructure.max()
+if (any(newInfo['origin'] != info['origin'])):
+file['croak'].write('--> origin x y z: %s\n'%(' : '.join(map(str,newInfo['origin']))))
+if (newInfo['microstructures'] != info['microstructures']):
+file['croak'].write('--> microstructures: %i\n'%newInfo['microstructures'])
+new_header.append("grid\ta %i\tb %i\tc %i\n"%(info['grid'][0],info['grid'][1],info['grid'][2],))
+new_header.append("size\tx %f\ty %f\tz %f\n"%(info['size'][0],info['size'][1],info['size'][2],))
+new_header.append("origin\tx %f\ty %f\tz %f\n"%(
+newInfo['origin'][0],newInfo['origin'][1],newInfo['origin'][2],))
+new_header.append("microstructures\t%i\n"%newInfo['microstructures'])
+new_header.append("homogenization\t%i\n"%info['homogenization'])
+file['output'].write('%i\theader\n'%(len(new_header))+''.join(new_header))
+#--- write new data -------------------------------------------------------------------------------
+i = 0
+for z in xrange(info['grid'][2]):
+for y in xrange(info['grid'][1]):
+output = {True:' ',False:'\n'}[options.twoD].join(map(lambda x: ('%%%ii'%formatwidth)%x,
+microstructure[i:i+info['grid'][0]])) + '\n'
+file['output'].write(output)
+i += info['grid'][0]
+#--- output finalization --------------------------------------------------------------------------
 if file['name'] != 'STDIN':
 file['output'].close()
 os.rename(file['name']+'_tmp',file['name'])

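The renumbering performed by the script above boils down to a substitution map built from the --substitute pairs followed by a constant shift; a small standalone sketch (hypothetical renumber helper, not part of the commit):

  def renumber(microstructure, substitute, shift=0):
      # substitute comes as flat "from to" pairs, e.g. ['2', '5'] maps index 2 to 5
      sub = dict((int(substitute[i*2]), int(substitute[i*2+1]))
                 for i in range(len(substitute)//2))
      return [sub.get(m, m) + shift for m in microstructure]

  print(renumber([1, 1, 2, 3, 2], ['2', '5'], shift=10))     # -> [11, 11, 15, 13, 15]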
View File

@@ -1,4 +1,6 @@
 #!/usr/bin/env python
+# -*- coding: UTF-8 no BOM -*-
 '''
 Writes meaningful labels to the marc input file (*.dat)
 based on the files

View File

@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+# -*- coding: UTF-8 no BOM -*
 import sys,os,pwd,math,re,string,numpy, damask
 from optparse import OptionParser
@@ -149,10 +150,9 @@ def servoLink():
 return cmds
-# ----------------------- MAIN -------------------------------
+#--------------------------------------------------------------------------------------------------
+# MAIN
+#--------------------------------------------------------------------------------------------------
 parser = OptionParser(usage='%prog [options]', description = """
 Set up servo linking to achieve periodic boundary conditions for a regular hexahedral mesh presently opened in MSC.Mentat

View File

@@ -1,12 +1,12 @@
 #!/usr/bin/env python
+# -*- coding: UTF-8 no BOM -*-
 import os, sys, math, re, threading, time, string, damask
 from optparse import OptionParser, OptionGroup, Option, SUPPRESS_HELP
-# ----------------------- FUNCTIONS ----------------------------
+#-------------------------------------------------------------------------------------------------
 def outMentat(cmd,locals):
+#-------------------------------------------------------------------------------------------------
 if cmd[0:3] == '(!)':
 exec(cmd[3:])
 elif cmd[0:3] == '(?)':
@@ -16,7 +16,9 @@ def outMentat(cmd,locals):
 py_send(cmd)
 return
+#-------------------------------------------------------------------------------------------------
 def outStdout(cmd,locals):
+#-------------------------------------------------------------------------------------------------
 if cmd[0:3] == '(!)':
 exec(cmd[3:])
 elif cmd[0:3] == '(?)':
@@ -26,8 +28,9 @@ def outStdout(cmd,locals):
 print cmd
 return
+#-------------------------------------------------------------------------------------------------
 def output(cmds,locals,dest):
+#-------------------------------------------------------------------------------------------------
 for cmd in cmds:
 if isinstance(cmd,list):
 output(cmd,locals,dest)
@@ -40,9 +43,9 @@ def output(cmds,locals,dest):
-#--------------------
+#-------------------------------------------------------------------------------------------------
 def init():
-#--------------------
+#-------------------------------------------------------------------------------------------------
 return ["*new_model yes",
 "*reset",
 "*select_clear",
@@ -56,9 +59,9 @@ def init():
 ]
-#--------------------
+#-------------------------------------------------------------------------------------------------
 def mesh(r,d):
-#--------------------
+#-------------------------------------------------------------------------------------------------
 return [
 "*add_nodes",
 "%f %f %f"%(0.0,0.0,0.0),
@@ -88,9 +91,9 @@ def mesh(r,d):
 ]
-#--------------------
+#-------------------------------------------------------------------------------------------------
 def material():
-#--------------------
+#-------------------------------------------------------------------------------------------------
 cmds = [\
 "*new_mater standard",
 "*mater_option general:state:solid",
@@ -100,7 +103,7 @@ def material():
 "*add_mater_elements",
 "all_existing",
 "*geometry_type mech_three_solid",
 # "*geometry_option red_integ_capacity:on", # see below: reduced integration with one IP gave trouble being always OUTDATED...
 "*add_geometry_elements",
 "all_existing",
 ]
@@ -108,27 +111,27 @@ def material():
 return cmds
-#--------------------
+#-------------------------------------------------------------------------------------------------
 def geometry():
-#--------------------
+#-------------------------------------------------------------------------------------------------
 cmds = [\
 "*geometry_type mech_three_solid",
 # "*geometry_option red_integ_capacity:on",
 "*add_geometry_elements",
 "all_existing",
 "*element_type 7", # we are NOT using reduced integration (type 117) but opt for /elementhomogeneous/ in the respective phase description (material.config)
 "all_existing",
 ]
 return cmds
-#--------------------
-def initial_conditions(homogenization,grains):
-#--------------------
+#-------------------------------------------------------------------------------------------------
+def initial_conditions(homogenization,microstructures):
+#-------------------------------------------------------------------------------------------------
 elements = []
 element = 0
-for id in grains:
+for id in microstructures:
 element += 1
 if len(elements) < id:
 for i in range(id-len(elements)):
@@ -166,61 +169,62 @@ def initial_conditions(homogenization,grains):
 return cmds
-#--------------------
+#-------------------------------------------------------------------------------------------------
 def parse_geomFile(content,homog):
-#--------------------
+#-------------------------------------------------------------------------------------------------
 (skip,key) = content[0].split()[:2]
 if key[:4].lower() == 'head':
 skip = int(skip)+1
 else:
 skip = 0
-res = [0,0,0]
-dim = [0.0,0.0,0.0]
+grid = [0,0,0]
+size = [0.0,0.0,0.0]
 homog = 0
 for line in content[:skip]:
 data = line.split()
-if data[0].lower() == 'resolution':
-res = map(int,data[2:8:2])
-if data[0].lower() == 'dimension':
-dim = map(float,data[2:8:2])
+if data[0].lower() == 'grid' or data[0].lower() == 'resolution':
+grid = map(int,data[2:8:2])
+if data[0].lower() == 'size' or data[0].lower() == 'dimension':
+size = map(float,data[2:8:2])
 if data[0].lower() == 'homogenization':
 homog = int(data[1])
-grains = []
+microstructures = []
 for line in content[skip:]:
-grains.append(int(line.split()[0]))
-return (res,dim,homog,grains)
+microstructures.append(int(line.split()[0]))
+return (grid,size,homog,microstructures)
-#--------------------
+#-------------------------------------------------------------------------------------------------
 def parse_spectralFile(content,homog):
-#--------------------
+#-------------------------------------------------------------------------------------------------
 coords = [{},{},{}]
 maxBox = [-1.0e20,-1.0e20,-1.0e20]
 minBox = [ 1.0e20, 1.0e20, 1.0e20]
-dim = [0.0,0.0,0.0]
-res = [0,0,0]
-grains = []
+grid = [0.0,0.0,0.0]
+size = [0,0,0]
+microstructures = []
 for line in content:
 data = line.split()[3:7]
-grains.append(int(data[3]))
+microstructures.append(int(data[3]))
 for i in range(3):
 maxBox[i] = max(maxBox[i],float(data[i]))
 minBox[i] = min(minBox[i],float(data[i]))
 coords[i][data[i]] = True
 for i in range(3):
-res[i] = len(coords[i])
-dim[i] = (maxBox[i]-minBox[i])*res[i]/(res[i]-1.0)
-return (res,dim,homog,grains)
-# ----------------------- MAIN -------------------------------
+grid[i] = len(coords[i])
+size[i] = (maxBox[i]-minBox[i])*grid[i]/(grid[i]-1.0)
+return (grid,size,homog,microstructures)
+#--------------------------------------------------------------------------------------------------
+# MAIN
+#--------------------------------------------------------------------------------------------------
 parser = OptionParser(usage='%prog [options] spectral.datafile', description = """
 Generate FE hexahedral mesh from spectral description file.
@@ -231,18 +235,18 @@ spectral: phi1,Phi,phi2,x,y,z,id,phase.
 """ + string.replace('$Id$','\n','\\n')
 )
 parser.add_option("-p", "--port", type="int",\
 dest="port",\
 help="Mentat connection port")
 parser.add_option("-g", "--geom", action="store_const", const="geom",\
 dest="filetype",\
 help="file has 'geom' format")
 parser.add_option("-s", "--spectral", action="store_const", const="spectral",\
 dest="filetype",\
-help="file has 'spectral' format")
+help="file has 'spectral' format (VPSC Lebensohn)")
 parser.add_option("--homogenization", type="int",\
 dest="homogenization",\
 help="homogenization index from material.config (only required for spectral file type)")
 parser.set_defaults(filetype = 'geom')
@@ -272,20 +276,20 @@ if options.filetype not in ['spectral','geom']:
 print '\nparsing %s...'%options.filetype,
 sys.stdout.flush()
-(res,dim,homog,grains) = {\
+(grid,size,homog,microstructures) = {\
 'geom': parse_geomFile,
 'spectral': parse_spectralFile,
 }[options.filetype](content,options.homogenization)
-print '%i grains in %s with resolution %s and homogenization %i\n'%(len(list(set(grains))),str(dim),str(res),homog)
+print '%i microstructures in %s with grid %s and homogenization %i\n'%(len(list(set(microstructures))),str(size),str(grid),homog)
 cmds = [\
 init(),
-mesh(res,dim),
+mesh(grid,size),
 material(),
 geometry(),
-initial_conditions(homog,grains),
+initial_conditions(homog,microstructures),
 '*identify_sets',
 '*redraw',
 ]

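One detail of parse_spectralFile above worth spelling out: the grid count per axis is the number of distinct coordinate values, and since spectral points are cell centers, the box size is extrapolated from their span by grid/(grid-1). A tiny worked sketch with made-up coordinates:

  coords = [0.125, 0.375, 0.625, 0.875]   # illustrative cell centers along one axis
  grid   = len(set(coords))               # 4 distinct positions
  size   = (max(coords) - min(coords)) * grid / (grid - 1.0)
  print(size)                             # 0.75 * 4/3 = 1.0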
View File

@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+# -*- coding: UTF-8 no BOM -*-
 import sys,os,math,re,string, damask
 from optparse import OptionParser, OptionGroup, Option, SUPPRESS_HELP