made spectral preprocessing scripts more verbose (changes are now reported together with the header info) and aware of the new keywords; added a coding declaration below the shebang

Martin Diehl 2013-05-13 11:27:59 +00:00
parent deb2bd7dca
commit 52af9aaabf
7 changed files with 228 additions and 218 deletions
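
The change common to the geom-processing scripts below is the switch of the header keywords 'resolution'/'dimension' to 'grid'/'size' (the old names are still accepted as aliases) plus more verbose reporting, where quantities the script modified are echoed with a leading '-->'. What follows is a minimal, stand-alone sketch of that pattern, not the DAMASK scripts themselves; the header lines and the changed microstructures value are made up for illustration.

import sys
import numpy

headers = [                                   # hypothetical geom header lines
  'resolution a 16 b 16 c 16\n',
  'dimension  x 1.0 y 1.0 z 1.0\n',
  'origin     x 0.0 y 0.0 z 0.0\n',
  'homogenization 1\n',
]

identifiers = {                               # keywords carrying per-axis sub-keys
  'grid':   ['a','b','c'],
  'size':   ['x','y','z'],
  'origin': ['x','y','z'],
}
mappings = {                                  # type conversion per keyword
  'grid':            int,
  'size':            float,
  'origin':          float,
  'homogenization':  int,
  'microstructures': int,
}
info = {
  'grid':   numpy.zeros(3,'i'),
  'size':   numpy.zeros(3,'d'),
  'origin': numpy.zeros(3,'d'),
  'homogenization':  0,
  'microstructures': 0,
}

for header in headers:
  headitems = list(map(str.lower, header.split()))
  if headitems[0] == 'resolution': headitems[0] = 'grid'    # accept old keyword as alias
  if headitems[0] == 'dimension':  headitems[0] = 'size'
  if headitems[0] in mappings:
    if headitems[0] in identifiers:                         # keyword with per-axis values
      for i, key in enumerate(identifiers[headitems[0]]):
        info[headitems[0]][i] = mappings[headitems[0]](headitems[headitems.index(key)+1])
    else:                                                   # keyword with a single value
      info[headitems[0]] = mappings[headitems[0]](headitems[1])

sys.stdout.write('grid   a b c: %s\n'%(' x '.join(map(str,info['grid']))))
sys.stdout.write('size   x y z: %s\n'%(' x '.join(map(str,info['size']))))

newInfo = {'microstructures': 42}                           # made-up changed value
if newInfo['microstructures'] != info['microstructures']:   # report only what changed
  sys.stdout.write('--> microstructures: %i\n'%newInfo['microstructures'])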

View File

@ -4,9 +4,9 @@
import string,os,sys
from optparse import OptionParser, Option
# -----------------------------
#-------------------------------------------------------------------------------------------------
class extendableOption(Option):
# -----------------------------
#-------------------------------------------------------------------------------------------------
# used for definition of the new option parser action 'extend', which allows an option to take multiple arguments
# taken from online tutorial http://docs.python.org/library/optparse.html
@ -23,10 +23,9 @@ class extendableOption(Option):
Option.take_action(self, action, dest, opt, value, values, parser)
# --------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------
parser = OptionParser(option_class=extendableOption, usage='%prog options [file[s]]', description = """
Converts ang files (EBSD Data) from hexagonal grid to a pixel grid
@ -42,10 +41,9 @@ parser.set_defaults(columnX = 3)
counterX = 0
counterY = 0
addPoints = -1 # No of doubled points (must be the same for each odd/even line, initializing with -1 makes counting easy!)
# ------------------------------------------ setup file handles ---------------------------------------
addPoints = -1 # No of doubled points (must be the same for each odd/even line, initializing with -1 makes counting easy!)
#--- setup file handles ---------------------------------------------------------------------------
files = []
if filenames == []:
files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout})
@ -55,8 +53,7 @@ else:
files.append( {'name':name, 'input':open(name),'output':open(os.path.splitext(name)[0]\
+'_cub'+os.path.splitext(name)[1], 'w')})
# ------------------------------------------ loop over input files ---------------------------------------
#--- loop over input files ------------------------------------------------------------------------
for file in files:
print file['name']
x = 0

View File

@ -1,12 +1,13 @@
#!/usr/bin/env python
# -*- coding: UTF-8 no BOM -*-
import os,re,sys,math,numpy,string,damask
from scipy import ndimage
from optparse import OptionParser, Option
# -----------------------------
#--------------------------------------------------------------------------------------------------
class extendableOption(Option):
# -----------------------------
#--------------------------------------------------------------------------------------------------
# used for definition of the new option parser action 'extend', which allows an option to take multiple arguments
# taken from online tutorial http://docs.python.org/library/optparse.html
@ -44,23 +45,25 @@ def periodic_3Dpad(array, rimdim=(1,1,1)):
padded[p[0],p[1],p[2]] = array[spot[0],spot[1],spot[2]]
return padded
# --------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------
identifiers = {
'resolution': ['a','b','c'],
'dimension': ['x','y','z'],
'origin': ['x','y','z'],
}
mappings = {
'resolution': lambda x: int(x),
'dimension': lambda x: float(x),
'origin': lambda x: float(x),
'grid': ['a','b','c'],
'size': ['x','y','z'],
'origin': ['x','y','z'],
}
features = [ \
{'aliens': 1, 'names': ['boundary','biplane'],},
mappings = {
'grid': lambda x: int(x),
'size': lambda x: float(x),
'origin': lambda x: float(x),
'homogenization': lambda x: int(x),
'microstructures': lambda x: int(x),
}
features = [
{'aliens': 1, 'names': ['boundary','biplane'],},
{'aliens': 2, 'names': ['tripleline',],},
{'aliens': 3, 'names': ['quadruplepoint',],}
]
@ -113,12 +116,11 @@ boundaries, triple lines, and quadruple points.
)
parser.add_option('-t','--type', dest='type', action='extend', type='string', \
help='feature type (%s)'%(', '.join(map(lambda x:', '.join(x['names']),features))))
help='feature type (%s)'%(', '.join(map(lambda x:', '.join(x['names']),features))))
parser.add_option('-n','--neighborhood', dest='neighborhood', action='store', type='string', \
help='type of neighborhood (%s)'%(', '.join(neighborhoods.keys())), \
metavar='<int>')
help='type of neighborhood (%s) [neumann]'%(', '.join(neighborhoods.keys())))
parser.add_option('-2', '--twodimensional', dest='twoD', action='store_true', \
help='output geom file with two-dimensional data arrangement')
help='output geom file with two-dimensional data arrangement [%default]')
parser.set_defaults(type = [])
parser.set_defaults(neighborhood = 'neumann')
@ -133,14 +135,13 @@ if options.neighborhood not in neighborhoods:
feature_list = []
for i,feature in enumerate(features):
for name in feature['names']:
for type in options.type:
if name.startswith(type):
feature_list.append(i) # remember valid features
for myType in options.type:
if name.startswith(myType):
feature_list.append(i) # remember valid features
break
print feature_list
# ------------------------------------------ setup file handles ---------------------------------------
#--- setup file handles ---------------------------------------------------------------------------
files = []
if filenames == []:
files.append({'name':'STDIN',
@ -157,18 +158,15 @@ else:
'croak':sys.stdout,
})
# ------------------------------------------ loop over input files ---------------------------------------
#--- loop over input files ------------------------------------------------------------------------
for file in files:
if file['name'] != 'STDIN': file['croak'].write(file['name']+'\n')
# read the header: a single first row, or - if the keyword 'head' is present - the announced number of lines
firstline = file['input'].readline()
m = re.search('(\d+)\s*head', firstline.lower())
if m:
headerlines = int(m.group(1))
headers = [firstline]+[file['input'].readline() for i in range(headerlines)]
headers = [file['input'].readline() for i in range(headerlines)]
else:
headerlines = 1
headers = [firstline]
@ -176,15 +174,25 @@ for file in files:
content = file['input'].readlines()
file['input'].close()
info = {'resolution': numpy.array([0,0,0]),
'dimension': numpy.array([0.0,0.0,0.0]),
'origin': numpy.array([0.0,0.0,0.0]),
'homogenization': 1,
#--- interpret header -------------------------------------------------------------------------------
info = {
'grid': numpy.array([0,0,0]),
'size': numpy.array([0.0,0.0,0.0]),
'origin': numpy.array([0.0,0.0,0.0]),
'microstructures': 0,
'homogenization': 0
}
newInfo = {
'microstructures': 0,
}
new_header = []
new_header.append('$Id$\n')
for header in headers:
headitems = map(str.lower,header.split())
if headitems[0] == 'resolution': headitems[0] = 'grid'
if headitems[0] == 'dimension': headitems[0] = 'size'
if headitems[0] in mappings.keys():
if headitems[0] in identifiers.keys():
for i in xrange(len(identifiers[headitems[0]])):
@ -192,44 +200,39 @@ for file in files:
mappings[headitems[0]](headitems[headitems.index(identifiers[headitems[0]][i])+1])
else:
info[headitems[0]] = mappings[headitems[0]](headitems[1])
else:
new_header.append(header)
if numpy.all(info['resolution'] == 0):
file['croak'].write('no resolution info found.\n')
if numpy.all(info['grid'] == 0):
file['croak'].write('no grid info found.\n')
continue
if numpy.all(info['dimension'] == 0.0):
file['croak'].write('no dimension info found.\n')
if numpy.all(info['size'] == 0.0):
file['croak'].write('no size info found.\n')
continue
file['croak'].write('resolution: %s\n'%(' x '.join(map(str,info['resolution']))) + \
'dimension: %s\n'%(' x '.join(map(str,info['dimension']))) + \
'origin: %s\n'%(' : '.join(map(str,info['origin']))) + \
'homogenization: %i\n'%info['homogenization'])
file['croak'].write('grid a b c: %s\n'%(' x '.join(map(str,info['grid']))) + \
'size x y z: %s\n'%(' x '.join(map(str,info['size']))) + \
'origin x y z: %s\n'%(' : '.join(map(str,info['origin']))) + \
'homogenization: %i\n'%info['homogenization'] + \
'microstructures: %i\n'%info['microstructures'])
new_header.append("resolution\ta %i\tb %i\tc %i\n"%(
info['resolution'][0],
info['resolution'][1],
info['resolution'][2],))
new_header.append("dimension\tx %f\ty %f\tz %f\n"%(
info['dimension'][0],
info['dimension'][1],
info['dimension'][2],))
new_header.append("origin\tx %f\ty %f\tz %f\n"%(
info['origin'][0],
info['origin'][1],
info['origin'][2],))
new_header.append("grid\ta %i\tb %i\tc %i\n"%(info['grid'][0],info['grid'][1],info['grid'][2],))
new_header.append("size\tx %f\ty %f\tz %f\n"%(info['size'][0],info['size'][1],info['size'][2],))
new_header.append("origin\tx %f\ty %f\tz %f\n"%(info['origin'][0],info['origin'][1],info['origin'][2],))
new_header.append("homogenization\t%i\n"%info['homogenization'])
structure = numpy.zeros(info['resolution'],'i')
#--- process input --------------------------------------------------------------------------------
structure = numpy.zeros(info['grid'],'i')
i = 0
for line in content:
for item in map(int,line.split()):
structure[i%info['resolution'][0],
(i/info['resolution'][0])%info['resolution'][1],
i/info['resolution'][0] /info['resolution'][1]] = item
structure[i%info['grid'][0],
(i/info['grid'][0])%info['grid'][1],
i/info['grid'][0] /info['grid'][1]] = item
i += 1
neighborhood = neighborhoods[options.neighborhood]
convoluted = numpy.empty([len(neighborhood)]+list(info['resolution']+2),'i')
convoluted = numpy.empty([len(neighborhood)]+list(info['grid']+2),'i')
microstructure = periodic_3Dpad(structure)
for i,p in enumerate(neighborhood):
@ -241,11 +244,11 @@ for file in files:
convoluted[i,:,:,:] = ndimage.convolve(microstructure,stencil)
distance = numpy.ones((len(feature_list),info['resolution'][0],info['resolution'][1],info['resolution'][2]),'d')
distance = numpy.ones((len(feature_list),info['grid'][0],info['grid'][1],info['grid'][2]),'d')
convoluted = numpy.sort(convoluted,axis=0)
uniques = numpy.zeros(info['resolution'])
check = numpy.empty(info['resolution'])
uniques = numpy.zeros(info['grid'])
check = numpy.empty(info['grid'])
check[:,:,:] = numpy.nan
for i in xrange(len(neighborhood)):
uniques += numpy.where(convoluted[i,1:-1,1:-1,1:-1] == check,0,1)
@ -254,32 +257,30 @@ for file in files:
distance[i,:,:,:] = numpy.where(uniques > features[feature_id]['aliens'],0.0,1.0)
for i in xrange(len(feature_list)):
distance[i,:,:,:] = ndimage.morphology.distance_transform_edt(distance[i,:,:,:])*[max(info['dimension']/info['resolution'])]*3
distance[i,:,:,:] = ndimage.morphology.distance_transform_edt(distance[i,:,:,:])*\
[max(info['size']/info['grid'])]*3
for i,feature in enumerate(feature_list):
formatwidth = int(math.floor(math.log10(distance[i,:,:,:].max())+1))
newInfo['microstructures'] = int(math.ceil(distance[i,:,:,:].max()))
formatwidth = int(math.floor(math.log10(distance[i,:,:,:].max())+1))
# ------------------------------------------ assemble header ---------------------------------------
#--- assemble header and report changes -----------------------------------------------------------
output = '%i\theader\n'%(len(new_header)+1)
output += ''.join(new_header)
output += "microstructures\t%i\n"%newInfo['microstructures']
file['croak'].write('\n'+features[i]['names'][0]+'\n')
if (newInfo['microstructures'] != info['microstructures']):
file['croak'].write('--> microstructures: %i\n'%newInfo['microstructures'])
output = '%i\theader\n'%(len(new_header))
output += ''.join(new_header)
#--- write new data -------------------------------------------------------------------------------
for z in xrange(info['grid'][2]):
for y in xrange(info['grid'][1]):
output += {True:' ',False:'\n'}[options.twoD].join(map(lambda x: \
('%%%ii'%formatwidth)%(round(x)), distance[i,:,y,z])) + '\n'
file['output'][i].write(output)
# ------------------------------------- regenerate texture information ----------------------------------
for z in xrange(info['resolution'][2]):
for y in xrange(info['resolution'][1]):
output += {True:' ',False:'\n'}[options.twoD].join(map(lambda x: ('%%%ii'%formatwidth)%(round(x)), distance[i,:,y,z])) + '\n'
# ------------------------------------------ output result ---------------------------------------
file['output'][i].write(output)
if file['name'] != 'STDIN':
file['output'][i].close()
if file['name'] != 'STDIN':
file['output'][i].close()
#--- output finalization --------------------------------------------------------------------------
if file['name'] != 'STDIN':
file['input'].close() # close input geom file
file['input'].close()
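
The script above marks a voxel as lying on a grain boundary, triple line, or quadruple point by counting how many distinct microstructure indices ('aliens') occur in its von Neumann neighborhood, then converts that mask into distances with a Euclidean distance transform scaled by the voxel size. Below is a compact re-expression of that idea, assuming numpy/scipy are available; it is not the script's actual implementation (which builds one convolution stencil per neighbor), and the two-grain test structure is made up.

import numpy
from scipy import ndimage

grid = numpy.array([6,4,4])                   # made-up 6x4x4 box of two grains
size = numpy.array([1.5,1.0,1.0])             # voxel edge length 0.25 in every direction
structure = numpy.ones(grid,'i')
structure[3:,:,:] = 2                         # second grain in the upper half along x

padded = numpy.pad(structure,1,mode='wrap')   # periodic padding, as in periodic_3Dpad

neighborhood = [( 1,0,0),(-1,0,0),            # von Neumann: the six face neighbors
                ( 0,1,0),( 0,-1,0),
                ( 0,0,1),( 0,0,-1)]
stack = numpy.stack([padded[1+dx:1+dx+grid[0],
                            1+dy:1+dy+grid[1],
                            1+dz:1+dz+grid[2]] for dx,dy,dz in neighborhood]+[structure])

uniques = numpy.zeros(grid,'i')               # distinct indices seen per voxel (self included)
for x in range(grid[0]):
  for y in range(grid[1]):
    for z in range(grid[2]):
      uniques[x,y,z] = len(set(stack[:,x,y,z]))

aliens = 1                                    # 'boundary': more than one index present
mask = numpy.where(uniques > aliens,0.0,1.0)  # boundary voxels become distance seeds
distance = ndimage.distance_transform_edt(mask)*max(size/grid)
print(distance[1,0,0])                        # one voxel away from the interface: 0.25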

View File

@ -4,10 +4,9 @@
import os,sys,string,re,math,numpy
from optparse import OptionParser, OptionGroup, Option, SUPPRESS_HELP
# -----------------------------
#--------------------------------------------------------------------------------------------------
class extendedOption(Option):
# -----------------------------
#--------------------------------------------------------------------------------------------------
# used for definition of the new option parser action 'extend', which allows an option to take multiple arguments
# taken from online tutorial http://docs.python.org/library/optparse.html
@ -23,18 +22,18 @@ class extendedOption(Option):
else:
Option.take_action(self, action, dest, opt, value, values, parser)
# ----------------------- MAIN -------------------------------
#--------------------------------------------------------------------------------------------------
# MAIN
#--------------------------------------------------------------------------------------------------
identifiers = {
'resolution': ['a','b','c'],
'dimension': ['x','y','z'],
'origin': ['x','y','z'],
'grid': ['a','b','c'],
'size': ['x','y','z'],
'origin': ['x','y','z'],
}
mappings = {
'resolution': lambda x: int(x),
'dimension': lambda x: float(x),
'origin': lambda x: float(x),
'grid': lambda x: int(x),
'size': lambda x: float(x),
'origin': lambda x: float(x),
'homogenization': lambda x: int(x),
}
@ -61,11 +60,10 @@ parser.set_defaults(twoD = False)
(options, filenames) = parser.parse_args()
sub = {}
for i in xrange(len(options.substitute)/2): # split substitution list into "from" -> "to"
for i in xrange(len(options.substitute)/2): # split substitution list into "from" -> "to"
sub[int(options.substitute[i*2])] = int(options.substitute[i*2+1])
# ------------------------------------------ setup file handles ---------------------------------------
#--- setup file handles ---------------------------------------------------------------------------
files = []
if filenames == []:
files.append({'name':'STDIN',
@ -82,18 +80,15 @@ else:
'croak':sys.stdout,
})
# ------------------------------------------ loop over input files ---------------------------------------
#--- loop over input files ------------------------------------------------------------------------
for file in files:
if file['name'] != 'STDIN': file['croak'].write(file['name']+'\n')
# read the header: a single first row, or - if the keyword 'head' is present - the announced number of lines
firstline = file['input'].readline()
m = re.search('(\d+)\s*head', firstline.lower())
if m:
headerlines = int(m.group(1))
headers = [firstline]+[file['input'].readline() for i in range(headerlines)]
headers = [file['input'].readline() for i in range(headerlines)]
else:
headerlines = 1
headers = [firstline]
@ -101,15 +96,25 @@ for file in files:
content = file['input'].readlines()
file['input'].close()
info = {'resolution': numpy.array([0,0,0]),
'dimension': numpy.array([0.0,0.0,0.0]),
'origin': numpy.array([0.0,0.0,0.0]),
'homogenization': 1,
#--- interpret header -------------------------------------------------------------------------------
info = {
'grid': numpy.array([0,0,0]),
'size': numpy.array([0.0,0.0,0.0]),
'origin': numpy.zeros(3,'d'),
'microstructures': 0,
'homogenization': 0
}
newInfo = {
'origin': numpy.zeros(3,'d'),
'microstructures': 0,
}
new_header = []
new_header.append('$Id$\n')
for header in headers:
headitems = map(str.lower,header.split())
if headitems[0] == 'resolution': headitems[0] = 'grid'
if headitems[0] == 'dimension': headitems[0] = 'size'
if headitems[0] in mappings.keys():
if headitems[0] in identifiers.keys():
for i in xrange(len(identifiers[headitems[0]])):
@ -117,67 +122,67 @@ for file in files:
mappings[headitems[0]](headitems[headitems.index(identifiers[headitems[0]][i])+1])
else:
info[headitems[0]] = mappings[headitems[0]](headitems[1])
else:
new_header.append(header)
if numpy.all(info['resolution'] == 0):
file['croak'].write('no resolution info found.\n')
if numpy.all(info['grid'] == 0):
file['croak'].write('no grid info found.\n')
continue
if numpy.all(info['dimension'] == 0.0):
file['croak'].write('no dimension info found.\n')
if numpy.all(info['size'] == 0.0):
file['croak'].write('no size info found.\n')
continue
file['croak'].write('resolution: %s\n'%(' x '.join(map(str,info['resolution']))) + \
'dimension: %s\n'%(' x '.join(map(str,info['dimension']))) + \
'origin: %s\n'%(' : '.join(map(str,info['origin']))) + \
'homogenization: %i\n'%info['homogenization'])
file['croak'].write('grid a b c: %s\n'%(' x '.join(map(str,info['grid']))) + \
'size x y z: %s\n'%(' x '.join(map(str,info['size']))) + \
'origin x y z: %s\n'%(' : '.join(map(str,info['origin']))) + \
'homogenization: %i\n'%info['homogenization'] + \
'microstructures: %i\n\n'%info['microstructures'])
new_header.append("resolution\ta %i\tb %i\tc %i\n"%(
info['resolution'][0],
info['resolution'][1],
info['resolution'][2],))
new_header.append("dimension\tx %f\ty %f\tz %f\n"%(
info['dimension'][0],
info['dimension'][1],
info['dimension'][2]))
new_header.append("origin\tx %f\ty %f\tz %f\n"%(
info['origin'][0]+options.origin[0],
info['origin'][1]+options.origin[1],
info['origin'][2]+options.origin[2]))
new_header.append("homogenization\t%i\n"%info['homogenization'])
# ------------------------------------------ assemble header ---------------------------------------
output = '%i\theader\n'%(len(new_header))
output += ''.join(new_header)
file['output'].write(output)
# ------------------------------------------ process input ---------------------------------------
N = info['resolution'][0]*info['resolution'][1]*info['resolution'][2]
#--- process input --------------------------------------------------------------------------------
N = info['grid'][0]*info['grid'][1]*info['grid'][2]
microstructure = numpy.zeros(N,'i')
i = 0
for line in content:
d = map(int,line.split())
s = len(d)
microstructure[i:i+s] = d # read microstructure indices
microstructure[i:i+s] = d # read microstructure indices
i += s
for i in xrange(N):
if microstructure[i] in sub: microstructure[i] = sub[microstructure[i]] # substitute microstructure indices
if microstructure[i] in sub: microstructure[i] = sub[microstructure[i]] # substitute microstructure indices
microstructure += options.microstructure # shift microstructure indices
microstructure += options.microstructure # shift microstructure indices
formatwidth = int(math.floor(math.log10(microstructure.max())+1))
#--- assemble header and report changes -----------------------------------------------------------
newInfo['origin'] = info['origin'] + options.origin
newInfo['microstructures'] = microstructure.max()
if (any(newInfo['origin'] != info['origin'])):
file['croak'].write('--> origin x y z: %s\n'%(' : '.join(map(str,newInfo['origin']))))
if (newInfo['microstructures'] != info['microstructures']):
file['croak'].write('--> microstructures: %i\n'%newInfo['microstructures'])
new_header.append("grid\ta %i\tb %i\tc %i\n"%(info['grid'][0],info['grid'][1],info['grid'][2],))
new_header.append("size\tx %f\ty %f\tz %f\n"%(info['size'][0],info['size'][1],info['size'][2],))
new_header.append("origin\tx %f\ty %f\tz %f\n"%(
newInfo['origin'][0],newInfo['origin'][1],newInfo['origin'][2],))
new_header.append("microstructures\t%i\n"%newInfo['microstructures'])
new_header.append("homogenization\t%i\n"%info['homogenization'])
file['output'].write('%i\theader\n'%(len(new_header))+''.join(new_header))
#--- write new data -------------------------------------------------------------------------------
i = 0
for z in xrange(info['resolution'][2]):
for y in xrange(info['resolution'][1]):
output = {True:' ',False:'\n'}[options.twoD].join(map(lambda x: ('%%%ii'%formatwidth)%x, microstructure[i:i+info['resolution'][0]])) + '\n'
for z in xrange(info['grid'][2]):
for y in xrange(info['grid'][1]):
output = {True:' ',False:'\n'}[options.twoD].join(map(lambda x: ('%%%ii'%formatwidth)%x,
microstructure[i:i+info['grid'][0]])) + '\n'
file['output'].write(output)
i += info['resolution'][0]
# ------------------------------------------ output finalization ---------------------------------------
i += info['grid'][0]
#--- output finalization --------------------------------------------------------------------------
if file['name'] != 'STDIN':
file['output'].close()
os.rename(file['name']+'_tmp',file['name'])
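
Both geom scripts share the same header convention: the first line announces how many header lines follow ('<N> header', detected via the 'head' keyword), and on output a fresh count plus the keyword lines are written before the data. Here is a small sketch of that round trip; the geom content is made up, a StringIO stands in for the real file handles, and the keyword lines are passed through unchanged instead of being rebuilt from the parsed info as the scripts do.

import re
try:
  from StringIO import StringIO              # Python 2
except ImportError:
  from io import StringIO                    # Python 3

geom = StringIO('2 header\n'
                'grid a 2 b 2 c 1\n'
                'size x 1.0 y 1.0 z 1.0\n'
                '1 1 2 2\n')

firstline = geom.readline()
m = re.search(r'(\d+)\s*head',firstline.lower())
if m:                                        # keyword present: read that many header lines
  headerlines = int(m.group(1))
  headers = [geom.readline() for i in range(headerlines)]
else:                                        # no keyword: the first line is the only header line
  headerlines = 1
  headers = [firstline]
content = geom.readlines()

new_header = ['$Id$\n'] + headers            # the scripts prepend their version line
output  = '%i\theader\n'%len(new_header)     # fresh header count
output += ''.join(new_header)
output += ''.join(content)
print(output)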

View File

@ -1,4 +1,6 @@
#!/usr/bin/env python
# -*- coding: UTF-8 no BOM -*-
'''
Writes meaningful labels to the marc input file (*.dat)
based on the files

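The 'coding information below shebang' mentioned in the commit message is the two-line prologue visible in the hunk above and now present at the top of every touched script:

#!/usr/bin/env python
# -*- coding: UTF-8 no BOM -*-

Per PEP 263 the interpreter only parses the encoding name itself (UTF-8) out of this comment; the trailing 'no BOM' is a reminder for human editors, not something Python evaluates.
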
View File

@ -1,4 +1,5 @@
#!/usr/bin/env python
# -*- coding: UTF-8 no BOM -*-
import sys,os,pwd,math,re,string,numpy, damask
from optparse import OptionParser
@ -149,10 +150,9 @@ def servoLink():
return cmds
# ----------------------- MAIN -------------------------------
#--------------------------------------------------------------------------------------------------
# MAIN
#--------------------------------------------------------------------------------------------------
parser = OptionParser(usage='%prog [options]', description = """
Set up servo linking to achieve periodic boundary conditions for a regular hexahedral mesh presently opened in MSC.Mentat

View File

@ -1,12 +1,12 @@
#!/usr/bin/env python
# -*- coding: UTF-8 no BOM -*-
import os, sys, math, re, threading, time, string, damask
from optparse import OptionParser, OptionGroup, Option, SUPPRESS_HELP
# ----------------------- FUNCTIONS ----------------------------
#-------------------------------------------------------------------------------------------------
def outMentat(cmd,locals):
#-------------------------------------------------------------------------------------------------
if cmd[0:3] == '(!)':
exec(cmd[3:])
elif cmd[0:3] == '(?)':
@ -16,7 +16,9 @@ def outMentat(cmd,locals):
py_send(cmd)
return
#-------------------------------------------------------------------------------------------------
def outStdout(cmd,locals):
#-------------------------------------------------------------------------------------------------
if cmd[0:3] == '(!)':
exec(cmd[3:])
elif cmd[0:3] == '(?)':
@ -26,8 +28,9 @@ def outStdout(cmd,locals):
print cmd
return
#-------------------------------------------------------------------------------------------------
def output(cmds,locals,dest):
#-------------------------------------------------------------------------------------------------
for cmd in cmds:
if isinstance(cmd,list):
output(cmd,locals,dest)
@ -40,9 +43,9 @@ def output(cmds,locals,dest):
#--------------------
#-------------------------------------------------------------------------------------------------
def init():
#--------------------
#-------------------------------------------------------------------------------------------------
return ["*new_model yes",
"*reset",
"*select_clear",
@ -56,9 +59,9 @@ def init():
]
#--------------------
#-------------------------------------------------------------------------------------------------
def mesh(r,d):
#--------------------
#-------------------------------------------------------------------------------------------------
return [
"*add_nodes",
"%f %f %f"%(0.0,0.0,0.0),
@ -88,9 +91,9 @@ def mesh(r,d):
]
#--------------------
#-------------------------------------------------------------------------------------------------
def material():
#--------------------
#-------------------------------------------------------------------------------------------------
cmds = [\
"*new_mater standard",
"*mater_option general:state:solid",
@ -100,7 +103,7 @@ def material():
"*add_mater_elements",
"all_existing",
"*geometry_type mech_three_solid",
# "*geometry_option red_integ_capacity:on", # see below: reduced integration with one IP gave trouble being always OUTDATED...
# "*geometry_option red_integ_capacity:on", # see below: reduced integration with one IP gave trouble being always OUTDATED...
"*add_geometry_elements",
"all_existing",
]
@ -108,27 +111,27 @@ def material():
return cmds
#--------------------
#-------------------------------------------------------------------------------------------------
def geometry():
#--------------------
#-------------------------------------------------------------------------------------------------
cmds = [\
"*geometry_type mech_three_solid",
# "*geometry_option red_integ_capacity:on",
"*add_geometry_elements",
"all_existing",
"*element_type 7", # we are NOT using reduced integration (type 117) but opt for /elementhomogeneous/ in the respective phase description (material.config)
"*element_type 7", # we are NOT using reduced integration (type 117) but opt for /elementhomogeneous/ in the respective phase description (material.config)
"all_existing",
]
return cmds
#--------------------
def initial_conditions(homogenization,grains):
#--------------------
#-------------------------------------------------------------------------------------------------
def initial_conditions(homogenization,microstructures):
#-------------------------------------------------------------------------------------------------
elements = []
element = 0
for id in grains:
for id in microstructures:
element += 1
if len(elements) < id:
for i in range(id-len(elements)):
@ -166,61 +169,62 @@ def initial_conditions(homogenization,grains):
return cmds
#--------------------
#-------------------------------------------------------------------------------------------------
def parse_geomFile(content,homog):
#--------------------
#-------------------------------------------------------------------------------------------------
(skip,key) = content[0].split()[:2]
if key[:4].lower() == 'head':
skip = int(skip)+1
else:
skip = 0
res = [0,0,0]
dim = [0.0,0.0,0.0]
grid = [0,0,0]
size = [0.0,0.0,0.0]
homog = 0
for line in content[:skip]:
data = line.split()
if data[0].lower() == 'resolution':
res = map(int,data[2:8:2])
if data[0].lower() == 'dimension':
dim = map(float,data[2:8:2])
if data[0].lower() == 'grid' or data[0].lower() == 'resolution':
grid = map(int,data[2:8:2])
if data[0].lower() == 'size' or data[0].lower() == 'dimension':
size = map(float,data[2:8:2])
if data[0].lower() == 'homogenization':
homog = int(data[1])
grains = []
microstructures = []
for line in content[skip:]:
grains.append(int(line.split()[0]))
microstructures.append(int(line.split()[0]))
return (res,dim,homog,grains)
return (grid,size,homog,microstructures)
#--------------------
#-------------------------------------------------------------------------------------------------
def parse_spectralFile(content,homog):
#--------------------
#-------------------------------------------------------------------------------------------------
coords = [{},{},{}]
maxBox = [-1.0e20,-1.0e20,-1.0e20]
minBox = [ 1.0e20, 1.0e20, 1.0e20]
dim = [0.0,0.0,0.0]
res = [0,0,0]
grains = []
grid = [0,0,0]
size = [0.0,0.0,0.0]
microstructures = []
for line in content:
data = line.split()[3:7]
grains.append(int(data[3]))
microstructures.append(int(data[3]))
for i in range(3):
maxBox[i] = max(maxBox[i],float(data[i]))
minBox[i] = min(minBox[i],float(data[i]))
coords[i][data[i]] = True
for i in range(3):
res[i] = len(coords[i])
dim[i] = (maxBox[i]-minBox[i])*res[i]/(res[i]-1.0)
grid[i] = len(coords[i])
size[i] = (maxBox[i]-minBox[i])*grid[i]/(grid[i]-1.0)
return (res,dim,homog,grains)
# ----------------------- MAIN -------------------------------
return (grid,size,homog,microstructures)
#--------------------------------------------------------------------------------------------------
# MAIN
#--------------------------------------------------------------------------------------------------
parser = OptionParser(usage='%prog [options] spectral.datafile', description = """
Generate FE hexahedral mesh from spectral description file.
@ -231,18 +235,18 @@ spectral: phi1,Phi,phi2,x,y,z,id,phase.
""" + string.replace('$Id$','\n','\\n')
)
parser.add_option("-p", "--port", type="int",\
dest="port",\
help="Mentat connection port")
dest="port",\
help="Mentat connection port")
parser.add_option("-g", "--geom", action="store_const", const="geom",\
dest="filetype",\
help="file has 'geom' format")
dest="filetype",\
help="file has 'geom' format")
parser.add_option("-s", "--spectral", action="store_const", const="spectral",\
dest="filetype",\
help="file has 'spectral' format")
dest="filetype",\
help="file has 'spectral' format (VPSC Lebensohn)")
parser.add_option("--homogenization", type="int",\
dest="homogenization",\
help="homogenization index from material.config (only required for spectral file type)")
dest="homogenization",\
help="homogenization index from material.config (only required for spectral file type)")
parser.set_defaults(filetype = 'geom')
@ -272,20 +276,20 @@ if options.filetype not in ['spectral','geom']:
print '\nparsing %s...'%options.filetype,
sys.stdout.flush()
(res,dim,homog,grains) = {\
(grid,size,homog,microstructures) = {\
'geom': parse_geomFile,
'spectral': parse_spectralFile,
}[options.filetype](content,options.homogenization)
print '%i grains in %s with resolution %s and homogenization %i\n'%(len(list(set(grains))),str(dim),str(res),homog)
print '%i microstructures in %s with grid %s and homogenization %i\n'%(len(list(set(microstructures))),str(size),str(grid),homog)
cmds = [\
init(),
mesh(res,dim),
mesh(grid,size),
material(),
geometry(),
initial_conditions(homog,grains),
initial_conditions(homog,microstructures),
'*identify_sets',
'*redraw',
]
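
parse_spectralFile recovers grid and size from the coordinate columns alone: the number of distinct coordinate values per axis gives the grid, and because those coordinates are cell centers - spanning only grid-1 cell widths - the bounding-box span is stretched by grid/(grid-1) to obtain the full box size. A one-axis worked example with made-up coordinates:

centers = [0.125, 0.375, 0.625, 0.875]   # hypothetical cell centers along one axis
grid = len(set(centers))                 # 4 distinct coordinates
span = max(centers) - min(centers)       # 0.75: centers cover grid-1 = 3 cell widths
size = span*grid/(grid-1.0)              # 0.75 * 4/3
print(size)                              # 1.0, the full box edge length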

View File

@ -1,4 +1,5 @@
#!/usr/bin/env python
# -*- coding: UTF-8 no BOM -*-
import sys,os,math,re,string, damask
from optparse import OptionParser, OptionGroup, Option, SUPPRESS_HELP