Started to put Marc/Mentat-related scripts into the corresponding post-/pre-processing folders. We should move development from my Code folder to here (sorry, the history was lost in the move...)
This commit is contained in:
parent
d1ed15fbdf
commit
94efd57663
@ -0,0 +1,725 @@
#!/usr/bin/env python

releases = ['2010b3','2008r1','2007r1','2005r3']


import os, sys, math, re, threading, time
from optparse import OptionParser, OptionGroup, Option, SUPPRESS_HELP

for release in releases:
  libPath = '/msc/mentat%s/shlib/'%release
  if os.path.exists(libPath):
    sys.path.append(libPath)
    break
from py_post import *


# -----------------------------
class MyOption(Option):
# -----------------------------
# used to define the new option parser action 'extend', which enables one option to take multiple arguments
# taken from the online tutorial http://docs.python.org/library/optparse.html

  ACTIONS = Option.ACTIONS + ("extend",)
  STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
  TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
  ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)

  def take_action(self, action, dest, opt, value, values, parser):
    if action == "extend":
      lvalue = value.split(",")
      values.ensure_value(dest, []).extend(lvalue)
    else:
      Option.take_action(self, action, dest, opt, value, values, parser)
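
# A minimal sketch of what the 'extend' action does, mirroring the '--separation'
# option defined below: comma-separated values accumulate into one list, so
# repeated use of the option and comma lists are equivalent.
#
#   demo = OptionParser(option_class=MyOption)
#   demo.add_option('--separation', action='extend', type='string', dest='separation')
#   (opts,_) = demo.parse_args(['--separation','x,y','--separation','z'])
#   assert opts.separation == ['x','y','z']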


# -----------------------------
class backgroundMessage(threading.Thread):
# -----------------------------
# writes a rotating progress symbol plus a changeable message to stderr from a background thread

  def __init__(self):
    threading.Thread.__init__(self)
    self.message = ''
    self.new_message = ''
    self.counter = 0
    self.symbols = ['- ', '\ ', '| ', '/ ']
    self.waittime = 0.5

  def __quit__(self):
    length = len(self.message) + len(self.symbols[self.counter])
    sys.stderr.write(chr(8)*length + ' '*length + chr(8)*length)
    sys.stderr.write('')

  def run(self):
    while not threading.enumerate()[0]._Thread__stopped:   # loop until the main thread has stopped
      time.sleep(self.waittime)
      self.update_message()
    self.__quit__()

  def set_message(self, new_message):
    self.new_message = new_message
    self.print_message()

  def print_message(self):
    length = len(self.message) + len(self.symbols[self.counter])
    sys.stderr.write(chr(8)*length + ' '*length + chr(8)*length)   # delete former message
    sys.stderr.write(self.symbols[self.counter] + self.new_message)   # print new message
    self.message = self.new_message

  def update_message(self):
    self.counter = (self.counter + 1)%len(self.symbols)
    self.print_message()


# -----------------------------
def ipCoords(elemType, nodalCoordinates):
#
# returns IP coordinates for a given element
# -----------------------------

  nodeWeightsPerNode = {
       7: [ [27.0,  9.0,  3.0,  9.0,  9.0,  3.0,  1.0,  3.0],
            [ 9.0, 27.0,  9.0,  3.0,  3.0,  9.0,  3.0,  1.0],
            [ 3.0,  9.0, 27.0,  9.0,  1.0,  3.0,  9.0,  3.0],
            [ 9.0,  3.0,  9.0, 27.0,  3.0,  1.0,  3.0,  9.0],
            [ 9.0,  3.0,  1.0,  3.0, 27.0,  9.0,  3.0,  9.0],
            [ 3.0,  9.0,  3.0,  1.0,  9.0, 27.0,  9.0,  3.0],
            [ 1.0,  3.0,  9.0,  3.0,  3.0,  9.0, 27.0,  9.0],
            [ 3.0,  1.0,  3.0,  9.0,  9.0,  3.0,  9.0, 27.0] ],
     117: [ [ 1.0,  1.0,  1.0,  1.0,  1.0,  1.0,  1.0,  1.0],
            [ 1.0,  1.0,  1.0,  1.0,  1.0,  1.0,  1.0,  1.0],
            [ 1.0,  1.0,  1.0,  1.0,  1.0,  1.0,  1.0,  1.0],
            [ 1.0,  1.0,  1.0,  1.0,  1.0,  1.0,  1.0,  1.0],
            [ 1.0,  1.0,  1.0,  1.0,  1.0,  1.0,  1.0,  1.0],
            [ 1.0,  1.0,  1.0,  1.0,  1.0,  1.0,  1.0,  1.0],
            [ 1.0,  1.0,  1.0,  1.0,  1.0,  1.0,  1.0,  1.0],
            [ 1.0,  1.0,  1.0,  1.0,  1.0,  1.0,  1.0,  1.0] ],
     136: [ [42.0, 15.0, 15.0, 14.0,  5.0,  5.0],
            [15.0, 42.0, 15.0,  5.0, 14.0,  5.0],
            [15.0, 15.0, 42.0,  5.0,  5.0, 14.0],
            [14.0,  5.0,  5.0, 42.0, 15.0, 15.0],
            [ 5.0, 14.0,  5.0, 15.0, 42.0, 15.0],
            [ 5.0,  5.0, 14.0, 15.0, 15.0, 42.0] ],
    }

  ipCoordinates = [[0.0,0.0,0.0] for i in range(len(nodalCoordinates))]
  for ip in range(len(nodeWeightsPerNode[elemType])):
    for node in range(len(nodeWeightsPerNode[elemType][ip])):
      for i in range(3):
        ipCoordinates[ip][i] += nodeWeightsPerNode[elemType][ip][node] * nodalCoordinates[node][i]
    for i in range(3):
      ipCoordinates[ip][i] /= sum(nodeWeightsPerNode[elemType][ip])

  return ipCoordinates
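
# Worked example, assuming a unit-cube element of type 7 with the node ordering
# (0,0,0),(1,0,0),(1,1,0),(0,1,0),(0,0,1),(1,0,1),(1,1,1),(0,1,1):
# the first weight row [27,9,3,9,9,3,1,3] sums to 64, so the first IP lands at
# (16/64,16/64,16/64) = (0.25,0.25,0.25), i.e. closest to the first node.
#
#   unitCube = [[0.,0.,0.],[1.,0.,0.],[1.,1.,0.],[0.,1.,0.],
#               [0.,0.,1.],[1.,0.,1.],[1.,1.,1.],[0.,1.,1.]]
#   assert ipCoords(7, unitCube)[0] == [0.25, 0.25, 0.25]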


# -----------------------------
def sortBySeparation(dataArray, criteria, offset):
#
# sorting of groupValue array according to list of criteria
# -----------------------------
  where = {
    'elem': 1,
    'node': 2,
    'grain': 3,
    'x': 4,
    'y': 5,
    'z': 6,
    }

  theKeys = []
  for criterium in criteria:
    if criterium in where:
      theKeys.append('x[%i]'%(offset+where[criterium]))
  exec('sortedArray = sorted(dataArray,key=lambda x:(%s))'%(','.join(theKeys)))
  return sortedArray
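
# Example: with criteria ['z','x'] and offset 1 (i.e. output rows starting with
# 'inc','time'), the keys become 'x[7]' and 'x[5]', and the exec above amounts to
#   sortedArray = sorted(dataArray, key=lambda x:(x[7],x[5]))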


# -----------------------------
def substituteLocation(string, mesh, coords):
#
# do variable interpolation in group and filter strings
# -----------------------------
  substitute = string
  substitute = substitute.replace('elem', str(mesh[0]))
  substitute = substitute.replace('node', str(mesh[1]))
  substitute = substitute.replace('grain', str(mesh[2]))
  substitute = substitute.replace('x', '%.6g'%coords[0])
  substitute = substitute.replace('y', '%.6g'%coords[1])
  substitute = substitute.replace('z', '%.6g'%coords[2])
  return substitute
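
# Example: for mesh [1,2,0] and coords [0.5,-0.25,0.0] the filter string
# 'y >= 0.0' becomes '-0.25 >= 0.0' (which evaluates to False). Note that the
# substitution is purely textual, so filter and separation strings should use
# the key words 'elem','node','grain','x','y','z' only as standalone tokens.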


# -----------------------------
def average(theList):
#
# calculates the average of a list of numbers
# -----------------------------

  return sum(map(float,theList))/len(theList)


# -----------------------------
def mapFunc(label, chunks, func):
#
# applies the function defined by "func"
# (can be either 'min', 'max', 'avg', 'sum', or user-specified)
# to a list of lists of data
# -----------------------------

  illegal = {
    'eulerangles': ['min','max','avg','sum'],
    'defgrad': ['min','max','avg','sum'],
    'orientation': ['min','max','sum'],
    }

  if label.lower() in illegal and func in illegal[label.lower()]:   # for illegal mappings:...
    return ['n/a' for i in range(len(chunks[0]))]                   # ...return 'n/a'

  else:
    if func in ['min','max','avg']:
      mapped = [{ 'min': lambda x: min(x),
                  'max': lambda x: max(x),
                  'avg': lambda x: average(x),
                  'sum': lambda x: sum(x),
                }[func](column) for column in zip(*chunks)]         # map one of the standard functions to columns in chunks
      if label.lower() == 'orientation':                            # orientation is a special case:...
        orientationNorm = math.sqrt(sum([q*q for q in mapped]))     # ...calc norm of average quaternion
        mapped = map(lambda x: x/orientationNorm, mapped)           # ...renormalize quaternion
    else:
      try:
        mapped = eval('map(%s,zip(*chunks))'%func)                  # map user-defined function to columns in chunks
      except:
        mapped = ['n/a' for i in range(len(chunks[0]))]

  return mapped
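
# Example: two group members with chunks [1.0,2.0] and [3.0,6.0] give
#   mapFunc('flow stress', [[1.0,2.0],[3.0,6.0]], 'avg') == [2.0, 4.0]
# i.e. the chosen function is applied per column across all members of a group.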


# -----------------------------
def OpenPostfile(name):
#
# open postfile with extrapolation mode "translate"
# -----------------------------

  p = post_open(name)
  p.extrapolation('translate')
  p.moveto(1)

  return p


# -----------------------------
def ParseOutputFormat(filename,what,me):
#
# parse .output* files in order to get a list of outputs
# -----------------------------

  format = {'outputs':{},'specials':{'brothers':[]}}
  for prefix in ['']+map(str,range(1,17)):
    if os.path.exists(prefix+filename+'.output'+what):
      break
  try:
    file = open(prefix+filename+'.output'+what)
    content = file.readlines()
    file.close()
  except:
    return format

  tag = ''
  tagID = 0
  for line in content:
    if re.match("\s*$",line) or re.match("#",line):     # skip blank lines and comments
      continue
    m = re.match("\[(.+)\]",line)                       # look for block indicator
    if m:                                               # next section
      tag = m.group(1)
      tagID += 1
      format['specials']['brothers'].append(tag)
      if tag == me or (me.isdigit() and tagID == int(me)):
        format['specials']['_id'] = tagID
        format['outputs'] = []
        tag = me
    else:                                               # data from section
      if tag == me:
        (output,length) = line.split()
        output = output.lower()                         # normalize label to lower case
        if length.isdigit():
          length = int(length)
        if re.match("\((.+)\)",output):                 # special data, e.g. (ngrains)
          format['specials'][output] = length
        elif length > 0:
          format['outputs'].append([output,length])
  return format
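
# A hypothetical <model_job>.outputHomogenization file of the layout parsed above:
#
#   [SX]
#   (ngrains)  1
#
# yields {'outputs': [], 'specials': {'brothers': ['SX'], '_id': 1, '(ngrains)': 1}}
# when called with me='SX' (or me='1'); ParsePostfile below relies on the
# '(ngrains)' special to know how many grains to expect per element.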


# -----------------------------
def ParsePostfile(p,filename, outputFormat):
#
# parse postfile in order to get position and labels of outputs
# needs "outputFormat" for mapping of output names to postfile output indices
# -----------------------------

  # --- build statistics

  stat = {
    'IndexOfLabel': {},
    'Title': p.title(),
    'Extrapolation': p.extrapolate,
    'NumberOfIncrements': p.increments(),
    'NumberOfNodes': p.nodes(),
    'NumberOfNodalScalars': p.node_scalars(),
    'LabelOfNodalScalar': [None]*p.node_scalars(),
    'NumberOfElements': p.elements(),
    'NumberOfElementalScalars': p.element_scalars(),
    'LabelOfElementalScalar': [None]*p.element_scalars(),
    'NumberOfElementalTensors': p.element_tensors(),
    'LabelOfElementalTensor': [None]*p.element_tensors(),
    }

  # --- find labels

  for labelIndex in range(stat['NumberOfNodalScalars']):
    label = p.node_scalar_label(labelIndex)
    stat['IndexOfLabel'][label] = labelIndex
    stat['LabelOfNodalScalar'][labelIndex] = label

  for labelIndex in range(stat['NumberOfElementalScalars']):
    label = p.element_scalar_label(labelIndex)
    stat['IndexOfLabel'][label] = labelIndex
    stat['LabelOfElementalScalar'][labelIndex] = label

  for labelIndex in range(stat['NumberOfElementalTensors']):
    label = p.element_tensor_label(labelIndex)
    stat['IndexOfLabel'][label] = labelIndex
    stat['LabelOfElementalTensor'][labelIndex] = label

  if 'User Defined Variable 1' in stat['IndexOfLabel']:
    stat['IndexOfLabel']['GrainCount'] = stat['IndexOfLabel']['User Defined Variable 1']

  if 'GrainCount' in stat['IndexOfLabel']:               # does the result file contain relevant user-defined output at all?
    startIndex = stat['IndexOfLabel']['GrainCount'] - 1

    # We now have to find a mapping from each output label, as defined in the .output* files,
    # to the output position in the post file. Since we know where the user-defined outputs
    # start ("startIndex"), we can simply assign increasing indices to the labels
    # given in the .output* file
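    #
    # For a hypothetical case of one homogenization output 'temperature' (length 1)
    # and one grain with a crystallite output 'orientation' (length 4), the mapping
    # below yields:
    #   startIndex+2    -> HomogenizationCount
    #   startIndex+3    -> temperature
    #   startIndex+4    -> 1_CrystalliteCount
    #   startIndex+5..8 -> 1_1_orientation .. 1_4_orientation
    #   startIndex+9    -> 1_ConstitutiveCount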

    offset = 0
    stat['LabelOfElementalScalar'][startIndex + 2 + offset] = 'HomogenizationCount'
    for var in outputFormat['Homogenization']['outputs']:
      if var[1] > 1:
        for i in range(var[1]):
          stat['IndexOfLabel']['%i_%s'%(i+1,var[0])] = startIndex + 2 + offset + (i+1)
      else:
        stat['IndexOfLabel']['%s'%(var[0])] = startIndex + 2 + offset + 1
      offset += var[1]

    for grain in range(outputFormat['Homogenization']['specials']['(ngrains)']):
      stat['IndexOfLabel']['%i_CrystalliteCount'%(grain+1)] = startIndex + 3 + offset
      for var in outputFormat['Crystallite']['outputs']:
        if var[1] > 1:
          for i in range(var[1]):
            stat['IndexOfLabel']['%i_%i_%s'%(grain+1,i+1,var[0])] = startIndex + 3 + offset + (i+1)
        else:
          stat['IndexOfLabel']['%i_%s'%(grain+1,var[0])] = startIndex + 3 + offset + 1
        offset += var[1]

      stat['IndexOfLabel']['%i_ConstitutiveCount'%(grain+1)] = startIndex + 4 + offset
      for var in outputFormat['Constitutive']['outputs']:
        if var[1] > 1:
          for i in range(var[1]):
            stat['IndexOfLabel']['%i_%i_%s'%(grain+1,i+1,var[0])] = startIndex + 4 + offset + (i+1)
        else:
          stat['IndexOfLabel']['%i_%s'%(grain+1,var[0])] = startIndex + 4 + offset + 1
        offset += var[1]

  return stat


# -----------------------------
def SummarizePostfile(stat,where=sys.stdout):
# -----------------------------

  where.write('title:\t%s'%stat['Title'] + '\n\n')
  where.write('extrapolation:\t%s'%stat['Extrapolation'] + '\n\n')
  where.write('increments:\t%i+1'%(stat['NumberOfIncrements']-1) + '\n\n')
  where.write('nodes:\t%i'%stat['NumberOfNodes'] + '\n\n')
  where.write('elements:\t%i'%stat['NumberOfElements'] + '\n\n')
  where.write('nodal scalars:\t%i'%stat['NumberOfNodalScalars'] + '\n\n  ' + '\n  '.join(stat['LabelOfNodalScalar']) + '\n\n')
  where.write('elemental scalars:\t%i'%stat['NumberOfElementalScalars'] + '\n\n  ' + '\n  '.join(stat['LabelOfElementalScalar']) + '\n\n')
  where.write('elemental tensors:\t%i'%stat['NumberOfElementalTensors'] + '\n\n  ' + '\n  '.join(stat['LabelOfElementalTensor']) + '\n\n')

  return True


# -----------------------------
# MAIN FUNCTION STARTS HERE
# -----------------------------

# --- input parsing

parser = OptionParser(option_class=MyOption, usage='%prog [options] resultfile', description = """
Extract data from a .t16 MSC.Marc results file.

The list of output variables is given by the options '--ns','--es','--et','--ho','--cr','--co'.

Filters and separations use 'elem','node','grain', and 'x','y','z' as key words.
Examples:
1) get averaged results in slices perpendicular to x for all positive y coordinates
   --filter 'y >= 0.0' --separation x --map 'avg'
2) global sum of squared data falling into the first quadrant arc between R1 and R2
   --filter 'x*x + y*y >= R1*R1 and x*x + y*y <= R2*R2' --map 'lambda list: sum([item*item for item in list])'

$Id: postResults 205 2010-06-08 15:23:31Z MPIE\p.eisenlohr $
""")

parser.add_option('-i','--info', action='store_true', dest='info', \
                  help='list contents of resultfile [%default]')
parser.add_option('-d','--dir', dest='directory', \
                  help='name of subdirectory to hold output [%default]')
parser.add_option('-r','--range', dest='range', type='int', nargs=3, \
                  help='range of increments to output (start, end, step) [all]')
parser.add_option('-m','--map', dest='func', type='string', \
                  help='data reduction mapping ["%default"] out of min, max, avg, sum or user-lambda')

group_material = OptionGroup(parser,'Material identifier')
group_special  = OptionGroup(parser,'Special outputs')
group_general  = OptionGroup(parser,'General outputs')

group_material.add_option('--homogenization', dest='homog', type='string', \
                          help='homogenization identifier (as string or integer [%default])')
group_material.add_option('--crystallite', dest='cryst', type='string', \
                          help='crystallite identifier (as string or integer [%default])')
group_material.add_option('--phase', dest='phase', type='string', \
                          help='phase identifier (as string or integer [%default])')

group_special.add_option('-t','--time', action='store_true', dest='time', \
                         help='output time of increment [%default]')
group_special.add_option('-f','--filter', dest='filter', type='string', \
                         help='condition(s) to filter results [%default]')
group_special.add_option('--separation', action='extend', dest='separation', type='string', \
                         help='properties to separate results [%default]')
parser.add_option('-s','--split', action='store_true', dest='separateFiles', \
                  help='split output per increment [%default]')

group_general.add_option('--ns', action='extend', dest='nodalScalar', type='string', \
                         help='list of nodal scalars to extract')
group_general.add_option('--es', action='extend', dest='elementalScalar', type='string', \
                         help='list of elemental scalars to extract')
group_general.add_option('--et', action='extend', dest='elementalTensor', type='string', \
                         help='list of elemental tensors to extract')
group_general.add_option('--ho', action='extend', dest='homogenizationResult', type='string', \
                         help='list of homogenization results to extract')
group_general.add_option('--cr', action='extend', dest='crystalliteResult', type='string', \
                         help='list of crystallite results to extract')
group_general.add_option('--co', action='extend', dest='constitutiveResult', type='string', \
                         help='list of constitutive results to extract')

parser.add_option_group(group_material)
parser.add_option_group(group_general)
parser.add_option_group(group_special)

parser.set_defaults(info = False)
parser.set_defaults(directory = 'postProc')
parser.set_defaults(func = 'avg')
parser.set_defaults(homog = '1')
parser.set_defaults(cryst = '1')
parser.set_defaults(phase = '1')
parser.set_defaults(filter = '')
parser.set_defaults(separation = [])
parser.set_defaults(inc = False)
parser.set_defaults(time = False)
parser.set_defaults(separateFiles = False)

(options, file) = parser.parse_args()

bg = backgroundMessage()
bg.start()


# --- sanity checks

if not file:
  parser.print_help()
  parser.error('no file specified...')

if options.constitutiveResult and not options.phase:
  parser.print_help()
  parser.error('constitutive results require phase...')

if options.nodalScalar and ( options.elementalScalar or options.elementalTensor
                          or options.homogenizationResult or options.crystalliteResult or options.constitutiveResult ):
  parser.print_help()
  parser.error('not allowed to mix nodal with elemental results...')


# --- parse .output and .t16 files

bg.set_message('parsing .output and .t16 files...')

filename = os.path.splitext(file[0])[0]
dirname = os.path.abspath(os.path.dirname(filename))+os.sep+options.directory
if not os.path.isdir(dirname):
  os.mkdir(dirname,0755)

outputFormat = {}
me = {
  'Homogenization': options.homog,
  'Crystallite': options.cryst,
  'Constitutive': options.phase,
  }
for what in me:
  outputFormat[what] = ParseOutputFormat(filename, what, me[what])
  if not '_id' in outputFormat[what]['specials']:
    print "'%s' not found in <%s>"%(me[what], what)
    print '\n'.join(map(lambda x:'  '+x, outputFormat[what]['specials']['brothers']))
    sys.exit(1)

p = OpenPostfile(filename+'.t16')
stat = ParsePostfile(p, filename, outputFormat)


# --- sanity check for output variables
# for Mentat variables (nodalScalar, elementalScalar, elementalTensor) we simply have to check
#   whether the label is found in the stat['IndexOfLabel'] dictionary
# for user-defined variables (homogenizationResult, crystalliteResult, constitutiveResult) we have to check
#   the corresponding outputFormat, since the naming scheme in stat['IndexOfLabel'] is different

for opt in ['nodalScalar','elementalScalar','elementalTensor','homogenizationResult','crystalliteResult','constitutiveResult']:
  if eval('options.%s'%opt):
    for label in eval('options.%s'%opt):
      if (opt in ['nodalScalar','elementalScalar','elementalTensor'] and not label in stat['IndexOfLabel']) \
         or (opt in ['homogenizationResult','crystalliteResult','constitutiveResult'] \
             and (not outputFormat[opt[:-6].capitalize()]['outputs'] or not label in zip(*outputFormat[opt[:-6].capitalize()]['outputs'])[0])):
        parser.error('%s "%s" unknown...'%(opt,label))


# --- output info

if options.info:
  print '\nMentat release %s\n'%release
  SummarizePostfile(stat,sys.stderr)

  print '\nUser Defined Outputs'
  for what in me:
    print '\n  ',what,':'
    for output in outputFormat[what]['outputs']:
      print '    ',output

  sys.exit(0)


# --- get output data from .t16 file

if options.range:
  increments = range( max(0,options.range[0]),
                      min(stat['NumberOfIncrements'],options.range[1]+1),
                      options.range[2])
else:
  increments = range(stat['NumberOfIncrements']-1)

fileOpen = False
assembleHeader = True
header = []

for increment in increments:
  p.moveto(increment+1)
  bg.set_message('read data from increment %i...'%increment)
  data = {}

  if options.nodalScalar:
    for n in range(stat['NumberOfNodes']):
      nodeID = p.node_id(n)
      nodeCoordinates = [p.node(n).x, p.node(n).y, p.node(n).z]
      elemID = 0
      grainID = 0

      # --- filter valid locations

      filter = substituteLocation(options.filter, [elemID,nodeID,grainID], nodeCoordinates)   # generates an expression that is only true for the locations specified by options.filter
      if filter != '' and not eval(filter):              # for all filter expressions that are not true:...
        continue                                         # ...ignore this data point and continue with the next

      # --- group data locations

      group = substituteLocation('#'.join(options.separation), [elemID,nodeID,grainID], nodeCoordinates)   # generates a unique key for a group of separated data based on the separation criterion for the location
      if group not in data:                              # create a new group if not yet present
        data[group] = []
      data[group].append([])                             # append a new list for each group member; each list will contain dictionaries with keys 'label' and 'content' for the associated data
      data[group][-1].append({
        'label': 'location',
        'content': [elemID,nodeID,grainID] + nodeCoordinates,
        })                                               # first entry in this list always contains the location data

      # --- get data from t16 file

      for label in options.nodalScalar:
        if assembleHeader:
          header.append(label.replace(' ',''))
        data[group][-1].append({
          'label': label,
          'content': [ p.node_scalar(n,stat['IndexOfLabel'][label]) ],
          })

      assembleHeader = False

  else:
    for e in range(stat['NumberOfElements']):
      nodeCoordinates = map(lambda node: [node.x, node.y, node.z], map(p.node, map(p.node_sequence,p.element(e).items)))
      ipCoordinates = ipCoords(p.element(e).type, nodeCoordinates)
      elemID = p.element_id(e)
      for n in range(p.element(e).len):
        nodeID = p.element(e).items[n]
        for g in range(('GrainCount' in stat['IndexOfLabel'] and int(p.element_scalar(e, stat['IndexOfLabel']['GrainCount'])[0].value))
                       or 1):
          grainID = g + 1

          # --- filter valid locations

          filter = substituteLocation(options.filter, [elemID,nodeID,grainID], ipCoordinates[n])   # generates an expression that is only true for the locations specified by options.filter
          if filter != '' and not eval(filter):          # for all filter expressions that are not true:...
            continue                                     # ...ignore this data point and continue with the next

          # --- group data locations

          group = substituteLocation('#'.join(options.separation), [elemID,nodeID,grainID], ipCoordinates[n])   # generates a unique key for a group of separated data based on the separation criterion for the location
          if group not in data:                          # create a new group if not yet present
            data[group] = []
          data[group].append([])                         # append a new list for each group member; each list will contain dictionaries with keys 'label' and 'content' for the associated data
          data[group][-1].append({
            'label': 'location',
            'content': [elemID,nodeID,grainID] + ipCoordinates[n],
            })                                           # first entry in this list always contains the location data

          # --- get data from t16 file

          if options.elementalScalar:
            for label in options.elementalScalar:
              if assembleHeader:
                header.append(label.replace(' ',''))
              data[group][-1].append({
                'label': label,
                'content': [ p.element_scalar(e,stat['IndexOfLabel'][label])[n].value ],
                })

          if options.elementalTensor:
            for label in options.elementalTensor:
              if assembleHeader:
                header += ['%s.%s'%(label.replace(' ',''),component) for component in ['intensity','t11','t22','t33','t12','t23','t13']]
              data[group][-1].append({
                'label': label,
                'content': [ eval("p.element_tensor(e,stat['IndexOfLabel'][label])[n].%s"%component)
                             for component in ['intensity','t11','t22','t33','t12','t23','t13'] ],
                })

          if options.homogenizationResult:
            for label in options.homogenizationResult:
              outputIndex = list(zip(*outputFormat['Homogenization']['outputs'])[0]).index(label)   # find the position of this output in the outputFormat
              length = int(outputFormat['Homogenization']['outputs'][outputIndex][1])
              if length > 1:
                if assembleHeader:
                  header += ['%i_%s'%(component+1,label) for component in range(length)]
                data[group][-1].append({
                  'label': label,
                  'content': [ p.element_scalar(e,stat['IndexOfLabel']['%i_%s'%(component+1,label)])[n].value
                               for component in range(length) ],
                  })
              else:
                if assembleHeader:
                  header.append(label)
                data[group][-1].append({
                  'label': label,
                  'content': [ p.element_scalar(e,stat['IndexOfLabel']['%s'%label])[n].value ],
                  })

          if options.crystalliteResult:
            for label in options.crystalliteResult:
              outputIndex = list(zip(*outputFormat['Crystallite']['outputs'])[0]).index(label)   # find the position of this output in the outputFormat
              length = int(outputFormat['Crystallite']['outputs'][outputIndex][1])
              if length > 1:
                if assembleHeader:
                  header += ['%i_%i_%s'%(g+1,component+1,label) for component in range(length)]
                data[group][-1].append({
                  'label': label,
                  'content': [ p.element_scalar(e,stat['IndexOfLabel']['%i_%i_%s'%(g+1,component+1,label)])[n].value
                               for component in range(length) ],
                  })
              else:
                if assembleHeader:
                  header.append('%i_%s'%(g+1,label))
                data[group][-1].append({
                  'label': label,
                  'content': [ p.element_scalar(e,stat['IndexOfLabel']['%i_%s'%(g+1,label)])[n].value ],
                  })

          if options.constitutiveResult:
            for label in options.constitutiveResult:
              outputIndex = list(zip(*outputFormat['Constitutive']['outputs'])[0]).index(label)   # find the position of this output in the outputFormat
              length = int(outputFormat['Constitutive']['outputs'][outputIndex][1])
              if length > 1:
                if assembleHeader:
                  header += ['%i_%i_%s'%(g+1,component+1,label) for component in range(length)]
                data[group][-1].append({
                  'label': label,
                  'content': [ p.element_scalar(e,stat['IndexOfLabel']['%i_%i_%s'%(g+1,component+1,label)])[n].value
                               for component in range(length) ],
                  })
              else:
                if assembleHeader:
                  header.append('%i_%s'%(g+1,label))
                data[group][-1].append({
                  'label': label,
                  'content': [ p.element_scalar(e,stat['IndexOfLabel']['%i_%s'%(g+1,label)])[n].value ],
                  })

    assembleHeader = False

  if options.separateFiles:
    if fileOpen:
      file.close()
      fileOpen = False
    outFilename = eval('"'+eval("'%%s_inc%%0%ii.txt'%(math.log10(max(increments))+1)")+'"%(dirname + os.sep + os.path.split(filename)[1],increment)')
  else:
    outFilename = '%s.txt'%(dirname + os.sep + os.path.split(filename)[1])

  # --- write header to file

  if not fileOpen:
    file = open(outFilename,'w')
    fileOpen = True
    file.write('2\theader\n')
    file.write('$Id: postResults 205 2010-06-08 15:23:31Z MPIE\p.eisenlohr $\n')
    if options.time:
      basic = ['inc','time']
    else:
      basic = ['inc']
    if options.nodalScalar:
      file.write('\t'.join(basic + ['elem','node','grain','node.x','node.y','node.z'] + header) + '\n')
    else:
      file.write('\t'.join(basic + ['elem','node','grain','ip.x','ip.y','ip.z'] + header) + '\n')
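
    # The column-label line (third line of the file) is tab-separated; e.g. for
    # '--time --es "Equivalent Von Mises Stress"' it would read (hypothetical label spelling):
    #   inc  time  elem  node  grain  ip.x  ip.y  ip.z  EquivalentVonMisesStress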

  # --- write data to file

  output = []
  for group in data:
    if options.time:
      output.append([increment, p.time])
    else:
      output.append([increment])
    for chunk in range(len(data[group][0])):
      label = data[group][0][chunk]['label']             # name of chunk (e.g. 'orientation', or 'flow stress')
      groupContent = [data[group][member][chunk]['content'] for member in range(len(data[group]))]   # list of each member's chunk
      if label == 'location':
        condensedGroupContent = mapFunc(label, groupContent, 'avg')   # always average location
        if len(groupContent) > 1:                        # elem, node, grain IDs make no sense when averaged over more than one entry...
          condensedGroupContent[:3] = ['n/a']*3          # ...so return 'n/a'
      else:
        condensedGroupContent = mapFunc(label, groupContent, options.func)   # map function to groupContent to get condensed data of this group's chunk
      output[-1] += condensedGroupContent

  for groupvalues in sortBySeparation(output, options.separation, int(options.time)):   # sort output according to separation criteria
    file.write('\t'.join(map(str,groupvalues)) + '\n')

if fileOpen:
  file.close()


# --------------------------- DONE --------------------------------
@ -0,0 +1,152 @@
#!/usr/bin/env python
'''
Writes meaningful labels to the Marc input file (*.dat),
based on the files
<modelname_jobname>.output<Homogenization/Crystallite/Constitutive>
that are written during the first run of the model.
'''
import sys,os,re
from optparse import OptionParser

# -----------------------------
def ParseOutputFormat(filename,what,me):
# -----------------------------
  format = {'outputs':{},'specials':{'brothers':[]}}

  outputmetafile = filename+'.output'+what
  try:
    file = open(outputmetafile)
  except:
    print('Could not open file %s'%outputmetafile)
    raise
  else:
    content = file.readlines()
    file.close()

  tag = ''
  tagID = 0
  for line in content:
    if re.match("\s*$",line) or re.match("#",line):     # skip blank lines and comments
      continue
    m = re.match("\[(.+)\]",line)                       # look for block indicator
    if m:                                               # next section
      tag = m.group(1)
      tagID += 1
      format['specials']['brothers'].append(tag)
      if tag == me or (me.isdigit() and tagID == int(me)):
        format['specials']['_id'] = tagID
        format['outputs'] = []
        tag = me
    else:                                               # data from section
      if tag == me:
        (output,length) = line.split()
        output = output.lower()                         # normalize label to lower case
        if length.isdigit():
          length = int(length)
        if re.match("\((.+)\)",output):                 # special data, e.g. (ngrains)
          format['specials'][output] = length
        elif length > 0:
          format['outputs'].append([output,length])
  return format


parser = OptionParser(usage='%prog [options] Marc.inputfile(s)', description="""
Writes meaningful labels to the Marc input file (*.dat), based on the files
<modelname_jobname>.output<Homogenization/Crystallite/Constitutive>
that are written during the first run of the model.
""")
parser.add_option('-n','--number', dest='number', type='int', \
                  help='maximum requested User Defined Variable [%default]')
parser.add_option('--homogenization', dest='homog', type='string', \
                  help='homogenization identifier (as string or integer [%default])')
parser.add_option('--crystallite', dest='cryst', type='string', \
                  help='crystallite identifier (as string or integer [%default])')
parser.add_option('--phase', dest='phase', type='string', \
                  help='phase identifier (as string or integer [%default])')
parser.add_option('--use', dest='useFile', type='string', \
                  help='optionally parse output descriptors from '+
                       'a different <model_job>.outputZZZ file. Saves the effort '+
                       'of starting a calculation for each job [%default]')
parser.set_defaults(number = 0)
parser.set_defaults(homog = '1')
parser.set_defaults(cryst = '1')
parser.set_defaults(phase = '1')
parser.set_defaults(useFile = '')

(options, files) = parser.parse_args()

if not files:
  parser.print_help()
  parser.error('no file(s) specified...')

me = {  'Homogenization': options.homog,
        'Crystallite':    options.cryst,
        'Constitutive':   options.phase,
     }


for file in files:
  if options.useFile != '':
    formatFile = os.path.splitext(options.useFile)[0]
  else:
    formatFile = os.path.splitext(file)[0]
  file = os.path.splitext(file)[0]+'.dat'
  if not os.path.lexists(file):
    print file,'not found'
    continue

  print('Scanning format files of: %s'%formatFile)

  if options.number < 1:
    outputFormat = {}

    for what in me:
      outputFormat[what] = ParseOutputFormat(formatFile,what,me[what])
      if not '_id' in outputFormat[what]['specials']:
        print "'%s' not found in <%s>"%(me[what],what)
        print '\n'.join(map(lambda x:'  '+x,outputFormat[what]['specials']['brothers']))
        sys.exit(1)

    UserVars = ['GrainCount']

    UserVars += ['HomogenizationCount']
    for var in outputFormat['Homogenization']['outputs']:
      if var[1] > 1:
        UserVars += ['%i_%s'%(i+1,var[0]) for i in range(var[1])]
      else:
        UserVars += ['%s'%(var[0]) for i in range(var[1])]

    for grain in range(outputFormat['Homogenization']['specials']['(ngrains)']):
      UserVars += ['%i_CrystalliteCount'%(grain+1)]
      for var in outputFormat['Crystallite']['outputs']:
        if var[1] > 1:
          UserVars += ['%i_%i_%s'%(grain+1,i+1,var[0]) for i in range(var[1])]
        else:
          UserVars += ['%i_%s'%(grain+1,var[0]) for i in range(var[1])]

      UserVars += ['%i_ConstitutiveCount'%(grain+1)]
      for var in outputFormat['Constitutive']['outputs']:
        if var[1] > 1:
          UserVars += ['%i_%i_%s'%(grain+1,i+1,var[0]) for i in range(var[1])]
        else:
          UserVars += ['%i_%s'%(grain+1,var[0]) for i in range(var[1])]

  # Now change the *.dat file(s)
  print('Adding labels to: %s'%file)
  inFile = open(file)
  input = inFile.readlines()
  inFile.close()
  output = open(file,'w')
  thisSection = ''
  for line in input:
    m = re.match('(\w+)\s',line)
    if m:
      lastSection = thisSection
      thisSection = m.group(1)
      if (lastSection == 'post' and thisSection == 'parameters'):   # splice user-defined variables in at the end of the "post" block
        if options.number > 0:
          for i in range(options.number):
            output.write('%10i%10i\n'%(-i-1,0))
        else:
          for i in range(len(UserVars)):
            output.write('%10i%10i%s\n'%(-i-1,0,UserVars[i]))
    if (thisSection != 'post' or not re.match('\s*\-',line)):       # drop the old variable lines of the "post" block
      output.write(line)
  output.close()
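
  # With UserVars = ['GrainCount','HomogenizationCount',...], the lines spliced
  # in between the "post" and "parameters" cards read, e.g.:
  #         -1         0GrainCount
  #         -2         0HomogenizationCount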
@ -0,0 +1,180 @@
#!/usr/bin/env python

import sys, os
from colorsys import *
from optparse import OptionParser

releases = ['2010','2008r1','2007r1','2005r3']

for release in releases:
  libPath = '/msc/mentat%s/shlib/'%release
  if os.path.exists(libPath):
    sys.path.append(libPath)
    for subdir in [os.path.join(libPath,file)
                   for file in os.listdir(libPath)
                   if os.path.isdir(os.path.join(libPath,file))]:
      sys.path.append(subdir)
    break
from py_mentat import *


# -----------------------------
def outMentat(cmd,locals):
# -----------------------------
  if cmd[0:3] == '(!)':
    exec(cmd[3:])
  elif cmd[0:3] == '(?)':
    cmd = eval(cmd[3:])
    py_send(cmd)
  else:
    py_send(cmd)
  return


# -----------------------------
def outStdout(cmd,locals):
# -----------------------------
  if cmd[0:3] == '(!)':
    exec(cmd[3:])
  elif cmd[0:3] == '(?)':
    cmd = eval(cmd[3:])
    print cmd
  else:
    print cmd
  return


# -----------------------------
def output(cmds,locals,dest):
# -----------------------------
  for cmd in cmds:
    if isinstance(cmd,list):
      output(cmd,locals,dest)
    else:
      { 'Mentat': outMentat,
        'Stdout': outStdout,
      }[dest](cmd,locals)
  return


# -----------------------------
def lever(val0, val1, x):
# -----------------------------
  return val0 + (val1 - val0) * x


# -----------------------------
def symlever(comp, val0, val1, x):
# -----------------------------
# interpolates by way of a neutral middle value: bright (for lightness) or gray (for saturation)
  if comp == "hue":
    return lever(val0, val1, x)

  if comp == "lightness":
    val_middle = max(0.9, val0, val1)
  elif comp == "saturation":
    val_middle = min(0.1, val0, val1)
  if x < 0.5:
    return lever(val0, val_middle, 2*x)
  else:
    return lever(val_middle, val1, 2*x-1)
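
# Example: symlever("lightness", 0.2, 0.4, x) passes through val_middle = 0.9,
# so x = 0.25 gives lever(0.2, 0.9, 0.5) = 0.55 on the way up, and x = 0.75
# gives lever(0.9, 0.4, 0.5) = 0.65 on the way back down.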


# -----------------------------
def colorMap(colors):
# -----------------------------
  cmds = [ "*color %i %f %f %f"%(idx+32,color[0],color[1],color[2])
           for idx,color in enumerate(colors) ]
  return cmds
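
# Example: colorMap([(1.0,0.0,0.0),(0.0,1.0,0.0)]) returns
#   ['*color 32 1.000000 0.000000 0.000000', '*color 33 0.000000 1.000000 0.000000']
# i.e. it fills consecutive Mentat color slots starting at index 32 (as assumed
# by the offset above).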


# -----------------------------
# MAIN FUNCTION STARTS HERE
# -----------------------------

parser = OptionParser(usage="%prog [options] lower_hls upper_hls", description = """
Changes the color map in Mentat.

Interpolates colors between "lower_hls" and "upper_hls".
For symmetric scales use option "-s".

Example colors:
- non-symmetric scales: 0.167,0.9,0.1  0.167,0.1,0.9
- symmetric scales:     0,0.2,0.9  0.333,0.2,0.9
""")

parser.add_option("-s","--symmetric", action = "store_true",
                  dest = "symmetric", \
                  help = "symmetric legend [%default]")
parser.add_option("-p", "--port", type = "int",\
                  dest = "port",\
                  help = "Mentat connection port [%default]")
parser.add_option("-v", "--verbose", action="store_true",\
                  dest = "verbose",\
                  help = "write Mentat command stream also to stdout [%default]")
parser.set_defaults(symmetric = False)
parser.set_defaults(port = 40007)
parser.set_defaults(verbose = False)


(options, vars) = parser.parse_args()


### read hlsColors and check if they are valid hls values

try:
  hlsColor_bounds = [[],[]]
  for i in range(2):
    hlsColor_bounds[i] = map(float, vars[i].split(","))

    if len(hlsColor_bounds[i]) != 3:
      raise

    hlsColors_limits = [[0,0,0],[1,1,1]]
    for j in range(3):
      if hlsColor_bounds[i][j] < hlsColors_limits[0][j] or hlsColor_bounds[i][j] > hlsColors_limits[1][j]:
        raise

except:
  parser.error("give lower and upper hlsColor as comma-separated values")


### interpolate hls values

nColors = 32
if options.symmetric:
  hlsColors = [ [ symlever(comp, hlsColor_bounds[0][j], hlsColor_bounds[1][j], float(idx)/(nColors-1))
                  for j,comp in enumerate(["hue","lightness","saturation"]) ]
                for idx in range(nColors) ]
else:
  hlsColors = [ [ lever(hlsColor_bounds[0][j], hlsColor_bounds[1][j], float(idx)/(nColors-1))
                  for j,comp in enumerate(["hue","lightness","saturation"]) ]
                for idx in range(nColors) ]


### convert to rgb values

rgbColors = [ hls_to_rgb(hlsColor[0], hlsColor[1], hlsColor[2])
              for hlsColor in hlsColors ]


### connect to Mentat and change colorMap

outputLocals = {}
print 'waiting to connect...'
py_connect('',options.port)
print 'connected...'

cmds = colorMap(rgbColors)
output(cmds,outputLocals,'Mentat')
py_disconnect()

if options.verbose:
  output(cmds,outputLocals,'Stdout')
@ -0,0 +1,757 @@
#!/usr/bin/env python

releases = ['2010r1','2008r1','2007r1','2005r3']


import sys,os,pwd,math,re
try:
  import Image,ImageDraw                                 # PIL; only needed for image output (imagesize > 0)
except ImportError:
  pass
from optparse import OptionParser

for release in releases:
  libPath = '/msc/mentat%s/shlib/'%release
  if os.path.exists(libPath):
    sys.path.append(libPath)
    break
from py_mentat import *


def outMentat(cmd,locals):
  if cmd[0:3] == '(!)':
    exec(cmd[3:])
  elif cmd[0:3] == '(?)':
    cmd = eval(cmd[3:])
    py_send(cmd)
  else:
    py_send(cmd)
  return

def outStdout(cmd,locals):
  if cmd[0:3] == '(!)':
    exec(cmd[3:])
  elif cmd[0:3] == '(?)':
    cmd = eval(cmd[3:])
    print cmd
  else:
    print cmd
  return


def output(cmds,locals,dest):
  for cmd in cmds:
    if isinstance(cmd,list):
      output(cmd,locals,dest)
    else:
      { 'Mentat': outMentat,
        'Stdout': outStdout,
      }[dest](cmd,locals)
  return


def rcbOrientationParser(content):

  grains = []
  myOrientation = [0.0,0.0,0.0]
  for line in content:
    if line[0] != '#':                                   # skip comments
      for grain in range(2):
        myID = int(line.split()[12+grain])               # get grain id
        myOrientation = map(float,line.split())[3*grain:3+3*grain]   # get orientation
        if len(grains) < myID:
          for i in range(myID-len(grains)):              # extend list to necessary length
            grains.append([0.0,0.0,0.0])
        grains[myID-1] = myOrientation                   # store Euler angles

  return grains
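
# The column layout assumed above for each segment line of the reconstructed
# boundary file: columns 0-2 and 3-5 hold the Euler angles of the two adjacent
# grains, columns 8-11 the segment end points (x1,y1,x2,y2), and columns 12-13
# the two grain IDs; e.g. (whitespace-separated, values purely illustrative):
#   0.0 0.0 0.0  10.0 45.0 0.0  0 0  0.0 0.0 1.0 1.0  1 2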

def rcbParser(content,size,tolerance,imagename,imagesize,border):   # parser for TSL-OIM reconstructed boundary files

  # find bounding box

  boxX = [1.*sys.maxint,-1.*sys.maxint]
  boxY = [1.*sys.maxint,-1.*sys.maxint]
  x = [0.,0.]
  y = [0.,0.]
  for line in content:
    if line[0] != '#':                                   # skip comments
      (x[0],y[0],x[1],y[1]) = map(float,line.split())[8:12]   # get start and end coordinates of each segment
      boxX[0] = min(boxX[0],x[0],x[1])
      boxX[1] = max(boxX[1],x[0],x[1])
      boxY[0] = min(boxY[0],y[0],y[1])
      boxY[1] = max(boxY[1],y[0],y[1])
  dX = boxX[1]-boxX[0]
  dY = boxY[1]-boxY[0]

  scaleImg = imagesize/max(dX,dY)
  scalePatch = size/dY

  if scaleImg > 0:                                       # create image
    img = Image.new("RGB",map(lambda x:int(round(x))+border*2,(scaleImg*dX,scaleImg*dY)),(255,255,255))
    draw = ImageDraw.Draw(img)

  # read segments and draw them
  segment = 0
  connectivityXY = {"0": {"0":[],"%g"%dY:[],},
                    "%g"%dX: {"0":[],"%g"%dY:[],},}
  connectivityYX = {"0": {"0":[],"%g"%dX:[],},
                    "%g"%dY: {"0":[],"%g"%dX:[],},}
  grainNeighbors = []

  for line in content:
    if line[0] != '#':                                   # skip comments
      (x[0],y[0],x[1],y[1]) = map(float,line.split())[8:12]   # get start and end coordinates of each segment
      # make relative to origin of bounding box
      x[0] -= boxX[0]
      x[1] -= boxX[0]
      y[0] = boxY[1]-y[0]
      y[1] = boxY[1]-y[1]
      grainNeighbors.append(map(int,line.split()[12:14]))   # remember right and left grain per segment
      for i in range(2):                                 # store segment to both points
        match = False                                    # check whether point is already known (within a small range)
        for posX in connectivityXY.keys():
          if (abs(float(posX)-x[i])<dX*tolerance):
            for posY in connectivityXY[posX].keys():
              if (abs(float(posY)-y[i])<dY*tolerance):
                keyX = posX
                keyY = posY
                match = True
                break
            break
        if (not match):
          # force to boundary if inside tolerance to it
          if (abs(x[i])<dX*tolerance):
            x[i] = 0
          if (abs(dX-x[i])<dX*tolerance):
            x[i] = dX
          if (abs(y[i])<dY*tolerance):
            y[i] = 0
          if (abs(dY-y[i])<dY*tolerance):
            y[i] = dY
          keyX = "%g"%x[i]
          keyY = "%g"%y[i]
        if keyX not in connectivityXY:                   # create new hash entry for so far unknown point
          connectivityXY[keyX] = {}
        if keyY not in connectivityXY[keyX]:
          connectivityXY[keyX][keyY] = []
        if keyY not in connectivityYX:
          connectivityYX[keyY] = {}
        if keyX not in connectivityYX[keyY]:
          connectivityYX[keyY][keyX] = []
        connectivityXY[keyX][keyY].append(segment)
        connectivityYX[keyY][keyX].append(segment)
      if scaleImg > 0:
        draw.line(map(lambda x:int(scaleImg*x)+border,[x[0],y[0],x[1],y[1]]),fill=(128,128,128))
        draw.text(map(lambda x:int(scaleImg*x)+border,[(x[1]+x[0])/2.0,(y[1]+y[0])/2.0]),"%i"%segment,fill=(0,0,128))
      segment += 1


  # top border
  keyId = "0"
  boundary = connectivityYX[keyId].keys()
  boundary.sort(key=float)
  for indexBdy in range(len(boundary)-1):
    connectivityXY[boundary[indexBdy]][keyId].append(segment)
    connectivityXY[boundary[indexBdy+1]][keyId].append(segment)
    connectivityYX[keyId][boundary[indexBdy]].append(segment)
    connectivityYX[keyId][boundary[indexBdy+1]].append(segment)
    if scaleImg > 0:
      draw.line(map(lambda x:int(scaleImg*x)+border,[float(boundary[indexBdy]),float(keyId),float(boundary[indexBdy+1]),float(keyId)]),width=3,fill=(128,128*(segment%2),0))
      draw.text(map(lambda x:int(scaleImg*x)+border,[(float(boundary[indexBdy])+float(boundary[indexBdy+1]))/2.0,float(keyId)]),"%i"%segment,fill=(0,0,128))
    segment += 1

  # right border
  keyId = "%g"%(boxX[1]-boxX[0])
  boundary = connectivityXY[keyId].keys()
  boundary.sort(key=float)
  for indexBdy in range(len(boundary)-1):
    connectivityYX[boundary[indexBdy]][keyId].append(segment)
    connectivityYX[boundary[indexBdy+1]][keyId].append(segment)
    connectivityXY[keyId][boundary[indexBdy]].append(segment)
    connectivityXY[keyId][boundary[indexBdy+1]].append(segment)
    if scaleImg > 0:
      draw.line(map(lambda x:int(scaleImg*x)+border,[float(keyId),float(boundary[indexBdy]),float(keyId),float(boundary[indexBdy+1])]),width=3,fill=(128,128*(segment%2),0))
      draw.text(map(lambda x:int(scaleImg*x)+border,[float(keyId),(float(boundary[indexBdy])+float(boundary[indexBdy+1]))/2.0]),"%i"%segment,fill=(0,0,128))
    segment += 1

  # bottom border
  keyId = "%g"%(boxY[1]-boxY[0])
  boundary = connectivityYX[keyId].keys()
  boundary.sort(key=float,reverse=True)
  for indexBdy in range(len(boundary)-1):
    connectivityXY[boundary[indexBdy]][keyId].append(segment)
    connectivityXY[boundary[indexBdy+1]][keyId].append(segment)
    connectivityYX[keyId][boundary[indexBdy]].append(segment)
    connectivityYX[keyId][boundary[indexBdy+1]].append(segment)
    if scaleImg > 0:
      draw.line(map(lambda x:int(scaleImg*x)+border,[float(boundary[indexBdy]),float(keyId),float(boundary[indexBdy+1]),float(keyId)]),width=3,fill=(128,128*(segment%2),0))
      draw.text(map(lambda x:int(scaleImg*x)+border,[(float(boundary[indexBdy])+float(boundary[indexBdy+1]))/2.0,float(keyId)]),"%i"%segment,fill=(0,0,128))
    segment += 1

  # left border
  keyId = "0"
  boundary = connectivityXY[keyId].keys()
  boundary.sort(key=float,reverse=True)
  for indexBdy in range(len(boundary)-1):
    connectivityYX[boundary[indexBdy]][keyId].append(segment)
    connectivityYX[boundary[indexBdy+1]][keyId].append(segment)
    connectivityXY[keyId][boundary[indexBdy]].append(segment)
    connectivityXY[keyId][boundary[indexBdy+1]].append(segment)
    if scaleImg > 0:
      draw.line(map(lambda x:int(scaleImg*x)+border,[float(keyId),float(boundary[indexBdy]),float(keyId),float(boundary[indexBdy+1])]),width=3,fill=(128,128*(segment%2),0))
      draw.text(map(lambda x:int(scaleImg*x)+border,[float(keyId),(float(boundary[indexBdy])+float(boundary[indexBdy+1]))/2.0]),"%i"%segment,fill=(0,0,128))
    segment += 1


  allkeysX = connectivityXY.keys()
  allkeysX.sort()
  points = []
  segments = [[] for i in range(segment)]
  pointId = 0
  for keyX in allkeysX:
    allkeysY = connectivityXY[keyX].keys()
    allkeysY.sort()
    for keyY in allkeysY:
      points.append({'coords': [float(keyX)*scalePatch,float(keyY)*scalePatch], 'segments': connectivityXY[keyX][keyY]})
      for segment in connectivityXY[keyX][keyY]:
        if (segments[segment] == None):
          segments[segment] = pointId
        else:
          segments[segment].append(pointId)
      if scaleImg > 0:
        draw.text(map(lambda x:int(scaleImg*x)+border,[float(keyX),float(keyY)]),"%i"%pointId,fill=(0,0,0))
      pointId += 1

  if scaleImg > 0:
    img.save(imagename+'.png',"PNG")


  # walk around the grains: starting from each point, follow segments while always picking
  # the continuation with the highest turning score, until the loop closes; loops with a
  # positive accumulated score are grains, the remaining loop is the bounding box

  grains = {'draw': [], 'legs': []}
  pointId = 0
  for point in points:
    while point['segments']:
      myStart = pointId
      grainDraw = [points[myStart]['coords']]
      innerAngleSum = 0.0
      myWalk = point['segments'].pop()
      grainLegs = [myWalk]
      if segments[myWalk][0] == myStart:
        myEnd = segments[myWalk][1]
      else:
        myEnd = segments[myWalk][0]

      while (myEnd != pointId):
        myV = [points[myEnd]['coords'][0]-points[myStart]['coords'][0],
               points[myEnd]['coords'][1]-points[myStart]['coords'][1]]
        myLen = math.sqrt(myV[0]**2+myV[1]**2)
        best = {'product': -2.0, 'peek': -1, 'len': -1, 'point': -1}
        for peek in points[myEnd]['segments']:           # try in turn all segments emanating from the current end point
          if peek == myWalk:
            continue
          if segments[peek][0] == myEnd:
            peekEnd = segments[peek][1]
          else:
            peekEnd = segments[peek][0]
          peekV = [points[myEnd]['coords'][0]-points[peekEnd]['coords'][0],
                   points[myEnd]['coords'][1]-points[peekEnd]['coords'][1]]
          peekLen = math.sqrt(peekV[0]**2+peekV[1]**2)
          crossproduct = (myV[0]*peekV[1]-myV[1]*peekV[0])/myLen/peekLen
          dotproduct = (myV[0]*peekV[0]+myV[1]*peekV[1])/myLen/peekLen
          if crossproduct*(dotproduct+1.0) >= best['product']:   # maximize the turning score
            best['product'] = crossproduct*(dotproduct+1.0)
            best['peek'] = peek
            best['point'] = peekEnd
        innerAngleSum += best['product']
        myWalk = best['peek']
        myStart = myEnd
        myEnd = best['point']
        points[myStart]['segments'].remove(myWalk)       # do not walk this segment again
        grainDraw.append(points[myStart]['coords'])
        grainLegs.append(myWalk)
      if innerAngleSum > 0.0:
        grains['draw'].append(grainDraw)
        grains['legs'].append(grainLegs)
      else:
        grains['box'] = grainLegs
    pointId += 1
|
||||||
|
|
||||||
|
|
||||||
|
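# A minimal sketch (illustration only, not part of the script) of the turn-selection score
# used in the walk above. Since peekV points from the candidate point back toward the
# current node, crossproduct*(dotproduct+1.0) reduces to -sin(theta)*(1-cos(theta)) for a
# signed turning angle theta, which peaks at theta = -120 degrees: the walk favors one
# consistent turning sense, so every closed loop is traced with the same orientation and
# the sign of innerAngleSum separates grain interiors from the surrounding hull.
#
#   for theta in [-150.0,-120.0,-90.0,0.0,90.0,120.0]:
#     t = math.radians(theta)
#     print theta, -math.sin(t)*(1.0-math.cos(t))         # maximum at -120 degrees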
  # build overall data structure

  rcData = {'dimension':[dX,dY], 'point': [], 'segment': [], 'grain': [], 'grainMapping': []}

  for point in points:
    rcData['point'].append(point['coords'])
  print "found %i points"%(len(rcData['point']))

  for segment in segments:
    rcData['segment'].append(segment)
  print "built %i segments"%(len(rcData['segment']))

  for grain in grains['legs']:
    rcData['grain'].append(grain)
    myNeighbors = {}
    for leg in grain:
      if leg < len(grainNeighbors):
        for side in range(2):
          if grainNeighbors[leg][side] in myNeighbors:
            myNeighbors[grainNeighbors[leg][side]] += 1
          else:
            myNeighbors[grainNeighbors[leg][side]] = 1
    if myNeighbors:                                       # do I have any neighbors?
      rcData['grainMapping'].append(sorted(myNeighbors.iteritems(), key=lambda (k,v): (v,k), reverse=True)[0][0])  # most frequent grain is me
  print "found %i grains"%(len(rcData['grain']))

  rcData['box'] = grains['box']


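# The grainMapping step above is a majority vote; a minimal sketch with made-up counts:
# the grain behind a patch loop is taken to be the neighbor id bordering most of its legs.
#
#   myNeighbors = {12: 5, 3: 1}
#   print sorted(myNeighbors.iteritems(), key=lambda (k,v): (v,k), reverse=True)[0][0]   # -> 12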
  if scaleImg > 0:
    grainID = 0
    for grain in grains['draw']:
      coords = [0,0]
      for point in grain:
        coords[0] += int(scaleImg/scalePatch*point[0])
        coords[1] += int(scaleImg/scalePatch*point[1])
      coords[0] /= len(grain)
      coords[1] /= len(grain)
      draw.text(map(lambda x:x+border,coords),'%i -> %i'%(grainID,rcData['grainMapping'][grainID]),fill=(128,32,32))
      grainID += 1

    img.save(os.path.splitext(args[0])[0]+'.png',"PNG")

  return rcData


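# For orientation, the dictionary returned here has roughly this shape (all values below
# are hypothetical):
#
#   rcData = {'dimension':    [25.0, 20.0],                # patch extent dX x dY
#             'point':        [[0.0,0.0], [3.2,0.0]],      # unique boundary-junction coordinates
#             'segment':      [[0,1], [1,5]],              # segments as pairs of point ids
#             'grain':        [[0,3,7]],                   # grains as lists of segment ids
#             'grainMapping': [12],                        # patch grain -> original grain id
#             'box':          [2,6,9]}                     # segments forming the outer hull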
def init():
  return ["*new_model yes",
          "*select_clear",
          "*reset",
          "*set_nodes off",
          "*elements_solid",
          "*show_view 4",
          "*reset_view",
          "*view_perspective",
          "*redraw",
         ]


def sample(a,n,size):

  cmds = [\
  # gauge
    "*add_points %f %f %f"%(-size*a,size*a,0),
    "*add_points %f %f %f"%( size*a,size*a,0),
    "*add_points %f %f %f"%( size*a,-size*a,0),
    "*add_points %f %f %f"%(-size*a,-size*a,0),
    "*set_curve_type line",
    "*add_curves %i %i"%(1,2),
    "*add_curves %i %i"%(3,4),
    "*set_curve_div_type_fix_ndiv",
    "*set_curve_div_num %i"%n,
    "*apply_curve_divisions",
    "1 2 #",
    "*add_curves %i %i"%(2,3),                            # right side
    "*add_curves %i %i"%(4,1),                            # left side
    "*set_curve_div_type_fix_ndiv",
    "*set_curve_div_num %i"%n,
    "*apply_curve_divisions",
    "3 4 #",
    ]

  return cmds


def patch(a,n,mesh,rcData):
  cmds = []
  for l in range(len(rcData['point'])):                   # generate all points
    cmds.append("*add_points %f %f %f"%(rcData['point'][l][0]-a*rcData['dimension'][0]/rcData['dimension'][1]/2.0,rcData['point'][l][1]-a/2.0,0))

  cmds.append(["*set_curve_type line",
               "*set_curve_div_type_fix_ndiv",
              ])
  for m in range(len(rcData['segment'])):                 # generate all curves and subdivide them for overall balanced piece length
    start = rcData['segment'][m][0]
    end   = rcData['segment'][m][1]
    cmds.append([\
      "*add_curves %i %i" %(start+rcData['offsetPoints'],
                            end  +rcData['offsetPoints']),
      "*set_curve_div_num %i"%(max(1,round(math.sqrt((rcData['point'][start][0]-rcData['point'][end][0])**2+\
                                                     (rcData['point'][start][1]-rcData['point'][end][1])**2)/a*n))),
      "*apply_curve_divisions",
      "%i #"%(m+rcData['offsetSegments']),
      ])

  grain = 0
  cmds.append('(!)locals["last"] = py_get_int("nelements()")')   # "(!)" commands are executed on the sending (Python) side
  for g in rcData['grain']:
    cmds.append([\
      '(!)locals["first"] = locals["last"]+1',
      "*%s "%mesh+" ".join([str(rcData['offsetSegments']+x) for x in g])+" #",
      '(!)locals["last"] = py_get_int("nelements()")',
      "*select_elements",
      '(?)"%i to %i #"%(locals["first"],locals["last"])',        # "(?)" commands are evaluated at send time, the result is sent
      "*store_elements grain_%i"%rcData['grainMapping'][grain],
      "all_selected",
      "*select_clear",
      ])
    grain += 1

  return cmds


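# A minimal sketch of the "(!)"/"(?)" command protocol used above (the dispatch logic
# mirrors the outMentat/outStdout handlers): plain strings go to Mentat verbatim, "(!)"
# strings are exec'ed locally to capture state, and "(?)" strings are eval'ed locally with
# only the result sent on.
#
#   locals = {}
#   for cmd in ['(!)locals["first"] = 1',
#               '(?)"%i to %i #"%(locals["first"],42)',
#               "*select_elements"]:
#     if   cmd[0:3] == '(!)': exec(cmd[3:])
#     elif cmd[0:3] == '(?)': print eval(cmd[3:])         # -> 1 to 42 #
#     else:                   print cmd                   # -> *select_elements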
def gage(mesh,rcData):

  return([\
    "*%s "%mesh +
    " ".join([str(x) for x in range(1,rcData['offsetSegments'])]) +
    " " +
    " ".join([str(rcData['offsetSegments']+x) for x in rcData['box']]) +
    " #",
    "*select_reset",
    "*select_clear",
    "*select_elements",
    "all_existing",
    "*select_mode_except",
    ['grain_%i'%(i+1) for i in range(len(rcData['grain']))],
    "#",
    "*store_elements matrix",
    "all_selected",
    "*select_mode_invert",
    "*select_elements",
    "all_existing",
    "*store_elements grains",
    "all_selected",
    "*select_clear",
    "*select_reset",
    ])


def expand3D(thickness,steps):
  return([\
    "*set_expand_translation z %f"%(thickness/steps),
    "*set_expand_repetitions %i"%steps,
    "*expand_elements",
    "all_existing",
    ])


def initial_conditions(grainNumber,grainMapping):
  cmds = [\
    "*new_icond",
    "*icond_name temperature",
    "*icond_type state_variable",
    "*icond_param_value state_var_id 1",
    "*icond_dof_value var 300",
    "*add_icond_elements",
    "all_existing",
    "*new_icond",
    "*icond_name homogenization",
    "*icond_type state_variable",
    "*icond_param_value state_var_id 2",
    "*icond_dof_value var 1",
    "*add_icond_elements",
    "all_existing",
    ]

  for grain in range(grainNumber):
    cmds.append([\
      "*new_icond",
      "*icond_name grain_%i"%grainMapping[grain],
      "*icond_type state_variable",
      "*icond_param_value state_var_id 3",
      "*icond_dof_value var %i"%(grain+1),
      "*add_icond_elements",
      "grain_%i"%grainMapping[grain],
      "",
      ])
  cmds.append([\
    "*new_icond",
    "*icond_name rim",
    "*icond_type state_variable",
    "*icond_param_value state_var_id 3",
    "*icond_dof_value var %i"%(grainNumber+1),
    "*add_icond_elements",
    "matrix",
    ])
  return cmds


def boundary_conditions(rate,thickness, a,size):
  inner = size*(1 - 1.0e-4) * a
  outer = size*(1 + 1.0e-4) * a
  upper = size*(1 + 1.0e-4) * a
  lower = size*(1 - 1.0e-4) * a

  return [\
    "*new_md_table 1 1",
    "*table_name linear",
    "*set_md_table_type 1 time",
    "*table_add",
    "0 0",
    "1 1",
    "*select_method_box",                                 # box select: each selection box is given as an x-, y-, and z-range
    "*new_apply",
    "*apply_name pull_bottom",
    "*apply_type fixed_displacement",
    "*apply_dof y",
    "*apply_dof_value y %f"%(-rate*(inner+outer)/2.0),
    "*apply_dof_table y linear",
    "*select_clear_nodes",
    "*select_nodes",
    "%f %f"%(-outer,outer),                               # bottom face of the frame
    "%f %f"%(-upper,-lower),
    "%f %f"%(-.0001*a,(thickness+1.0e-4)*a),
    "*add_apply_nodes",
    "all_selected",
    "*new_apply",
    "*apply_name pull_top",
    "*apply_type fixed_displacement",
    "*apply_dof y",
    "*apply_dof_value y %f"%(rate*(inner+outer)/2.0),
    "*apply_dof_table y linear",
    "*select_clear_nodes",
    "*select_nodes",
    "%f %f"%(-outer,outer),                               # top face of the frame
    "%f %f"%(lower,upper),
    "%f %f"%(-.0001*a,(thickness+1.0e-4)*a),
    "*add_apply_nodes",
    "all_selected",
    "*new_apply",
    "*apply_name fix_x",
    "*apply_type fixed_displacement",
    "*apply_dof x",
    "*apply_dof_value x 0",
    "*select_clear_nodes",
    "*select_nodes",
    "%f %f"%(-outer,-inner),                              # four corner boxes along the left edge, front and back
    "%f %f"%(lower,upper),
    "%f %f"%(-.0001*a,.0001*a),
    "%f %f"%(-outer,-inner),
    "%f %f"%(lower,upper),
    "%f %f"%((thickness-1.0e-4)*a,(thickness+1.0e-4)*a),
    "%f %f"%(-outer,-inner),
    "%f %f"%(-upper,-lower),
    "%f %f"%(-.0001*a,.0001*a),
    "%f %f"%(-outer,-inner),
    "%f %f"%(-upper,-lower),
    "%f %f"%((thickness-1.0e-4)*a,(thickness+1.0e-4)*a),
    "*add_apply_nodes",
    "all_selected",
    "*new_apply",
    "*apply_name fix_z",
    "*apply_type fixed_displacement",
    "*apply_dof z",
    "*apply_dof_value z 0",
    "*select_clear_nodes",
    "*select_nodes",
    "%f %f"%(-outer,-inner),                              # four corner boxes on the front face
    "%f %f"%(lower,upper),
    "%f %f"%(-.0001*a,.0001*a),
    "%f %f"%(-outer,-inner),
    "%f %f"%(-upper,-lower),
    "%f %f"%(-.0001*a,.0001*a),
    "%f %f"%(inner,outer),
    "%f %f"%(lower,upper),
    "%f %f"%(-.0001*a,.0001*a),
    "%f %f"%(inner,outer),
    "%f %f"%(-upper,-lower),
    "%f %f"%(-.0001*a,.0001*a),
    "*add_apply_nodes",
    "all_selected",
    "*select_clear",
    "*select_reset",
    ]

def materials():
  return [\
    "*new_material",
    "*material_name patch",
    "*material_type mechanical:hypoelastic",
    "*material_option hypoelastic:method:hypela2",
    "*material_option hypoelastic:pass:def_rot",
    "*add_material_elements",
    "all_existing",
    ]


def loadcase(time,incs,Ftol):
  return [\
    "*new_loadcase",
    "*loadcase_name puller",
    "*loadcase_type static",
    "*loadcase_value time",
    "%g"%time,
    "*loadcase_value nsteps",
    "%i"%incs,
    "*loadcase_value maxrec",
    "20",
    "*loadcase_value ntime_cuts",
    "30",
    "*loadcase_value force",
    "%g"%Ftol,
    ]


def job(grainNumber,grainMapping):
  return [\
    "*new_job",
    "*job_name pull",
    "*job_class mechanical",
    "*add_job_loadcases puller",
    "*add_job_iconds homogenization",
    ["*add_job_iconds grain_%i"%i for i in grainMapping[:grainNumber]],
    "*add_job_iconds rim",
    "*job_option dimen:three | 3D analysis",
    "*job_option strain:large | finite strains",
    "*job_option large_strn_proc:upd_lagrange | updated Lagrange framework",
    "*job_option plas_proc:multiplicative | multiplicative decomp of F",
    "*job_option solver_nonsym:on | nonsymmetrical solution",
    "*job_option solver:mfront_sparse | multi-frontal sparse",
    "*job_param stef_boltz 5.670400e-8",
    "*job_param univ_gas_const 8.314472",
    "*job_param planck_radiation_2 1.4387752e-2",
    "*job_param speed_light_vacuum 299792458",
    "*job_usersub_file /san/%s/FEM/subroutine_svn/mpie_cpfem_marc2007r1.f90 | subroutine definition"%(pwd.getpwuid(os.geteuid())[0].rpartition("\\")[2]),   # requires "import pwd" among the imports at the top of this script
    "*job_option user_source:compile_save",
    ]


def postprocess():
  return [\
    "*add_post_tensor stress",
    "*add_post_tensor strain",
    "*add_post_var von_mises",
    "",
    ]


def cleanUp(a):
  return [\
    "*remove_curves",
    "all_existing",
    "*remove_points",
    "all_existing",
    "*set_sweep_tolerance %f"%(1e-3*a),
    "*sweep_all",
    "*renumber_all",
    ]


# ----------------------- MAIN -------------------------------

parser = OptionParser()
parser.add_option("-p", "--port", type="int",\
                  dest="port",\
                  help="Mentat connection port")
parser.add_option("-a", "--patchsize", type="float",\
                  dest="size",\
                  help="height of patch [%default]")
parser.add_option("-f", "--frame", type="float",\
                  dest="frame",\
                  help="frame thickness in units of patch height [%default]")
parser.add_option("-n", "--resolution", type="int",\
                  dest="resolution",\
                  help="number of elements along patch perimeter [%default]")
parser.add_option("-e", "--strain", type="float",\
                  dest="strain",\
                  help="final strain to reach in simulation [%default]")
parser.add_option("-r", "--rate", type="float",\
                  dest="strainrate",\
                  help="(engineering) strain rate to simulate [%default]")
parser.add_option("-i", "--increments", type="int",\
                  dest="increments",\
                  help="number of increments to take [%default]")
parser.add_option("-s", "--imagesize", type="int",\
                  dest="imgsize",\
                  help="size of image [%default]")
parser.add_option("-b", "--border", type="int",\
                  dest="border",\
                  help="border of image [%default]")
parser.add_option("-t", "--tolerance", type="float",\
                  dest="tolerance",\
                  help="relative tolerance of pixel positions to be swept [%default]")
parser.add_option("-m", "--mesh", choices=['dt_planar_trimesh','af_planar_trimesh','af_planar_quadmesh'],\
                  dest="mesh",\
                  help="algorithm and element type for automeshing [%default]")

parser.set_defaults(size = 1.0)
parser.set_defaults(frame = 0.5)
parser.set_defaults(resolution = 30)
parser.set_defaults(strain = 0.2)
parser.set_defaults(strainrate = 1.0e-3)
parser.set_defaults(increments = 200)
parser.set_defaults(imgsize = 0)
parser.set_defaults(border = 20)
parser.set_defaults(tolerance = 1.0e-3)
parser.set_defaults(mesh = 'dt_planar_trimesh')

(options, args) = parser.parse_args()
if not len(args):
  parser.error('no boundary file specified')

try:
  boundaryFile = open(args[0])
  boundarySegments = boundaryFile.readlines()
  boundaryFile.close()
except IOError:
  print 'unable to read boundary file "%s"'%args[0]
  sys.exit(-1)

myName = os.path.splitext(args[0])[0]
print "\n",myName
orientationData = rcbOrientationParser(boundarySegments)
rcData = rcbParser(boundarySegments,options.size,options.tolerance,myName,options.imgsize,options.border)

# ----- write texture data to file -----
configFile = open(os.path.splitext(args[0])[0]+'.config','w')
configFile.write('\n\n<microstructure>\n\n')
for i,grain in enumerate(rcData['grainMapping']):
  configFile.write('\n[grain %i]\n'%grain)
  configFile.write('(constituent)\tphase %i\ttexture %i\tfraction 1.0\n'%(i+1,i+1))

configFile.write('\n\n<phase>\n\n')
for grain in rcData['grainMapping']:
  configFile.write('\n[grain %i]\n'%grain)

configFile.write('\n\n<texture>\n\n')
for grain in rcData['grainMapping']:
  configFile.write('\n[grain %i]\n'%grain)
  configFile.write('(gauss)\tphi1\t%f\tphi\t%f\tphi2\t%f\tscatter\t0.0\tfraction\t1.0\n'\
                   %(math.degrees(orientationData[grain-1][0]),math.degrees(orientationData[grain-1][1]),math.degrees(orientationData[grain-1][2])))
configFile.close()

rcData['offsetPoints']   = 1+4                            # gage definition generates 4 points
rcData['offsetSegments'] = 1+4                            # gage definition generates 4 segments

cmds = [\
  init(),
  sample(options.size,12,options.frame+0.5),
  patch(options.size,options.resolution,options.mesh,rcData),
  gage(options.mesh,rcData),
  expand3D(options.size/6,4),
  cleanUp(options.size),
  materials(),
  initial_conditions(len(rcData['grain']),rcData['grainMapping']),
  boundary_conditions(options.strainrate,options.size/6,options.size,options.frame+0.5),
  loadcase(options.strain/options.strainrate,options.increments,0.03),
  job(len(rcData['grain']),rcData['grainMapping']),
  postprocess(),
  ["*identify_sets","*regen","*fill_view","*save_as_model %s yes"%(myName)],
  ]

outputLocals = {}
if options.port != None:
  py_connect('',options.port)
  output(cmds,outputLocals,'Mentat')
  py_disconnect()
else:
  output(cmds,outputLocals,'Stdout')

print outputLocals

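# Typical invocation (script and file names are hypothetical): with a Mentat session
# listening on the given port the command stream is piped in directly, without -p it is
# printed to stdout for inspection.
#
#   patchFromReconstructedBoundaries.py -p 40007 -n 30 -e 0.2 -r 1e-3 myBoundaries.txt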
# "*job_option large:on | large displacement",
|
||||||
|
# "*job_option plasticity:l_strn_mn_add | large strain additive",
|
||||||
|
# "*job_option cdilatation:on | constant dilatation",
|
||||||
|
# "*job_option update:on | updated lagrange procedure",
|
||||||
|
# "*job_option finite:on | large strains",
|
||||||
|
# "*job_option restart_mode:write | enable restarting",
|
|
@ -0,0 +1,175 @@
|
||||||
|
#!/usr/bin/env python

releases = ['2010','2008r1','2007r1','2005r3']


import sys,os,pwd,math,re
#import Image,ImageDraw
from optparse import OptionParser

for release in releases:
  libPath = '/msc/mentat%s/shlib/'%release
  if os.path.exists(libPath):
    sys.path.append(libPath)
    for subdir in [os.path.join(libPath,file)
                   for file in os.listdir(libPath)
                   if os.path.isdir(os.path.join(libPath,file))]:
      sys.path.append(subdir)
    break
from py_mentat import *

def outMentat(cmd,locals):
  if cmd[0:3] == '(!)':
    exec(cmd[3:])                                         # statement executed on the Python side only
  elif cmd[0:3] == '(?)':
    cmd = eval(cmd[3:])                                   # expression evaluated here, result sent to Mentat
    py_send(cmd)
  else:
    py_send(cmd)
  return


def outStdout(cmd,locals):
  if cmd[0:3] == '(!)':
    exec(cmd[3:])
  elif cmd[0:3] == '(?)':
    cmd = eval(cmd[3:])
    print cmd
  else:
    print cmd
  return


def output(cmds,locals,dest):
  for cmd in cmds:
    if isinstance(cmd,list):
      output(cmd,locals,dest)                             # flatten nested command lists recursively
    else:
      {\
        'Mentat': outMentat,\
        'Stdout': outStdout,\
      }[dest](cmd,locals)
  return


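# A minimal sketch of how output() flattens arbitrarily nested command lists while
# dispatching on destination (commands below are made up):
#
#   output(["*select_clear",
#           ["*add_points 0 0 0",
#            ["*add_points 1 0 0"]],
#           "*redraw"], {}, 'Stdout')                     # prints the four commands in order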
def servoLink():

  cmds = []
  base = ['x','y','z']
  box = {'min':   {'x': float(sys.maxint),'y': float(sys.maxint),'z': float(sys.maxint)},
         'max':   {'x':-float(sys.maxint),'y':-float(sys.maxint),'z':-float(sys.maxint)},
         'delta': {'x':0,'y':0,'z':0},
        }
  Nnodes = py_get_int("nnodes()")
  NodeCoords = [{'x':py_get_float("node_x(%i)"%(node)),
                 'y':py_get_float("node_y(%i)"%(node)),
                 'z':py_get_float("node_z(%i)"%(node)),} for node in range(1,1+Nnodes)]

  for node in range(Nnodes):                              # find the bounding box
    for coord in base:                                    # check each direction in turn
      box['min'][coord] = min(box['min'][coord],NodeCoords[node][coord])
      box['max'][coord] = max(box['max'][coord],NodeCoords[node][coord])

  for coord in base:                                      # calc the dimension of the bounding box
    box['delta'][coord] = box['max'][coord] - box['min'][coord]

  baseNode = {}
  linkNodes = []

  for node in range(Nnodes):                              # loop over all nodes
    key = {}
    maxFlag = {'x': False, 'y': False, 'z': False}
    Nmax = 0
    Nmin = 0
    for coord in base:                                    # for each direction
      key[coord] = "%.8e"%NodeCoords[node][coord]         # translate position to string
      if (key[coord] == "%.8e"%box['min'][coord]):        # compare to min of bounding box (i.e. is on outer face?)
        Nmin += 1                                         # count outer (back) face membership
      elif (key[coord] == "%.8e"%box['max'][coord]):      # compare to max of bounding box (i.e. is on outer face?)
        Nmax += 1                                         # count outer (front) face membership
        maxFlag[coord] = True                             # remember face membership (for linked nodes)

    if Nmin > 0 and Nmin > Nmax:                          # node is on more back than front faces
      # prepare for any non-existing entries in the data structure
      if key['x'] not in baseNode.keys():
        baseNode[key['x']] = {}
      if key['y'] not in baseNode[key['x']].keys():
        baseNode[key['x']][key['y']] = {}
      if key['z'] not in baseNode[key['x']][key['y']].keys():
        baseNode[key['x']][key['y']][key['z']] = 0

      baseNode[key['x']][key['y']][key['z']] = node+1     # remember the base node id

    elif Nmax > 0 and Nmax >= Nmin:                       # node is on at least as many front as back faces
      linkNodes.append({'id': node+1,'coord': NodeCoords[node], 'onFaces': Nmax,'faceMember': maxFlag})

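# Face membership above is decided on "%.8e"-quantized coordinate strings rather than raw
# floats; a minimal sketch with made-up numbers:
#
#   print "%.8e"%1.00000000049 == "%.8e"%1.0              # -> True, same face within 8 digits
#   print "%.8e"%1.0000001     == "%.8e"%1.0              # -> False, clearly distinct position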
  baseCorner = baseNode["%.8e"%box['min']['x']]["%.8e"%box['min']['y']]["%.8e"%box['min']['z']]   # detect ultimate base node

  for node in linkNodes:                                  # loop over all linked nodes
    linkCoord = [node['coord']]                           # start list of control node coords with my coords
    for dir in base:                                      # check for each direction
      if node['faceMember'][dir]:                         # me on this front face
        linkCoord[0][dir] = box['min'][dir]               # project me onto rear face along dir (modifies the coord dict in place)
        linkCoord.append({'x':box['min']['x'],'y':box['min']['y'],'z':box['min']['z'],})          # append base corner
        linkCoord[-1][dir] = box['max'][dir]              # stretch it to corresponding control leg of "dir"

    nLinks = len(linkCoord)

    for dof in [1,2,3]:
      cmds.append([
        "*new_link *link_class servo",
        "*link_class servo *tied_node %i"%node['id'],
        "*link_class servo *tied_dof %i"%dof,
        "*servo_nterms %i"%(1+nLinks),
        ])
      for i in range(nLinks):
        cmds.append([
          "*link_class servo *servo_ret_node %i %i"%(i+1,baseNode["%.8e"%linkCoord[i]['x']]["%.8e"%linkCoord[i]['y']]["%.8e"%linkCoord[i]['z']]),
          "*link_class servo *servo_ret_dof %i %i"%(i+1,dof),
          "*link_class servo *servo_ret_coef %i 1"%(i+1),
          ])
      cmds.append([
        "*link_class servo *servo_ret_node %i %i"%(1+nLinks,baseCorner),
        "*link_class servo *servo_ret_dof %i %i"%(1+nLinks,dof),
        "*link_class servo *servo_ret_coef %i -%i"%(1+nLinks,nLinks-1),
        ])

  cmds.append([
    "*select_nodes",
    ["%i"%node['id'] for node in linkNodes],
    "#",
    ])

  return cmds

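# Reading off the retained terms and coefficients above, each servo link imposes, per dof,
# the periodicity constraint
#
#   u_front = u_back + sum over member faces d of (u_control,d - u_corner)
#
# i.e. the projected back-face partner, one control leg per face membership (coefficient 1
# each), and the base corner with coefficient -(nLinks-1) to cancel the duplicated corners.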
# ----------------------- MAIN -------------------------------

parser = OptionParser()
parser.add_option("-p", "--port", type="int",\
                  dest="port",\
                  help="Mentat connection port [%default]")
parser.add_option("-v", "--verbose", action="store_true",\
                  dest="verbose",\
                  help="write Mentat command stream also to stdout [%default]")
parser.set_defaults(port=40007)
parser.set_defaults(verbose=False)

(options, args) = parser.parse_args()

outputLocals = {}
print 'waiting to connect...'
py_connect('',options.port)
print 'connected...'
output(['*remove_all_servos',
        '*sweep_all',
        '*renumber_nodes'],outputLocals,'Mentat')         # script depends on consecutive numbering of nodes
cmds = servoLink()
output(cmds,outputLocals,'Mentat')
py_disconnect()

if options.verbose:
  output(cmds,outputLocals,'Stdout')
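# Typical use (script name is hypothetical; port matches the default above): start Mentat
# with its Python socket enabled, load the box mesh, then regenerate all servo links with
#
#   mentat_pbcOnBoxMesh.py -p 40007 -v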