Merge branch 'development' of magit1.mpie.de:damask/DAMASK into development
commit 219d489df4
@ -1,61 +0,0 @@
#!/usr/bin/env python
# -*- coding: UTF-8 no BOM -*-

import os,string,h5py
import numpy as np
from optparse import OptionParser
import damask

# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------

parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
Add column(s) containing Cauchy stress based on given column(s) of
deformation gradient and first Piola--Kirchhoff stress.

""" + string.replace('$Id$','\n','\\n')
)

parser.add_option('-f','--defgrad', dest='defgrad', \
                  help='heading of columns containing deformation gradient [%default]')
parser.add_option('-p','--stress', dest='stress', \
                  help='heading of columns containing first Piola--Kirchhoff stress [%default]')
parser.add_option('-o','--output', dest='output', \
                  help='group containing requested data [%default]')
parser.set_defaults(defgrad = 'f')
parser.set_defaults(stress = 'p')
parser.set_defaults(output = 'crystallite')

(options,filenames) = parser.parse_args()

if options.defgrad is None or options.stress is None or options.output is None:
  parser.error('missing data column...')

# ------------------------------------------ setup file handles ---------------------------------------

files = []
for name in filenames:
  if os.path.exists(name):
    files.append({'name':name, 'file':h5py.File(name,"a")})

# ------------------------------------------ loop over input files ------------------------------------

for myFile in files:
  print(myFile['name'])

# ------------------------------------------ loop over increments -------------------------------------
  for inc in myFile['file']['increments'].keys():
    print("Current Increment: "+inc)
    for instance in myFile['file']['increments/'+inc+'/'+options.output].keys():
      dsets = myFile['file']['increments/'+inc+'/'+options.output+'/'+instance].keys()
      if (options.defgrad in dsets and options.stress in dsets):
        defgrad = myFile['file']['increments/'+inc+'/'+options.output+'/'+instance+'/'+options.defgrad]
        stress  = myFile['file']['increments/'+inc+'/'+options.output+'/'+instance+'/'+options.stress]
        cauchy  = np.zeros(np.shape(stress),'f')
        for p in range(stress.shape[0]):
          cauchy[p,...] = 1.0/np.linalg.det(defgrad[p,...])*np.dot(stress[p,...],defgrad[p,...].T)  # [Cauchy] = (1/det(F)) * [P].[F_transpose]
        cauchyFile = myFile['file']['increments/'+inc+'/'+options.output+'/'+instance].create_dataset('cauchy', data=cauchy)
        cauchyFile.attrs['units'] = 'Pa'
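For reference, the per-point loop above implements the push forward sigma = det(F)^-1 * P . F^T. A vectorized NumPy equivalent would be a one-liner (a sketch, not part of this commit; P and F are assumed to be (...,3,3) arrays):

import numpy as np

def cauchy(P,F):
  """Cauchy stress from first Piola--Kirchhoff stress P and deformation gradient F,
     sigma = det(F)^-1 * P . F^T, vectorized over all leading dimensions (sketch)."""
  return np.einsum('...ij,...kj->...ik',P,F)/np.linalg.det(F)[...,None,None]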
@ -1,144 +0,0 @@
#!/usr/bin/env python
# -*- coding: UTF-8 no BOM -*-

import os,sys,math,shutil
import numpy as np
import damask
from optparse import OptionParser

scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID   = ' '.join([scriptName,damask.version])

# -----------------------------
# MAIN FUNCTION STARTS HERE
# -----------------------------

# --- input parsing

parser = OptionParser(usage='%prog [options] resultfile', description = """
Create vtk files for the (deformed) geometry that belongs to a .t16 (MSC.Marc) results file.

""", version = scriptID)

parser.add_option('-d','--dir', dest='dir', \
                  help='name of subdirectory to hold output [%default]')
parser.add_option('-r','--range', dest='range', type='int', nargs=3, \
                  help='range of positions (or increments) to output (start, end, step) [all]')
parser.add_option('--increments', action='store_true', dest='getIncrements', \
                  help='switch to increment range [%default]')
parser.add_option('-t','--type', dest='type', type='choice', choices=['ipbased','nodebased'], \
                  help='processed geometry type [ipbased and nodebased]')

parser.set_defaults(dir = 'vtk')
parser.set_defaults(getIncrements= False)

(options, files) = parser.parse_args()

# --- basic sanity checks

if files == []:
  parser.print_help()
  parser.error('no file specified...')

filename = os.path.splitext(files[0])[0]
if not os.path.exists(filename+'.t16'):
  parser.print_help()
  parser.error('invalid file "%s.t16" specified...'%filename)

if not options.type:
  options.type = ['nodebased', 'ipbased']
else:
  options.type = [options.type]

# --- more sanity checks

sys.path.append(damask.solver.Marc().libraryPath('../../'))
try:
  import py_post
except ImportError:
  print('error: no valid Mentat release found')
  sys.exit(-1)

# --------------------------- open results file and initialize mesh ----------

p = py_post.post_open(filename+'.t16')
p.moveto(0)
Nnodes = p.nodes()
Nincrements = p.increments() - 1                   # t16 contains one "virtual" increment (at 0)
if damask.core.mesh.mesh_init_postprocessing(filename+'.mesh') > 0:
  print('error: init not successful')
  sys.exit(-1)
Ncellnodes = damask.core.mesh.mesh_get_Ncellnodes()
unitlength = damask.core.mesh.mesh_get_unitlength()

# --------------------------- create output dir --------------------------------

dirname = os.path.abspath(os.path.join(os.path.dirname(filename),options.dir))
if not os.path.isdir(dirname):
  os.mkdir(dirname,0755)

# --------------------------- get positions --------------------------------

incAtPosition = {}
positionOfInc = {}

for position in range(Nincrements):
  p.moveto(position+1)
  incAtPosition[position] = p.increment            # remember "real" increment at this position
  positionOfInc[p.increment] = position            # remember position of "real" increment

if not options.range:
  options.getIncrements = False
  locations = range(Nincrements)                   # process all positions
else:
  options.range = list(options.range)              # convert to list
  if options.getIncrements:
    locations = [positionOfInc[x] for x in range(options.range[0],options.range[1]+1,options.range[2])
                                  if x in positionOfInc]
  else:
    locations = range( max(0,options.range[0]),
                       min(Nincrements,options.range[1]+1),
                       options.range[2] )

increments = [incAtPosition[x] for x in locations] # build list of increments to process

# --------------------------- loop over positions --------------------------------

for incCount,position in enumerate(locations):     # walk through locations

  p.moveto(position+1)                             # wind to correct position

# --- get displacements

  node_displacement = [[0,0,0] for i in range(Nnodes)]
  for n in range(Nnodes):
    if p.node_displacements():
      node_displacement[n] = map(lambda x:x*unitlength,list(p.node_displacement(n)))
  c = damask.core.mesh.mesh_build_cellnodes(np.array(node_displacement).T,Ncellnodes)
  cellnode_displacement = [[c[i][n] for i in range(3)] for n in range(Ncellnodes)]

# --- append displacements to corresponding files

  for geomtype in options.type:
    outFilename = eval('"'+eval("'%%s_%%s_inc%%0%ii.vtk'%(math.log10(max(increments+[1]))+1)")\
                      +'"%(dirname + os.sep + os.path.split(filename)[1],geomtype,increments[incCount])')
    print outFilename
    shutil.copyfile('%s_%s.vtk'%(filename,geomtype),outFilename)

    with open(outFilename,'a') as myfile:
      myfile.write("POINT_DATA %i\n"%{'nodebased':Nnodes,'ipbased':Ncellnodes}[geomtype])
      myfile.write("VECTORS displacement double\n")
      coordinates = {'nodebased':node_displacement,'ipbased':cellnode_displacement}[geomtype]
      for n in range({'nodebased':Nnodes,'ipbased':Ncellnodes}[geomtype]):
        myfile.write("%.8e %.8e %.8e\n"%(coordinates[n][0],coordinates[n][1],coordinates[n][2]))

# --------------------------- DONE --------------------------------
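The nested eval above only builds a file name whose increment number is zero-padded to the width of the largest increment. A plain equivalent without eval could look like this (hypothetical helper, not part of this commit):

import math,os

def vtkName(dirname,filename,geomtype,increments,incCount):
  """file name with increment zero-padded to the width of the largest increment (sketch)"""
  digits = int(math.log10(max(increments+[1])))+1                      # at least one digit
  return '%s_%s_inc%0*i.vtk'%(os.path.join(dirname,os.path.split(filename)[1]),
                              geomtype,digits,increments[incCount])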
@ -1,421 +0,0 @@
#!/usr/bin/env python
# -*- coding: UTF-8 no BOM -*-

import os,sys,math,string,re,time
from optparse import OptionParser, OptionGroup
import damask

scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID   = ' '.join([scriptName,damask.version])

# -----------------------------
def ParseOutputFormat(filename,homogID,crystID,phaseID):
  """parse .output* files in order to get a list of outputs"""
  myID = {'Homogenization': homogID,
          'Crystallite':    crystID,
          'Constitutive':   phaseID,
         }
  format = {}

  for what in ['Homogenization','Crystallite','Constitutive']:
    content = []
    format[what] = {'outputs':{},'specials':{'brothers':[]}}
    for prefix in ['']+map(str,range(1,17)):
      if os.path.exists(prefix+filename+'.output'+what):
        try:
          file = open(prefix+filename+'.output'+what)
          content = file.readlines()
          file.close()
          break
        except:
          pass

    if content == []: continue                                # nothing found...

    tag = ''
    tagID = 0
    for line in content:
      if re.match("\s*$",line) or re.match("#",line):         # skip blank lines and comments
        continue
      m = re.match("\[(.+)\]",line)                           # look for block indicator
      if m:                                                   # next section
        tag = m.group(1)
        tagID += 1
        format[what]['specials']['brothers'].append(tag)
        if tag == myID[what] or (myID[what].isdigit() and tagID == int(myID[what])):
          format[what]['specials']['_id'] = tagID
          format[what]['outputs'] = []
          tag = myID[what]
      else:                                                   # data from section
        if tag == myID[what]:
          (output,length) = line.split()
          output = output.lower()
          if length.isdigit():
            length = int(length)
          if re.match("\((.+)\)",output):                     # special data, e.g. (Ngrains)
            format[what]['specials'][output] = length
          elif length > 0:
            format[what]['outputs'].append([output,length])

    if '_id' not in format[what]['specials']:
      print "\nsection '%s' not found in <%s>"%(myID[what], what)
      print '\n'.join(map(lambda x:' [%s]'%x, format[what]['specials']['brothers']))

  return format


# -----------------------------
def ParsePostfile(p,filename, outputFormat, legacyFormat):
  """
  parse postfile in order to get position and labels of outputs

  needs "outputFormat" for mapping of output names to postfile output indices
  """
  startVar = {True:  'GrainCount',
              False: 'HomogenizationCount'}

# --- build statistics

  stat = { \
  'IndexOfLabel': {}, \
  'Title': p.title(), \
  'Extrapolation': p.extrapolate, \
  'NumberOfIncrements': p.increments() - 1, \
  'NumberOfNodes': p.nodes(), \
  'NumberOfNodalScalars': p.node_scalars(), \
  'LabelOfNodalScalar': [None]*p.node_scalars() , \
  'NumberOfElements': p.elements(), \
  'NumberOfElementalScalars': p.element_scalars(), \
  'LabelOfElementalScalar': [None]*p.element_scalars() , \
  'NumberOfElementalTensors': p.element_tensors(), \
  'LabelOfElementalTensor': [None]*p.element_tensors(), \
  }

# --- find labels

  for labelIndex in range(stat['NumberOfNodalScalars']):
    label = p.node_scalar_label(labelIndex)
    stat['IndexOfLabel'][label] = labelIndex
    stat['LabelOfNodalScalar'][labelIndex] = label

  for labelIndex in range(stat['NumberOfElementalScalars']):
    label = p.element_scalar_label(labelIndex)
    stat['IndexOfLabel'][label] = labelIndex
    stat['LabelOfElementalScalar'][labelIndex] = label

  for labelIndex in range(stat['NumberOfElementalTensors']):
    label = p.element_tensor_label(labelIndex)
    stat['IndexOfLabel'][label] = labelIndex
    stat['LabelOfElementalTensor'][labelIndex] = label

  if 'User Defined Variable 1' in stat['IndexOfLabel']:       # output format without dedicated names?
    stat['IndexOfLabel'][startVar[legacyFormat]] = stat['IndexOfLabel']['User Defined Variable 1']  # adjust first named entry

  if startVar[legacyFormat] in stat['IndexOfLabel']:          # does the result file contain relevant user defined output at all?
    startIndex = stat['IndexOfLabel'][startVar[legacyFormat]]
    stat['LabelOfElementalScalar'][startIndex] = startVar[legacyFormat]

# We now have to find a mapping for each output label as defined in the .output* files to the output position in the post file
# Since we know where the user defined outputs start ("startIndex"), we can simply assign increasing indices to the labels
# given in the .output* file

  offset = 1
  if legacyFormat:
    stat['LabelOfElementalScalar'][startIndex + offset] = startVar[not legacyFormat]        # add HomogenizationCount as second
    offset += 1

  for (name,N) in outputFormat['Homogenization']['outputs']:
    for i in range(N):
      label = {False:    '%s'%(    name),
               True:  '%i_%s'%(i+1,name)}[N > 1]
      stat['IndexOfLabel'][label] = startIndex + offset
      stat['LabelOfElementalScalar'][startIndex + offset] = label
      offset += 1

  if not legacyFormat:
    stat['IndexOfLabel'][startVar[not legacyFormat]] = startIndex + offset
    stat['LabelOfElementalScalar'][startIndex + offset] = startVar[not legacyFormat]        # add GrainCount
    offset += 1

  if '(ngrains)' in outputFormat['Homogenization']['specials']:
    for grain in range(outputFormat['Homogenization']['specials']['(ngrains)']):

      stat['IndexOfLabel']['%i_CrystalliteCount'%(grain+1)] = startIndex + offset           # report crystallite count
      stat['LabelOfElementalScalar'][startIndex + offset] = '%i_CrystalliteCount'%(grain+1) # add CrystalliteCount
      offset += 1

      for (name,N) in outputFormat['Crystallite']['outputs']:                               # add crystallite outputs
        for i in range(N):
          label = {False:    '%i_%s'%(grain+1,    name),
                   True:  '%i_%i_%s'%(grain+1,i+1,name)}[N > 1]
          stat['IndexOfLabel'][label] = startIndex + offset
          stat['LabelOfElementalScalar'][startIndex + offset] = label
          offset += 1

      stat['IndexOfLabel']['%i_ConstitutiveCount'%(grain+1)] = startIndex + offset          # report constitutive count
      stat['LabelOfElementalScalar'][startIndex + offset] = '%i_ConstitutiveCount'%(grain+1) # add ConstitutiveCount
      offset += 1

      for (name,N) in outputFormat['Constitutive']['outputs']:                              # add constitutive outputs
        for i in range(N):
          label = {False:    '%i_%s'%(grain+1,    name),
                   True:  '%i_%i_%s'%(grain+1,i+1,name)}[N > 1]
          stat['IndexOfLabel'][label] = startIndex + offset
          try:
            stat['LabelOfElementalScalar'][startIndex + offset] = label
          except IndexError:
            print 'trying to assign %s at position %i+%i'%(label,startIndex,offset)
            sys.exit(1)
          offset += 1

  return stat


# -----------------------------
def GetIncrementLocations(p,Nincrements,options):
  """get mapping between positions in postfile and increment number"""
  incAtPosition = {}
  positionOfInc = {}

  for position in range(Nincrements):
    p.moveto(position+1)
    incAtPosition[position] = p.increment            # remember "real" increment at this position
    positionOfInc[p.increment] = position            # remember position of "real" increment

  if not options.range:
    options.getIncrements = False
    locations = range(Nincrements)                   # process all positions
  else:
    options.range = list(options.range)              # convert to list
    if options.getIncrements:
      locations = [positionOfInc[x] for x in range(options.range[0],options.range[1]+1,options.range[2])
                                    if x in positionOfInc]
    else:
      locations = range( max(0,options.range[0]),
                         min(Nincrements,options.range[1]+1),
                         options.range[2] )

  increments = [incAtPosition[x] for x in locations] # build list of increments to process

  return [increments,locations]


# -----------------------------
def SummarizePostfile(stat,where=sys.stdout):

  where.write('\n\n')
  where.write('title:\t%s'%stat['Title'] + '\n\n')
  where.write('extrapolation:\t%s'%stat['Extrapolation'] + '\n\n')
  where.write('increments:\t%i'%(stat['NumberOfIncrements']) + '\n\n')
  where.write('nodes:\t%i'%stat['NumberOfNodes'] + '\n\n')
  where.write('elements:\t%i'%stat['NumberOfElements'] + '\n\n')
  where.write('nodal scalars:\t%i'%stat['NumberOfNodalScalars'] + '\n\n  '\
              +'\n  '.join(stat['LabelOfNodalScalar']) + '\n\n')
  where.write('elemental scalars:\t%i'%stat['NumberOfElementalScalars'] + '\n\n  '\
              + '\n  '.join(stat['LabelOfElementalScalar']) + '\n\n')
  where.write('elemental tensors:\t%i'%stat['NumberOfElementalTensors'] + '\n\n  '\
              + '\n  '.join(stat['LabelOfElementalTensor']) + '\n\n')

  return True


# -----------------------------
def SummarizeOutputfile(format,where=sys.stdout):

  where.write('\nUser Defined Outputs')
  for what in format.keys():
    where.write('\n\n %s:'%what)
    for output in format[what]['outputs']:
      where.write('\n  %s'%output)

  return True


# -----------------------------
def writeHeader(myfile,stat,geomtype):

  myfile.write('2\theader\n')
  myfile.write(string.replace('$Id$','\n','\\n')+
               '\t' + ' '.join(sys.argv[1:]) + '\n')
  if geomtype == 'nodebased':
    myfile.write('node')
    for i in range(stat['NumberOfNodalScalars']):
      myfile.write('\t%s'%''.join(stat['LabelOfNodalScalar'][i].split()))

  elif geomtype == 'ipbased':
    myfile.write('elem\tip')
    for i in range(stat['NumberOfElementalScalars']):
      myfile.write('\t%s'%''.join(stat['LabelOfElementalScalar'][i].split()))

  myfile.write('\n')

  return True


# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------

parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
Extract data from a .t16 (MSC.Marc) results file.

""", version = scriptID)

parser.add_option('-i','--info', action='store_true', dest='info', \
                  help='list contents of resultfile [%default]')
parser.add_option('-l','--legacy', action='store_true', dest='legacy', \
                  help='legacy user result block (starts with GrainCount) [%default]')
parser.add_option('-d','--dir', dest='dir', \
                  help='name of subdirectory to hold output [%default]')
parser.add_option('-r','--range', dest='range', type='int', nargs=3, \
                  help='range of positions (or increments) to output (start, end, step) [all]')
parser.add_option('--increments', action='store_true', dest='getIncrements', \
                  help='switch to increment range [%default]')
parser.add_option('-t','--type', dest='type', type='choice', choices=['ipbased','nodebased'], \
                  help='processed geometry type [ipbased and nodebased]')

group_material = OptionGroup(parser,'Material identifier')

group_material.add_option('--homogenization', dest='homog', \
                          help='homogenization identifier (as string or integer [%default])', metavar='<ID>')
group_material.add_option('--crystallite', dest='cryst', \
                          help='crystallite identifier (as string or integer [%default])', metavar='<ID>')
group_material.add_option('--phase', dest='phase', \
                          help='phase identifier (as string or integer [%default])', metavar='<ID>')

parser.add_option_group(group_material)

parser.set_defaults(info = False)
parser.set_defaults(legacy = False)
parser.set_defaults(dir = 'vtk')
parser.set_defaults(getIncrements= False)
parser.set_defaults(homog = '1')
parser.set_defaults(cryst = '1')
parser.set_defaults(phase = '1')

(options, files) = parser.parse_args()

# --- sanity checks

if files == []:
  parser.print_help()
  parser.error('no file specified...')

filename = os.path.splitext(files[0])[0]
if not os.path.exists(filename+'.t16'):
  parser.print_help()
  parser.error('invalid file "%s.t16" specified...'%filename)

sys.path.append(damask.solver.Marc().libraryPath('../../'))
try:
  import py_post
except ImportError:
  print('error: no valid Mentat release found')
  sys.exit(-1)

if not options.type:
  options.type = ['nodebased', 'ipbased']
else:
  options.type = [options.type]

# --- initialize mesh data

if damask.core.mesh.mesh_init_postprocessing(filename+'.mesh'):
  print('error: init not successful')
  sys.exit(-1)

# --- check if ip data available for all elements; if not, then .t19 file is required

p = py_post.post_open(filename+'.t16')
asciiFile = False
p.moveto(1)
for e in range(p.elements()):
  if not damask.core.mesh.mesh_get_nodeAtIP(str(p.element(e).type),1):
    if os.path.exists(filename+'.t19'):
      p.close()
      p = py_post.post_open(filename+'.t19')
      asciiFile = True
    break

# --- parse *.output and *.t16 file

outputFormat = ParseOutputFormat(filename,options.homog,options.cryst,options.phase)
p.moveto(1)
p.extrapolation('translate')
stat = ParsePostfile(p,filename,outputFormat,options.legacy)

# --- output info

if options.info:
  print '\n\nMentat release %s'%damask.solver.Marc().version('../../')
  SummarizePostfile(stat)
  SummarizeOutputfile(outputFormat)
  sys.exit(0)

# --- create output dir

dirname = os.path.abspath(os.path.join(os.path.dirname(filename),options.dir))
if not os.path.isdir(dirname):
  os.mkdir(dirname,0755)

# --- get positions

[increments,locations] = GetIncrementLocations(p,stat['NumberOfIncrements'],options)

# --- loop over positions

time_start = time.time()
for incCount,position in enumerate(locations):      # walk through locations
  p.moveto(position+1)                              # wind to correct position
  time_delta = (float(len(locations)) / float(incCount+1) - 1.0) * (time.time() - time_start)
  sys.stdout.write("\r(%02i:%02i:%02i) processing increment %i of %i..."\
                   %(time_delta//3600,time_delta%3600//60,time_delta%60,incCount+1,len(locations)))
  sys.stdout.flush()

# --- write header

  outFilename = {}
  for geomtype in options.type:
    outFilename[geomtype] = eval('"'+eval("'%%s_%%s_inc%%0%ii.txt'%(math.log10(max(increments+[1]))+1)")\
                                +'"%(dirname + os.sep + os.path.split(filename)[1],geomtype,increments[incCount])')
    with open(outFilename[geomtype],'w') as myfile:
      writeHeader(myfile,stat,geomtype)

# --- write node based data

      if geomtype == 'nodebased':
        for n in range(stat['NumberOfNodes']):
          myfile.write(str(n))
          for l in range(stat['NumberOfNodalScalars']):
            myfile.write('\t'+str(p.node_scalar(n,l)))
          myfile.write('\n')

# --- write ip based data

      elif geomtype == 'ipbased':
        for e in range(stat['NumberOfElements']):
          if asciiFile:
            print 'ascii postfile not yet supported'
            sys.exit(-1)
          else:
            ipData = [[]]
            for l in range(stat['NumberOfElementalScalars']):
              data = p.element_scalar(e,l)
              for i in range(len(data)):                      # at least as many nodes as ips
                node = damask.core.mesh.mesh_get_nodeAtIP(str(p.element(e).type),i+1)  # fortran indexing starts at 1
                if not node: break                            # no more ips
                while i >= len(ipData): ipData.append([])
                ipData[i].extend([data[node-1].value])        # python indexing starts at 0
            for i in range(len(ipData)):
              myfile.write('\t'.join(map(str,[e,i]+ipData[i]))+'\n')

p.close()
sys.stdout.write("\n")
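The progress line above estimates the remaining wall time by linear extrapolation: remaining = (total/done - 1) * elapsed. As a standalone helper it would read (a sketch; remainingTime is a hypothetical name, not part of this commit):

import time

def remainingTime(done,total,start):
  """linear-extrapolation ETA as used in the loop above: (total/done - 1) * elapsed (sketch)"""
  remaining = (float(total)/float(done) - 1.0)*(time.time() - start)
  return '%02i:%02i:%02i'%(remaining//3600,remaining%3600//60,remaining%60)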
@ -1,82 +0,0 @@
#!/usr/bin/env python
# -*- coding: UTF-8 no BOM -*-

import os,glob,re
import damask
from optparse import OptionParser

scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID   = ' '.join([scriptName,damask.version])

# -----------------------------
def findTag(filename,tag):

  with open(filename,'r') as myfile:
    mypattern = re.compile(str(tag))
    for line in myfile:
      if mypattern.search(line): return True
  return False


# -----------------------------
# MAIN FUNCTION STARTS HERE
# -----------------------------

# --- input parsing

parser = OptionParser(usage='%prog [options] directory', description = """
Add data from an ASCII table to a VTK geometry file.

""", version = scriptID)

parser.add_option('-s','--sub', action='store_true', dest='subdir', \
                  help='include files in subdirectories [%default]')
parser.set_defaults(subdir = False)

(options, dirname) = parser.parse_args()

# --- sanity checks

if dirname == []:
  parser.print_help()
  parser.error('no directory specified...')
else:
  dirname = os.path.abspath(dirname[0])             # only use first argument

if not os.path.isdir(dirname):
  parser.print_help()
  parser.error('invalid directory "%s" specified...'%dirname)

# --- loop over "nodebased" and "ipbased" data files and
#     copy data to corresponding geometry files

dataSetTag = {'nodebased':'POINT_DATA', 'ipbased':'CELL_DATA'}
for geomtype in ['nodebased','ipbased']:
  for vtkfilename in glob.iglob(dirname+os.sep+'*'+geomtype+'*.vtk'):

    if not os.path.dirname(vtkfilename) == dirname and not options.subdir: continue  # include files in subdir?
    datafilename = os.path.splitext(vtkfilename)[0] + '.txt'
    if not os.path.exists(datafilename): continue   # no corresponding datafile found

# --- read data from datafile

    with open(datafilename,'r') as datafile:        # open datafile in read mode
      table = damask.ASCIItable(fileIn=datafile)    # use ASCIItable class to read data file
      table.head_read()                             # read ASCII header info
      myData = []
      while table.data_read():                      # read line in datafile
        myData.append(table.data)
      myData = zip(*myData)                         # reorder data: first index now label, not node

# --- append data to vtkfile

    with open(vtkfilename,'a') as vtkfile:          # open vtkfile in append mode
      print vtkfilename
      if not findTag(vtkfilename,dataSetTag[geomtype]):            # check if data set is already present...
        vtkfile.write(dataSetTag[geomtype] + ' %i'%len(myData[0])) # ... if not, write keyword
      for idx,label in enumerate(table.labels):                    # write data
        vtkfile.write('\nSCALARS '+label+' double 1\nLOOKUP_TABLE default\n')  # all scalar data
        vtkfile.write('\n'.join(map(str,myData[idx])))
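The zip(*myData) idiom above transposes the row-wise table into one sequence per column label, so that each SCALARS block can be written with a single join. A minimal illustration (not part of this commit):

rows = [[1,10],[2,20],[3,30]]    # one row per node: (id,value)
columns = zip(*rows)             # one tuple per label
assert list(columns) == [(1,2,3),(10,20,30)]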
@ -1,135 +0,0 @@
#!/usr/bin/env python
# -*- coding: UTF-8 no BOM -*-

import os,sys,vtk
import damask
from collections import defaultdict
from optparse import OptionParser

scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID   = ' '.join([scriptName,damask.version])

# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------

parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
Add scalar and RGB tuples from ASCIItable to existing VTK voxel cloud (.vtu/.vtk).

""", version = scriptID)

parser.add_option('-v', '--vtk', dest='vtk', \
                  help = 'VTK file name')
parser.add_option('-s', '--scalar', dest='scalar', action='extend', \
                  help = 'scalar values')
parser.add_option('-c', '--color', dest='color', action='extend', \
                  help = 'RGB color tuples')

parser.set_defaults(scalar = [],
                    color = [],
                    render = False,
                   )

(options, filenames) = parser.parse_args()

if options.vtk is None or not os.path.exists(options.vtk):
  parser.error('VTK file does not exist')

if os.path.splitext(options.vtk)[1] == '.vtu':
  reader = vtk.vtkXMLUnstructuredGridReader()
  reader.SetFileName(options.vtk)
  reader.Update()
  uGrid = reader.GetOutput()
elif os.path.splitext(options.vtk)[1] == '.vtk':
  reader = vtk.vtkGenericDataObjectReader()
  reader.SetFileName(options.vtk)
  reader.Update()
  uGrid = reader.GetUnstructuredGridOutput()
else:
  parser.error('unsupported VTK file type extension')

Npoints = uGrid.GetNumberOfPoints()
Ncells  = uGrid.GetNumberOfCells()

sys.stderr.write('{}: {} points and {} cells...\n'.format(damask.util.emph(options.vtk),Npoints,Ncells))

# --- loop over input files -------------------------------------------------------------------------

if filenames == []: filenames = [None]

for name in filenames:
  try:
    table = damask.ASCIItable(name = name,
                              buffered = False, readonly = True)
  except: continue
  damask.util.croak(damask.util.emph(scriptName)+(': '+name if name else ''))

# --- interpret header ----------------------------------------------------------------------------

  table.head_read()

  remarks = []
  errors  = []
  VTKarray = {}
  active = defaultdict(list)

  for datatype,dimension,label in [['scalar',1,options.scalar],
                                   ['color',3,options.color],
                                  ]:
    for i,dim in enumerate(table.label_dimension(label)):
      me = label[i]
      if dim == -1:         remarks.append('{} "{}" not found...'.format(datatype,me))
      elif dim > dimension: remarks.append('"{}" not of dimension {}...'.format(me,dimension))
      else:
        damask.util.croak('adding {} {}'.format(datatype,me))
        active[datatype].append(me)

        if datatype in ['scalar']:
          VTKarray[me] = vtk.vtkDoubleArray()
        elif datatype == 'color':
          VTKarray[me] = vtk.vtkUnsignedCharArray()

        VTKarray[me].SetNumberOfComponents(dimension)
        VTKarray[me].SetName(label[i])

  if remarks != []: damask.util.croak(remarks)
  if errors != []:
    damask.util.croak(errors)
    table.close(dismiss=True)
    continue

# ------------------------------------------ process data ---------------------------------------

  while table.data_read():                          # read next data line of ASCII table

    for datatype,labels in active.items():          # loop over scalar,color
      for me in labels:                             # loop over all requested items
        theData = [table.data[i] for i in table.label_indexrange(me)]  # read strings
        if datatype == 'color':
          VTKarray[me].InsertNextTuple3(*map(lambda x: int(255.*float(x)),theData))
        elif datatype == 'scalar':
          VTKarray[me].InsertNextValue(float(theData[0]))

# ------------------------------------------ add data ---------------------------------------

  for datatype,labels in active.items():            # loop over scalar,color
    if datatype == 'color':
      uGrid.GetCellData().SetScalars(VTKarray[active['color'][0]])
    for label in labels:                            # loop over all requested items
      uGrid.GetCellData().AddArray(VTKarray[label])

  uGrid.Modified()
  if vtk.VTK_MAJOR_VERSION <= 5:
    uGrid.Update()

# ------------------------------------------ output result ---------------------------------------

writer = vtk.vtkXMLUnstructuredGridWriter()
writer.SetDataModeToBinary()
writer.SetCompressorTypeToZLib()
writer.SetFileName(os.path.splitext(options.vtk)[0]+'_added.vtu')
if vtk.VTK_MAJOR_VERSION <= 5:
  writer.SetInput(uGrid)
else:
  writer.SetInputData(uGrid)
writer.Write()
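Color tuples are expected as components in 0.0-1.0 and are rescaled to the 0-255 range of vtkUnsignedCharArray by int(255.*float(x)). As a standalone sketch (toRGB255 is a hypothetical helper, not part of this commit):

def toRGB255(triplet):
  """rescale 0.0-1.0 color components to 0-255 ints, as done for vtkUnsignedCharArray above (sketch)"""
  return [int(255.*float(x)) for x in triplet]

assert toRGB255(['0.0','0.5','1.0']) == [0,127,255]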
@ -1,122 +0,0 @@
#!/usr/bin/env python
# -*- coding: UTF-8 no BOM -*-

import os,sys,shutil
import damask
from optparse import OptionParser
import vtk

scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID   = ' '.join([scriptName,damask.version])

# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------

parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
Combine suffixed scalar data sets of a VTK file into vector data sets.

""", version = scriptID)

parser.add_option('-v','--vector', nargs=3, dest='vector', \
                  help='suffices indicating vector components [%default]')
parser.add_option('-s','--separator', dest='separator', \
                  help='separator between label and suffix [%default]')

parser.set_defaults(vector = ['x','y','z'])
parser.set_defaults(separator = '.')

(options, filenames) = parser.parse_args()

# --- sanity checks

if filenames == []:
  parser.print_help()
  parser.error('no file specified...')

for filename in filenames:
  if not os.path.isfile(filename):
    parser.print_help()
    parser.error('invalid file "%s" specified...'%filename)

# --- ITERATE OVER FILES AND PROCESS THEM

for filename in filenames:

  sys.stdout.write('read file "%s" ...'%filename)
  sys.stdout.flush()
  suffix = os.path.splitext(filename)[1]
  if suffix == '.vtk':
    reader = vtk.vtkUnstructuredGridReader()
    reader.ReadAllScalarsOn()
    reader.ReadAllVectorsOn()
    reader.ReadAllTensorsOn()
  elif suffix == '.vtu':
    reader = vtk.vtkXMLUnstructuredGridReader()
  else:
    parser.error('filetype "%s" not supported'%suffix)
  reader.SetFileName(filename)
  reader.Update()
  uGrid = reader.GetOutput()
  sys.stdout.write(' done\n')
  sys.stdout.flush()

# Read the scalar data

  scalarData = {}
  scalarsToBeRemoved = []
  Nscalars = uGrid.GetCellData().GetNumberOfArrays()
  for i in range(Nscalars):
    sys.stdout.write("\rread scalar data %d%%" %(100*i/Nscalars))
    sys.stdout.flush()
    scalarName = uGrid.GetCellData().GetArrayName(i)
    if scalarName.split(options.separator)[-1] in options.vector:
      label,suffix = scalarName.split(options.separator)
      if label not in scalarData:
        scalarData[label] = [[],[],[]]
      uGrid.GetCellData().SetActiveScalars(scalarName)
      scalarData[label][options.vector.index(suffix)] = uGrid.GetCellData().GetScalars(scalarName)
      scalarsToBeRemoved.append(scalarName)
  for scalarName in scalarsToBeRemoved:
    uGrid.GetCellData().RemoveArray(scalarName)
  sys.stdout.write('\rread scalar data done\n')
  sys.stdout.flush()

# Convert the scalar data to vector data

  NscalarData = len(scalarData)
  for n,label in enumerate(scalarData):
    sys.stdout.write("\rconvert to vector data %d%%" %(100*n/NscalarData))
    sys.stdout.flush()
    Nvalues = scalarData[label][0].GetNumberOfTuples()
    vectorData = vtk.vtkDoubleArray()
    vectorData.SetName(label)
    vectorData.SetNumberOfComponents(3)             # set this before SetNumberOfTuples!
    vectorData.SetNumberOfTuples(Nvalues)
    for i in range(Nvalues):
      for j in range(3):
        vectorData.SetComponent(i,j,scalarData[label][j].GetValue(i))
    uGrid.GetCellData().AddArray(vectorData)
  sys.stdout.write('\rconvert to vector data done\n')

# Write to new vtk file

  outfilename = os.path.splitext(filename)[0]+'.vtu'
  sys.stdout.write('write to file "%s" ...'%outfilename)
  sys.stdout.flush()
  writer = vtk.vtkXMLUnstructuredGridWriter()
  writer.SetFileName(outfilename+'_tmp')
  writer.SetDataModeToAscii()
  if vtk.VTK_MAJOR_VERSION <= 5: writer.SetInput(uGrid)
  else:                          writer.SetInputData(uGrid)
  writer.Write()
  sys.stdout.write(' done\n')
  sys.stdout.flush()
  shutil.move(outfilename+'_tmp',outfilename)

# --------------------------- DONE --------------------------------
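The suffix matching above groups arrays named like 'u.x','u.y','u.z' under their common stem. A sketch of the same grouping using rpartition, which also tolerates separators inside the stem (groupComponents is a hypothetical helper; the script itself splits on every separator):

def groupComponents(names,suffices=('x','y','z'),sep='.'):
  """group scalar array names like 'u.x','u.y','u.z' by stem (sketch)"""
  grouped = {}
  for n in names:
    stem,_,suffix = n.rpartition(sep)
    if stem and suffix in suffices:
      grouped.setdefault(stem,[None]*len(suffices))[suffices.index(suffix)] = n
  return grouped

assert groupComponents(['u.x','u.y','u.z','T']) == {'u': ['u.x','u.y','u.z']}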
@ -1,118 +0,0 @@
#!/usr/bin/env python
# -*- coding: UTF-8 no BOM -*-

import os,sys,vtk
import numpy as np
from optparse import OptionParser
import damask

scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID   = ' '.join([scriptName,damask.version])


# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------

parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
Create hexahedral voxels around points in an ASCIItable.

""", version = scriptID)

parser.add_option('-d', '--deformed',
                  dest = 'deformed',
                  type = 'string', metavar = 'string',
                  help = 'deformed coordinate label [%default]')
parser.add_option('-c','--coordinates',
                  dest = 'coords',
                  type = 'string', metavar='string',
                  help = 'undeformed coordinates label [%default]')
parser.set_defaults(deformed = 'ipdeformedcoord',
                    coords   = 'ipinitialcoord',
                   )

(options, filenames) = parser.parse_args()

# --- loop over input files -------------------------------------------------------------------------

if filenames == []: filenames = [None]

for name in filenames:
  try:
    table = damask.ASCIItable(name = name,
                              buffered = False, readonly = True)
  except: continue
  damask.util.report(scriptName,name)

# --------------- interpret header -----------------------------------------------------------------
  table.head_read()
  errors=[]
  if table.label_dimension(options.deformed) != 3:
    errors.append('columns "{}" have dimension {}'.format(options.deformed,table.label_dimension(options.deformed)))
  if table.label_dimension(options.coords) != 3:
    errors.append('coordinates {} are not a vector.'.format(options.coords))

  table.data_readArray([options.coords,options.deformed])

# --------------- figure out size and grid ---------------------------------------------------------
  coords = [{},{},{}]
  for i in xrange(len(table.data)):
    for j in xrange(3):
      coords[j][str(table.data[i,table.label_index(options.coords)+j])] = True
  grid = np.array(map(len,coords),'i')
  size = grid/np.maximum(np.ones(3,'d'),grid-1.0)* \
         np.array([max(map(float,coords[0].keys()))-min(map(float,coords[0].keys())),\
                   max(map(float,coords[1].keys()))-min(map(float,coords[1].keys())),\
                   max(map(float,coords[2].keys()))-min(map(float,coords[2].keys())),\
                  ],'d')                            # size from bounding box, corrected for cell-centeredness

  size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1]))  # spacing for grid==1 set to smallest among other spacings

# ------------------------------------------ process data ---------------------------------------
  hexPoints = np.array([[-1,-1,-1],
                        [ 1,-1,-1],
                        [ 1, 1,-1],
                        [-1, 1,-1],
                        [-1,-1, 1],
                        [ 1,-1, 1],
                        [ 1, 1, 1],
                        [-1, 1, 1],
                       ])

  halfDelta = 0.5*size/grid

  Points = vtk.vtkPoints()
  Hex = vtk.vtkHexahedron()
  uGrid = vtk.vtkUnstructuredGrid()

  for p in table.data:
    for i,h in enumerate(hexPoints):
      pointID = Points.InsertNextPoint(p[table.label_index(options.deformed):table.label_index(options.deformed)+3]+h*halfDelta)
      Hex.GetPointIds().SetId(i,pointID)

    uGrid.InsertNextCell(Hex.GetCellType(), Hex.GetPointIds())

  uGrid.SetPoints(Points)

# ------------------------------------------ output result ---------------------------------------

  if name:
    writer = vtk.vtkXMLUnstructuredGridWriter()
    (directory,filename) = os.path.split(name)
    writer.SetDataModeToBinary()
    writer.SetCompressorTypeToZLib()
    writer.SetFileName(os.path.join(directory,os.path.splitext(filename)[0]\
                                              +'.'+writer.GetDefaultFileExtension()))
  else:
    writer = vtk.vtkDataSetWriter()
    writer.WriteToOutputStringOn()
    writer.SetHeader('# powered by '+scriptID)

  if vtk.VTK_MAJOR_VERSION <= 5: writer.SetInput(uGrid)
  else:                          writer.SetInputData(uGrid)
  writer.Write()
  if name is None: sys.stdout.write(writer.GetOutputString()[0:writer.GetOutputStringLength()])

  table.close()                                     # close input ASCII table
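The hexPoints table above lists the eight voxel corners in VTK_HEXAHEDRON ordering (bottom face counterclockwise, then top face). Generating the corner coordinates for one point is then a single broadcast (a sketch, assuming center and halfDelta are length-3 arrays; voxelCorners is a hypothetical name):

import numpy as np

hexPoints = np.array([[-1,-1,-1],[ 1,-1,-1],[ 1, 1,-1],[-1, 1,-1],
                      [-1,-1, 1],[ 1,-1, 1],[ 1, 1, 1],[-1, 1, 1]])

def voxelCorners(center,halfDelta):
  """eight corners of the axis-aligned voxel around center (sketch)"""
  return center + hexPoints*halfDelta               # (8,3) array of corner coordinates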
@ -1,110 +0,0 @@
#!/usr/bin/env python
# -*- coding: UTF-8 no BOM -*-

import os,sys,math
import numpy as np
from optparse import OptionParser
from PIL import Image
import damask

scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID   = ' '.join([scriptName,damask.version])

#--------------------------------------------------------------------------------------------------
# MAIN
#--------------------------------------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """

Generate geometry description from (multilayer) images.
Microstructure index is based on gray scale value (index = gray value + 1, i.e. 1..256).

""", version = scriptID)

parser.add_option('--homogenization',
                  dest = 'homogenization',
                  type = 'int', metavar = 'int',
                  help = 'homogenization index [%default]')

parser.set_defaults(homogenization = 1,
                   )

(options,filenames) = parser.parse_args()

# --- loop over input files -------------------------------------------------------------------------

if filenames == []: filenames = [None]

for name in filenames:
  try:
    table = damask.ASCIItable(name = name,
                              outname = os.path.splitext(name)[0]+'.geom' if name else name,
                              buffered = False, labeled = False)
  except: continue
  table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name else ''))

# --- read image ------------------------------------------------------------------------------------

  img = Image.open(name).convert(mode = 'L')                            # open and convert to grayscale 8bit

  slice = 0
  while True:
    try:
      img.seek(slice)                                                   # advance to slice
      layer = np.expand_dims(1+np.array(img,dtype = 'uint16'),axis = 0) # read image layer
      microstructure = layer if slice == 0 else np.vstack((microstructure,layer))  # noqa
      slice += 1                                                        # advance to next slice
    except EOFError:
      break

# http://docs.scipy.org/doc/scipy/reference/ndimage.html
# http://scipy-lectures.github.io/advanced/image_processing/

  info = {
          'grid':            np.array(microstructure.shape,'i')[::-1],
          'size':            np.array(microstructure.shape,'d')[::-1],
          'origin':          np.zeros(3,'d'),
          'microstructures': len(np.unique(microstructure)),
          'homogenization':  options.homogenization,
         }

# --- report ---------------------------------------------------------------------------------------

  table.croak(['grid     a b c:  %s'%(' x '.join(map(str,info['grid']))),
               'size     x y z:  %s'%(' x '.join(map(str,info['size']))),
               'origin   x y z:  %s'%(' : '.join(map(str,info['origin']))),
               'homogenization:  %i'%info['homogenization'],
               'microstructures: %i'%info['microstructures'],
              ])

  errors = []
  if np.any(info['grid'] < 1):    errors.append('invalid grid a b c.')
  if np.any(info['size'] <= 0.0): errors.append('invalid size x y z.')
  if errors != []:
    table.croak(errors)
    table.close(dismiss = True)
    continue

# --- write header ---------------------------------------------------------------------------------

  table.info_clear()
  table.info_append([
    scriptID + ' ' + ' '.join(sys.argv[1:]),
    "grid\ta {grid[0]}\tb {grid[1]}\tc {grid[2]}".format(grid=info['grid']),
    "size\tx {size[0]}\ty {size[1]}\tz {size[2]}".format(size=info['size']),
    "origin\tx {origin[0]}\ty {origin[1]}\tz {origin[2]}".format(origin=info['origin']),
    "homogenization\t{homog}".format(homog=info['homogenization']),
    "microstructures\t{microstructures}".format(microstructures=info['microstructures']),
    ])
  table.labels_clear()
  table.head_write()
  table.output_flush()

# --- write microstructure information ------------------------------------------------------------

  formatwidth = int(math.floor(math.log10(microstructure.max())+1))
  table.data = microstructure.reshape((info['grid'][1]*info['grid'][2],info['grid'][0]),order='C')
  table.data_writeArray('%%%ii'%(formatwidth),delimiter = ' ')

# --- output finalization --------------------------------------------------------------------------

  table.close()                                                         # close ASCII table
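Each image slice is mapped to microstructure indices by adding 1 to the 8-bit gray value, so gray 0..255 becomes index 1..256. As a one-line sketch (layerToIndices is a hypothetical name, not part of this commit):

import numpy as np

def layerToIndices(gray):
  """map 8-bit gray values 0..255 to microstructure indices 1..256, as done per slice above (sketch)"""
  return 1 + np.asarray(gray,dtype='uint16')

assert layerToIndices([[0,255]]).tolist() == [[1,256]]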