added wildcard matching for column headings, e.g. "[159]_p" to extract only the diagonal elements (scalars) of stress, or "[1-9]_p" to extract all nine values.

fixed a VTK file format problem where double-precision floats were incorrectly interpreted because they were announced as "float" (now "double").
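To illustrate the wildcard matching described above, here is a minimal sketch using Python's fnmatch module (the module this commit starts importing); the headings are made up and merely mimic the 1_p ... 9_p component naming implied by the examples in the commit message.

import fnmatch

# hypothetical column headings: nine tensor components labeled 1_p ... 9_p
headings = ['%i_p'%(i+1) for i in range(9)]

# "[159]_p" picks out only the diagonal components
diagonal = [head for head in headings if fnmatch.fnmatch(head,'[159]_p')]

# "[1-9]_p" picks out all nine components
full = [head for head in headings if fnmatch.fnmatch(head,'[1-9]_p')]

print(diagonal)   # ['1_p', '5_p', '9_p']
print(full)       # all nine headings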
Philip Eisenlohr 2013-05-29 10:24:00 +00:00
parent fab0ea8e4e
commit 32a16f9745
1 changed file with 29 additions and 16 deletions


@@ -5,7 +5,8 @@
 # As it reads in the data coming from "materialpoint_results", it can be adopted to the data
 # computed using the FEM solvers. Its capable to handle elements with one IP in a regular order
-import os,sys,threading,re,numpy,time,string,damask
+import os,sys,threading,re,numpy,time,string,fnmatch
+import damask
 from optparse import OptionParser, OptionGroup, Option, SUPPRESS_HELP
 # -----------------------------
@@ -131,7 +132,7 @@ def vtk_writeASCII_mesh(mesh,data,res,sep):
     string.replace('powered by $Id$','\n','\\n'),
     'ASCII',
     'DATASET UNSTRUCTURED_GRID',
-    'POINTS %i float'%N1,
+    'POINTS %i double'%N1,
     [[['\t'.join(map(str,mesh[:,i,j,k])) for i in range(res[0]+1)] for j in range(res[1]+1)] for k in range(res[2]+1)],
     'CELLS %i %i'%(N,N*9),
     ]
@@ -159,9 +160,9 @@ def vtk_writeASCII_mesh(mesh,data,res,sep):
   for type in data:
     plural = {True:'',False:'S'}[type.lower().endswith('s')]
-    for item in data[type]:
+    for item in data[type]['_order_']:
       cmds += [\
-        '%s %s float %i'%(info[type]['name'].upper()+plural,item,info[type]['len']),
+        '%s %s double %i'%(info[type]['name'].upper()+plural,item,info[type]['len']),
         {True:'LOOKUP_TABLE default',False:''}[info[type]['name'][:3]=='sca'],
         [[[sep.join(map(unravel,data[type][item][:,j,k]))] for j in range(res[1])] for k in range(res[2])],
         ]
@@ -228,7 +229,7 @@ def gmsh_writeASCII_mesh(mesh,data,res,sep):
   for type in data:
     plural = {True:'',False:'S'}[type.lower().endswith('s')]
-    for item in data[type]:
+    for item in data[type]['_order_']:
       cmds += [\
         '%s %s float %i'%(info[type]['name'].upper()+plural,item,info[type]['len']),
         'LOOKUP_TABLE default',
@@ -395,6 +396,7 @@ for filename in args:
   headrow = int(m.group(1))
   headings = content[headrow].split()
   column = {}
+  matches = {}
   maxcol = 0
   for col,head in enumerate(headings):
@@ -406,14 +408,16 @@ for filename in args:
   if locol < 0:
     print 'missing coordinates..!'
     continue
   column['tensor'] = {}
+  matches['tensor'] = {}
   for label in [options.defgrad] + options.tensor:
     column['tensor'][label] = -1
     for col,head in enumerate(headings):
       if head == label or head == '1_'+label:
         column['tensor'][label] = col
         maxcol = max(maxcol,col+9)
+        matches['tensor'][label] = [label]
         break
   if not options.undeformed and column['tensor'][options.defgrad] < 0:
@@ -421,23 +425,29 @@ for filename in args:
     continue
   column['vector'] = {}
+  matches['tensor'] = {}
   for label in options.vector:
     column['vector'][label] = -1
     for col,head in enumerate(headings):
       if head == label or head == '1_'+label:
         column['vector'][label] = col
         maxcol = max(maxcol,col+3)
+        matches['vector'][label] = [label]
         break
   for length,what in enumerate(['scalar','double','triple','quadruple']):
     column[what] = {}
-    for label in eval('options.'+what):
-      column[what][label] = -1
-      for col,head in enumerate(headings):
-        if head == label or head == '1_'+label:
-          column[what][label] = col
-          maxcol = max(maxcol,col+1+length)
-          break
+    labels = eval("options.%s"%what)
+    matches[what] = {}
+    for col,head in enumerate(headings):
+      for needle in labels:
+        if fnmatch.fnmatch(head,needle):
+          column[what][head] = col
+          maxcol = max(maxcol,col+1+length)
+          if needle not in matches[what]:
+            matches[what][needle] = [head]
+          else:
+            matches[what][needle] += [head]
   values = numpy.array(sorted([map(transliterateToFloat,line.split()[:maxcol]) for line in content[headrow+1:]],
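For a concrete picture of the bookkeeping the rewritten loop above introduces, here is a hedged sketch with an invented table header and an invented scalar request: column maps every matched heading to its column index, while matches maps the requested pattern (the "needle") to the list of headings it matched.

import fnmatch

headings = ['ip','elem','1_p','2_p','3_p','4_p','5_p','6_p','7_p','8_p','9_p']   # invented header row
labels   = ['[159]_p']                                                           # invented scalar request
column, matches = {'scalar':{}}, {'scalar':{}}

for col,head in enumerate(headings):
  for needle in labels:
    if fnmatch.fnmatch(head,needle):
      column['scalar'][head] = col
      if needle not in matches['scalar']:
        matches['scalar'][needle] = [head]
      else:
        matches['scalar'][needle] += [head]

print(column['scalar'])    # {'1_p': 2, '5_p': 6, '9_p': 10}  (key order may vary)
print(matches['scalar'])   # {'[159]_p': ['1_p', '5_p', '9_p']}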
@@ -530,11 +540,14 @@ for filename in args:
   for datatype in fields.keys():
     print '\n%s:'%datatype,
+    fields[datatype]['_order_'] = []
     for what in eval('options.'+datatype):
-      col = column[datatype][what]
-      if col != -1:
-        print what,
-        fields[datatype][what] = numpy.reshape(values[:,col:col+length[datatype]],[res[0],res[1],res[2]]+reshape[datatype])
+      for label in matches[datatype][what]:
+        col = column[datatype][label]
+        if col != -1:
+          print label,
+          fields[datatype][label] = numpy.reshape(values[:,col:col+length[datatype]],[res[0],res[1],res[2]]+reshape[datatype])
+          fields[datatype]['_order_'] += [label]
   print '\n'
   out = {}
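Finally, a small sketch (with invented labels) of why the '_order_' list added in this last hunk sits next to the field arrays: it lives in the same dictionary as the fields themselves, and plain dict iteration in this Python 2 script has no guaranteed order, so the writer routines earlier in the diff walk the explicit list rather than the dictionary keys.

fields = {'scalar': {}}
fields['scalar']['_order_'] = []
for label in ['9_p','1_p','5_p']:        # invented extraction order
  fields['scalar'][label] = object()     # stand-in for the reshaped numpy array
  fields['scalar']['_order_'] += [label]

# iterating the dict itself would also yield the bookkeeping key '_order_',
# and in no guaranteed order; the VTK/gmsh writers therefore iterate:
for item in fields['scalar']['_order_']:
  print(item)                            # 9_p, 1_p, 5_p -- extraction order preserved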