write out point data

It might be possible to use a rectilinear grid for this in the case of grid solvers as well.
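
The note above hints at an alternative to the vertex-based polydata added in this commit: for grid solvers the cell centers are regularly spaced, so the same point data could sit on a vtkRectilinearGrid, as in the first changed file below. A minimal sketch of that idea, not part of the commit; grid and size are placeholder values standing in for results.grid and results.size:

import numpy as np
import vtk
from vtk.util import numpy_support

grid = [16,16,16]                                            # placeholder for results.grid (cells per direction)
size = np.array([1.0,1.0,1.0])                               # placeholder for results.size (box edge lengths)

rGrid = vtk.vtkRectilinearGrid()
rGrid.SetDimensions(grid[0],grid[1],grid[2])                 # one grid point per cell center
delta = size/grid*0.5                                        # offset of the first cell center from the box edge

for d,setCoords in enumerate([rGrid.SetXCoordinates,
                              rGrid.SetYCoordinates,
                              rGrid.SetZCoordinates]):
  centers = np.linspace(delta[d],size[d]-delta[d],grid[d])   # cell-center coordinates along direction d
  setCoords(numpy_support.numpy_to_vtk(centers,deep=True,array_type=vtk.VTK_DOUBLE))

# per-point datasets would then be attached as point data instead of cell data,
# e.g. rGrid.GetPointData().AddArray(vtk_array)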
This commit is contained in:
Martin Diehl 2019-10-12 12:34:37 +02:00
parent a2a05158f2
commit 513f1c6726
2 changed files with 127 additions and 1 deletion


@@ -40,7 +40,7 @@ if options.con is None: options.con=[]
for filename in options.filenames:
  results = damask.DADF5(filename)

  if results.structured:                                      # for grid solvers use rectilinear grid
    rGrid = vtk.vtkRectilinearGrid()
    coordArray = [vtk.vtkDoubleArray(),
                  vtk.vtkDoubleArray(),


@@ -0,0 +1,126 @@
#!/usr/bin/env python3
import os
import argparse
import numpy as np
import vtk
from vtk.util import numpy_support
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = argparse.ArgumentParser()
#ToDo: We need to decide on a way of handling arguments of variable length
#https://stackoverflow.com/questions/15459997/passing-integer-lists-to-python
#parser.add_argument('--version', action='version', version='%(prog)s {}'.format(scriptID))
parser.add_argument('filenames', nargs='+',
                    help='DADF5 files')
parser.add_argument('-d','--dir', dest='dir',default='postProc',metavar='string',
                    help='name of subdirectory relative to the location of the DADF5 file to hold output')
parser.add_argument('--mat', nargs='+',
                    help='labels for materialpoint',dest='mat')
parser.add_argument('--con', nargs='+',
                    help='labels for constituent',dest='con')
options = parser.parse_args()
if options.mat is None: options.mat=[]
if options.con is None: options.con=[]
# --- loop over input files ------------------------------------------------------------------------
for filename in options.filenames:
  results = damask.DADF5(filename)

  Polydata = vtk.vtkPolyData()
  Points   = vtk.vtkPoints()
  Vertices = vtk.vtkCellArray()

  if results.structured:                                      # for grid solvers calculate points as cell centers
    delta = results.size/results.grid*0.5                     # offset of the first cell center from the box edge
    for z in np.linspace(delta[2],results.size[2]-delta[2],results.grid[2]):
      for y in np.linspace(delta[1],results.size[1]-delta[1],results.grid[1]):
        for x in np.linspace(delta[0],results.size[0]-delta[0],results.grid[0]):   # x varies fastest
          pointID = Points.InsertNextPoint([x,y,z])
          Vertices.InsertNextCell(1)                          # one vertex cell per point so that results can be attached as cell data
          Vertices.InsertCellPoint(pointID)

  Polydata.SetPoints(Points)
  Polydata.SetVerts(Vertices)
  Polydata.Modified()
  for i,inc in enumerate(results.iter_visible('increments')):
    print('Output step {}/{}'.format(i+1,len(results.increments)))
    vtk_data = []

    results.set_visible('materialpoints',False)
    results.set_visible('constituents',  True)
    for label in options.con:
      for p in results.iter_visible('con_physics'):
        if p != 'generic':
          for c in results.iter_visible('constituents'):
            x = results.get_dataset_location(label)
            if len(x) == 0:
              continue
            array = results.read_dataset(x,0)
            shape = [array.shape[0],np.product(array.shape[1:])]               # flatten all per-point components into one dimension
            vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape),deep=True,array_type=vtk.VTK_DOUBLE))
            vtk_data[-1].SetName('1_'+x[0].split('/',1)[1])
            Polydata.GetCellData().AddArray(vtk_data[-1])
        else:
          x = results.get_dataset_location(label)
          if len(x) == 0:
            continue
          array = results.read_dataset(x,0)
          shape = [array.shape[0],np.product(array.shape[1:])]
          vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape),deep=True,array_type=vtk.VTK_DOUBLE))
          vtk_data[-1].SetName('1_'+x[0].split('/',1)[1])
          Polydata.GetCellData().AddArray(vtk_data[-1])

    results.set_visible('constituents',  False)
    results.set_visible('materialpoints',True)
    for label in options.mat:
      for p in results.iter_visible('mat_physics'):
        if p != 'generic':
          for m in results.iter_visible('materialpoints'):
            x = results.get_dataset_location(label)
            if len(x) == 0:
              continue
            array = results.read_dataset(x,0)
            shape = [array.shape[0],np.product(array.shape[1:])]
            vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape),deep=True,array_type=vtk.VTK_DOUBLE))
            vtk_data[-1].SetName('1_'+x[0].split('/',1)[1])
            Polydata.GetCellData().AddArray(vtk_data[-1])
        else:
          x = results.get_dataset_location(label)
          if len(x) == 0:
            continue
          array = results.read_dataset(x,0)
          shape = [array.shape[0],np.product(array.shape[1:])]
          vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape),deep=True,array_type=vtk.VTK_DOUBLE))
          vtk_data[-1].SetName('1_'+x[0].split('/',1)[1])
          Polydata.GetCellData().AddArray(vtk_data[-1])
    writer = vtk.vtkXMLPolyDataWriter()

    dirname  = os.path.abspath(os.path.join(os.path.dirname(filename),options.dir))
    if not os.path.isdir(dirname):
      os.mkdir(dirname,0o755)
    file_out = '{}_{}.{}'.format(os.path.splitext(os.path.split(filename)[-1])[0],inc,writer.GetDefaultFileExtension())

    writer.SetCompressorTypeToZLib()
    writer.SetDataModeToBinary()
    writer.SetFileName(os.path.join(dirname,file_out))
    writer.SetInputData(Polydata)
    writer.Write()
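
Since the loop above writes one compressed binary .vtp file per increment, a quick consistency check is to read a file back and list the arrays that were attached. A minimal sketch, not part of the commit; the file name is only an example of the '<prefix>_<increment>.vtp' pattern produced above:

import vtk

reader = vtk.vtkXMLPolyDataReader()
reader.SetFileName('postProc/example_inc0.vtp')              # hypothetical output of the script above
reader.Update()

polydata = reader.GetOutput()
print('points:      ',polydata.GetNumberOfPoints())
print('vertex cells:',polydata.GetNumberOfCells())

cellData = polydata.GetCellData()
for a in range(cellData.GetNumberOfArrays()):                # the datasets were added as cell data
  print(cellData.GetArrayName(a))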