visualizing data from DADF5: first prototype

This commit is contained in:
Martin Diehl 2019-04-18 11:58:17 +02:00
parent 7c771647ad
commit 6b7fd6b7ea
2 changed files with 87 additions and 28 deletions

View File

@@ -24,22 +24,23 @@ parser.add_argument('filenames', nargs='+',
options = parser.parse_args() options = parser.parse_args()
options.labels = ['Fe','Fp','xi_sl']
# --- loop over input files ------------------------------------------------------------------------ # --- loop over input files ------------------------------------------------------------------------
for filename in options.filenames: for filename in options.filenames:
data = damask.DADF5(filename) results = damask.DADF5(filename)
if data.structured: # for grid solvers use rectilinear grid if results.structured: # for grid solvers use rectilinear grid
rGrid = vtk.vtkRectilinearGrid() rGrid = vtk.vtkRectilinearGrid()
coordArray = [vtk.vtkDoubleArray(), coordArray = [vtk.vtkDoubleArray(),
vtk.vtkDoubleArray(), vtk.vtkDoubleArray(),
vtk.vtkDoubleArray(), vtk.vtkDoubleArray(),
] ]
rGrid.SetDimensions(*(data.grid+1)) rGrid.SetDimensions(*(results.grid+1))
for dim in [0,1,2]: for dim in [0,1,2]:
for c in np.linspace(0,data.size[dim],1+data.grid[dim]): for c in np.linspace(0,results.size[dim],1+results.grid[dim]):
coordArray[dim].InsertNextValue(c) coordArray[dim].InsertNextValue(c)
rGrid.SetXCoordinates(coordArray[0]) rGrid.SetXCoordinates(coordArray[0])
@@ -47,22 +48,45 @@ for filename in options.filenames:
rGrid.SetZCoordinates(coordArray[2]) rGrid.SetZCoordinates(coordArray[2])
for i,inc in enumerate(data.increments): for i,inc in enumerate(results.increments):
data.active['increments'] = [inc] print('Output step {}/{}'.format(i+1,len(results.increments)))
x = data.get_dataset_location('xi_sl')[0] vtk_data = []
VTKarray = numpy_support.numpy_to_vtk(num_array=data.read_dataset(x,0),deep=True,array_type= vtk.VTK_DOUBLE) results.active['increments'] = [inc]
VTKarray.SetName('xi_sl') for label in options.labels:
rGrid.GetCellData().AddArray(VTKarray) for o in results.c_output_types:
if data.structured: results.active['c_output_types'] = [o]
if o != 'generic':
for c in results.constituents:
results.active['constituents'] = [c]
x = results.get_dataset_location(label)
if len(x) == 0:
continue
array = results.read_dataset(x,0)
shape = [array.shape[0],np.product(array.shape[1:])]
vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape),deep=True,array_type= vtk.VTK_DOUBLE))
vtk_data[-1].SetName('1_'+x[0].split('/',1)[1])
rGrid.GetCellData().AddArray(vtk_data[-1])
else:
results.active['constituents'] = results.constituents
x = results.get_dataset_location(label)
if len(x) == 0:
continue
array = results.read_dataset(x,0)
shape = [array.shape[0],np.product(array.shape[1:])]
vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape),deep=True,array_type= vtk.VTK_DOUBLE))
vtk_data[-1].SetName('1_'+x[0].split('/')[1]+'/generic/'+label)
rGrid.GetCellData().AddArray(vtk_data[-1])
if results.structured:
writer = vtk.vtkXMLRectilinearGridWriter() writer = vtk.vtkXMLRectilinearGridWriter()
writer.SetCompressorTypeToZLib() writer.SetCompressorTypeToZLib()
writer.SetDataModeToBinary() writer.SetDataModeToBinary()
writer.SetFileName(os.path.join(os.path.split(filename)[0], writer.SetFileName(os.path.join(os.path.split(filename)[0],
os.path.splitext(os.path.split(filename)[1])[0] + os.path.splitext(os.path.split(filename)[1])[0] +
'_inc{:04d}'.format(i) + # ToDo: adjust to lenght of increments '_inc{:04d}'.format(i) + # ToDo: adjust to length of increments
'.' + writer.GetDefaultFileExtension())) '.' + writer.GetDefaultFileExtension()))
if data.structured: if results.structured:
writer.SetInputData(rGrid) writer.SetInputData(rGrid)
writer.Write() writer.Write()

View File

@@ -35,29 +35,56 @@ class DADF5():
'time': round(f[u].attrs['time/s'],12), 'time': round(f[u].attrs['time/s'],12),
} for u in f.keys() if r.match(u)] } for u in f.keys() if r.match(u)]
self.constituents = np.unique(f['mapping/cellResults/constituent']['Name']).tolist() # ToDo: I am not too happy with the name self.constituents = np.unique(f['mapping/cellResults/constituent']['Name']).tolist() # ToDo: I am not too happy with the name
self.constituents = [c.decode() for c in self.constituents] self.constituents = [c.decode() for c in self.constituents]
self.materialpoints = np.unique(f['mapping/cellResults/materialpoint']['Name']).tolist() # ToDo: I am not too happy with the name
self.materialpoints = [m.decode() for m in self.materialpoints]
self.Nconstitutents = np.shape(f['mapping/cellResults/constituent'])[1]
self.Nmaterialpoints= np.shape(f['mapping/cellResults/constituent'])[0]
self.active= {'increments' :self.increments, self.materialpoints = np.unique(f['mapping/cellResults/materialpoint']['Name']).tolist() # ToDo: I am not too happy with the name
'constituents' :self.constituents, self.materialpoints = [m.decode() for m in self.materialpoints]
'materialpoints':self.materialpoints}
self.Nconstituents = [i for i in range(np.shape(f['mapping/cellResults/constituent'])[1])]
self.Nmaterialpoints = np.shape(f['mapping/cellResults/constituent'])[0]
self.c_output_types = []
for c in self.constituents:
for o in f['inc{:05}/constituent/{}'.format(self.increments[0]['inc'],c)].keys():
self.c_output_types.append(o)
self.c_output_types = list(set(self.c_output_types)) # make unique
self.active= {'increments': self.increments,
'constituents': self.constituents,
'materialpoints': self.materialpoints,
'constituent': self.Nconstituents,
'c_output_types': self.c_output_types}
self.filename = filename self.filename = filename
self.mode = mode self.mode = mode
def list_data(self):
"""Shows information on all datasets in the file"""
with h5py.File(self.filename,'r') as f:
group_inc = 'inc{:05}'.format(self.active['increments'][0]['inc'])
for c in self.active['constituents']:
print('\n'+c)
group_constituent = group_inc+'/constituent/'+c
for t in self.active['c_output_types']:
print(' {}'.format(t))
group_output_types = group_constituent+'/'+t
try:
for x in f[group_output_types].keys():
print(' {} ({})'.format(x,f[group_output_types+'/'+x].attrs['Description'].decode()))
except:
pass
def get_dataset_location(self,label): def get_dataset_location(self,label):
"""Returns the location of all active datasets with given label"""
path = [] path = []
with h5py.File(self.filename,'r') as f: with h5py.File(self.filename,'r') as f:
for i in self.active['increments']: for i in self.active['increments']:
group_inc = 'inc{:05}'.format(i['inc']) group_inc = 'inc{:05}'.format(i['inc'])
for c in self.active['constituents']: for c in self.active['constituents']:
group_constituent = group_inc+'/constituent/'+c group_constituent = group_inc+'/constituent/'+c
for t in f[group_constituent].keys(): for t in self.active['c_output_types']:
try: try:
f[group_constituent+'/'+t+'/'+label] f[group_constituent+'/'+t+'/'+label]
path.append(group_constituent+'/'+t+'/'+label) path.append(group_constituent+'/'+t+'/'+label)
@@ -67,12 +94,20 @@ class DADF5():
def read_dataset(self,path,c): def read_dataset(self,path,c):
"""
Dataset for all points/cells
If more than one path is given, the dataset is composed of the individual contributions
"""
with h5py.File(self.filename,'r') as f: with h5py.File(self.filename,'r') as f:
shape = (self.Nmaterialpoints,) + np.shape(f[path])[1:] shape = (self.Nmaterialpoints,) + np.shape(f[path[0]])[1:]
dataset = np.full(shape,np.nan) dataset = np.full(shape,np.nan)
label = path.split('/')[2] for pa in path:
p = np.where(f['mapping/cellResults/constituent'][:,c]['Name'] == str.encode(label)) label = pa.split('/')[2]
for s in p: dataset[s,:] = f[path][s,:] p = np.where(f['mapping/cellResults/constituent'][:,c]['Name'] == str.encode(label))[0]
u = (f['mapping/cellResults/constituent'][p,c]['Position'])
dataset[p,:] = f[pa][u,:]
return dataset return dataset