diff --git a/processing/post/DADF5_vtk_cells.py b/processing/post/DADF5_vtk_cells.py index 85b999a19..3bbf9fd45 100755 --- a/processing/post/DADF5_vtk_cells.py +++ b/processing/post/DADF5_vtk_cells.py @@ -24,22 +24,23 @@ parser.add_argument('filenames', nargs='+', options = parser.parse_args() +options.labels = ['Fe','Fp','xi_sl'] # --- loop over input files ------------------------------------------------------------------------ for filename in options.filenames: - data = damask.DADF5(filename) + results = damask.DADF5(filename) - if data.structured: # for grid solvers use rectilinear grid + if results.structured: # for grid solvers use rectilinear grid rGrid = vtk.vtkRectilinearGrid() coordArray = [vtk.vtkDoubleArray(), vtk.vtkDoubleArray(), vtk.vtkDoubleArray(), ] - rGrid.SetDimensions(*(data.grid+1)) + rGrid.SetDimensions(*(results.grid+1)) for dim in [0,1,2]: - for c in np.linspace(0,data.size[dim],1+data.grid[dim]): + for c in np.linspace(0,results.size[dim],1+results.grid[dim]): coordArray[dim].InsertNextValue(c) rGrid.SetXCoordinates(coordArray[0]) @@ -47,22 +48,45 @@ for filename in options.filenames: rGrid.SetZCoordinates(coordArray[2]) - for i,inc in enumerate(data.increments): - data.active['increments'] = [inc] - x = data.get_dataset_location('xi_sl')[0] - VTKarray = numpy_support.numpy_to_vtk(num_array=data.read_dataset(x,0),deep=True,array_type= vtk.VTK_DOUBLE) - VTKarray.SetName('xi_sl') - rGrid.GetCellData().AddArray(VTKarray) - if data.structured: + for i,inc in enumerate(results.increments): + print('Output step {}/{}'.format(i+1,len(results.increments))) + vtk_data = [] + results.active['increments'] = [inc] + for label in options.labels: + for o in results.c_output_types: + results.active['c_output_types'] = [o] + if o != 'generic': + for c in results.constituents: + results.active['constituents'] = [c] + x = results.get_dataset_location(label) + if len(x) == 0: + continue + array = results.read_dataset(x,0) + shape = 
[array.shape[0],np.product(array.shape[1:])] + vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape),deep=True,array_type= vtk.VTK_DOUBLE)) + vtk_data[-1].SetName('1_'+x[0].split('/',1)[1]) + rGrid.GetCellData().AddArray(vtk_data[-1]) + else: + results.active['constituents'] = results.constituents + x = results.get_dataset_location(label) + if len(x) == 0: + continue + array = results.read_dataset(x,0) + shape = [array.shape[0],np.product(array.shape[1:])] + vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape),deep=True,array_type= vtk.VTK_DOUBLE)) + vtk_data[-1].SetName('1_'+x[0].split('/')[1]+'/generic/'+label) + rGrid.GetCellData().AddArray(vtk_data[-1]) + + if results.structured: writer = vtk.vtkXMLRectilinearGridWriter() writer.SetCompressorTypeToZLib() writer.SetDataModeToBinary() writer.SetFileName(os.path.join(os.path.split(filename)[0], os.path.splitext(os.path.split(filename)[1])[0] + - '_inc{:04d}'.format(i) + # ToDo: adjust to lenght of increments + '_inc{:04d}'.format(i) + # ToDo: adjust to length of increments '.' 
+ writer.GetDefaultFileExtension())) - if data.structured: + if results.structured: writer.SetInputData(rGrid) writer.Write() diff --git a/python/damask/dadf5.py b/python/damask/dadf5.py index 043997547..887c32338 100644 --- a/python/damask/dadf5.py +++ b/python/damask/dadf5.py @@ -35,29 +35,56 @@ class DADF5(): 'time': round(f[u].attrs['time/s'],12), } for u in f.keys() if r.match(u)] - self.constituents = np.unique(f['mapping/cellResults/constituent']['Name']).tolist() # ToDo: I am not to happy with the name - self.constituents = [c.decode() for c in self.constituents] - self.materialpoints = np.unique(f['mapping/cellResults/materialpoint']['Name']).tolist() # ToDo: I am not to happy with the name - self.materialpoints = [m.decode() for m in self.materialpoints] - self.Nconstitutents = np.shape(f['mapping/cellResults/constituent'])[1] - self.Nmaterialpoints= np.shape(f['mapping/cellResults/constituent'])[0] + self.constituents = np.unique(f['mapping/cellResults/constituent']['Name']).tolist() # ToDo: I am not too happy with the name + self.constituents = [c.decode() for c in self.constituents] - self.active= {'increments' :self.increments, - 'constituents' :self.constituents, - 'materialpoints':self.materialpoints} + self.materialpoints = np.unique(f['mapping/cellResults/materialpoint']['Name']).tolist() # ToDo: I am not too happy with the name + self.materialpoints = [m.decode() for m in self.materialpoints] + + self.Nconstituents = [i for i in range(np.shape(f['mapping/cellResults/constituent'])[1])] + self.Nmaterialpoints = np.shape(f['mapping/cellResults/constituent'])[0] + + self.c_output_types = [] + for c in self.constituents: + for o in f['inc{:05}/constituent/{}'.format(self.increments[0]['inc'],c)].keys(): + self.c_output_types.append(o) + self.c_output_types = list(set(self.c_output_types)) # make unique + + self.active= {'increments': self.increments, + 'constituents': self.constituents, + 'materialpoints': self.materialpoints, + 'constituent': 
self.Nconstituents, + 'c_output_types': self.c_output_types} self.filename = filename self.mode = mode - + + def list_data(self): + """Shows information on all datasets in the file""" + with h5py.File(self.filename,'r') as f: + group_inc = 'inc{:05}'.format(self.active['increments'][0]['inc']) + for c in self.active['constituents']: + print('\n'+c) + group_constituent = group_inc+'/constituent/'+c + for t in self.active['c_output_types']: + print(' {}'.format(t)) + group_output_types = group_constituent+'/'+t + try: + for x in f[group_output_types].keys(): + print(' {} ({})'.format(x,f[group_output_types+'/'+x].attrs['Description'].decode())) + except: + pass + def get_dataset_location(self,label): + """Returns the location of all active datasets with given label""" path = [] with h5py.File(self.filename,'r') as f: for i in self.active['increments']: group_inc = 'inc{:05}'.format(i['inc']) for c in self.active['constituents']: group_constituent = group_inc+'/constituent/'+c - for t in f[group_constituent].keys(): + for t in self.active['c_output_types']: try: f[group_constituent+'/'+t+'/'+label] path.append(group_constituent+'/'+t+'/'+label) @@ -67,12 +94,20 @@ class DADF5(): def read_dataset(self,path,c): + """ + Dataset for all points/cells + + + If more than one path is given, the dataset is composed of the individual contributions + """ with h5py.File(self.filename,'r') as f: - shape = (self.Nmaterialpoints,) + np.shape(f[path])[1:] + shape = (self.Nmaterialpoints,) + np.shape(f[path[0]])[1:] dataset = np.full(shape,np.nan) - label = path.split('/')[2] - p = np.where(f['mapping/cellResults/constituent'][:,c]['Name'] == str.encode(label)) - for s in p: dataset[s,:] = f[path][s,:] + for pa in path: + label = pa.split('/')[2] + p = np.where(f['mapping/cellResults/constituent'][:,c]['Name'] == str.encode(label))[0] + u = (f['mapping/cellResults/constituent'][p,c]['Position']) + dataset[p,:] = f[pa][u,:] return dataset