polishing

Martin Diehl 2021-04-05 10:29:34 +02:00
parent 65b03aeb2d
commit 9db1ef9ed7
4 changed files with 34 additions and 30 deletions

@@ -1 +1 @@
-Subproject commit 4ce1f786dc2a613f29b2c8681fcf751d6803d38e
+Subproject commit 90ad4d1c4e7ef9ccd8e6b30ee9b771dd6187f372

View File

@@ -29,7 +29,7 @@ h5py3 = h5py.__version__[0] == '3'
 def _read(dataset):
     """Read a dataset and its metadata into a numpy.ndarray."""
-    metadata = {k:(v if h5py3 else v.decode()) for k,v in dataset.attrs.items()}
+    metadata = {k:(v.decode() if not h5py3 and type(v) is bytes else v) for k,v in dataset.attrs.items()}
     dtype = np.dtype(dataset.dtype,metadata=metadata)
     return np.array(dataset,dtype=dtype)
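The rewritten comprehension decodes a value only when it really is bytes, so non-string attributes read under h5py 2.x no longer fail on a missing .decode(). A minimal sketch of that selection logic, using a plain dict in place of dataset.attrs and an assumed h5py3 flag (the sample values are illustrative):

    h5py3 = False                            # pretend we are running under h5py 2.x
    attrs = {'unit': b'Pa', 'created': 3}    # bytes and non-bytes attribute values mixed

    # the old form decoded every value when h5py3 is False and failed on the int;
    # the new form decodes only genuine bytes values
    metadata = {k: (v.decode() if not h5py3 and type(v) is bytes else v) for k, v in attrs.items()}

    print(metadata)                          # {'unit': 'Pa', 'created': 3}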
@@ -268,9 +268,9 @@ class Result:
         Parameters
         ----------
-        what : str
-            Attribute to change (must be from self.visible).
-        datasets : list of str or bool
+        what : {'increments', 'times', 'phases', 'homogenizations', 'fields'}
+            Attribute to change.
+        datasets : int (for increments), float (for times), str or list of, bool
             Name of datasets as list; supports ? and * wildcards.
             True is equivalent to *, False is equivalent to [].
@@ -284,9 +284,9 @@ class Result:
         Parameters
         ----------
-        what : str
-            Attribute to change (must be from self.visible).
-        datasets : list of str or bool
+        what : {'increments', 'times', 'phases', 'homogenizations', 'fields'}
+            Attribute to change.
+        datasets : int (for increments), float (for times), str or list of, bool
             Name of datasets as list; supports ? and * wildcards.
             True is equivalent to *, False is equivalent to [].
@@ -300,9 +300,9 @@ class Result:
         Parameters
         ----------
-        what : str
-            Attribute to change (must be from self.visible).
-        datasets : list of str or bool
+        what : {'increments', 'times', 'phases', 'homogenizations', 'fields'}
+            Attribute to change.
+        datasets : int (for increments), float (for times), str or list of, bool
             Name of datasets as list; supports ? and * wildcards.
             True is equivalent to *, False is equivalent to [].
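The three docstring hunks above describe the same selection interface: what names the attribute to filter on, and datasets gives the selection, with True and False as shorthand for everything and nothing. A hedged usage sketch; the method names view and view_more and the file and phase names are assumptions for illustration, not part of this diff:

    from damask import Result

    r = Result('my_simulation.hdf5')            # illustrative file name

    r.view('increments', 0)                     # a single increment, given as int
    r.view('times', 10.0)                       # select increments by simulation time, given as float
    r.view('phases', ['Aluminum', 'Steel?'])    # names; ? and * wildcards are supported
    r.view_more('fields', True)                 # True is equivalent to '*'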
@@ -1016,11 +1016,14 @@ class Result:
         """
         Write XDMF file to directly visualize data in DADF5 file.
 
-        The view is not taken into account, i.e. the content of the
-        whole file will be included.
+        Parameters
+        ----------
+        output : str or list of str
+            Labels of the datasets to read. Defaults to '*', in which
+            case all datasets are considered.
 
         """
-        # compatibility hack
-        u = 'Unit' if self.version_minor < 12 else 'unit'
+        u = 'Unit' if self.version_minor < 12 else 'unit' # compatibility hack
 
         if self.N_constituents != 1 or len(self.phases) != 1 or not self.structured:
             raise TypeError('XDMF output requires homogeneous grid')
@@ -1047,10 +1050,11 @@ class Result:
         time.attrib={'TimeType': 'List'}
 
         time_data = ET.SubElement(time, 'DataItem')
+        times = [self.times[self.increments.index(i)] for i in self.visible['increments']]
         time_data.attrib={'Format': 'XML',
                           'NumberType': 'Float',
-                          'Dimensions': f'{len(self.times)}'}
-        time_data.text = ' '.join(map(str,self.times))
+                          'Dimensions': f'{len(times)}'}
+        time_data.text = ' '.join(map(str,times))
 
         attributes = []
         data_items = []
@@ -1100,7 +1104,6 @@ class Result:
                     shape = f[name].shape[1:]
                     dtype = f[name].dtype
-                    if dtype not in np.sctypes['int']+np.sctypes['uint']+np.sctypes['float']: continue
 
                     unit = f[name].attrs[u] if h5py3 else f[name].attrs[u].decode()
 
                     attributes.append(ET.SubElement(grid, 'Attribute'))
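With the two hunks above, the XDMF export now honours the current view: the time values are taken from self.visible['increments'] rather than from all increments, and the docstring hunk earlier adds an output filter for dataset labels. A hedged sketch of the resulting workflow; the method names, file name, and dataset labels are assumptions for illustration:

    from damask import Result

    r = Result('my_simulation.hdf5')            # illustrative file name
    r.view('increments', [0, 10, 20])           # assumed selection call, see the sketch above
    r.save_XDMF(output=['F', 'P'])              # only visible increments, only these labels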
@@ -1119,7 +1122,7 @@ class Result:
             f.write(xml.dom.minidom.parseString(ET.tostring(xdmf).decode()).toprettyxml())
 
 
-    def save_VTK(self,output='*',mode='cell',constituents=None,fill_float=np.nan,fill_int=0):
+    def save_VTK(self,output='*',mode='cell',constituents=None,fill_float=np.nan,fill_int=0,parallel=True):
         """
         Export to vtk cell/point data.
@@ -1140,6 +1143,9 @@ class Result:
         fill_int : int
             Fill value for non-existent entries of integer type.
             Defaults to 0.
+        parallel : bool
+            Write out VTK files in parallel in a separate background process.
+            Defaults to True.
 
         """
         if mode.lower()=='cell':
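The new parallel keyword is documented here and forwarded to v.save in a later hunk, so a caller can choose between returning immediately and blocking until the VTK files are on disk. A hedged usage sketch; the file and dataset names are illustrative:

    from damask import Result

    r = Result('my_simulation.hdf5')             # illustrative file name
    r.save_VTK(output='sigma', parallel=False)   # block until the VTK files are written to disk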
@@ -1147,7 +1153,7 @@ class Result:
         elif mode.lower()=='point':
             v = VTK.from_poly_data(self.coordinates0_point)
 
         ln = 3 if self.version_minor < 12 else 10 # compatibility hack
         N_digits = int(np.floor(np.log10(max(1,int(self.increments[-1][ln:])))))+1
 
         constituents_ = constituents if isinstance(constituents,Iterable) else \
@@ -1156,9 +1162,9 @@ class Result:
         suffixes = [''] if self.N_constituents == 1 or isinstance(constituents,int) else \
                    [f'#{c}' for c in constituents_]
 
         grp = 'mapping' if self.version_minor < 12 else 'cell_to' # compatibility hack
         name = 'Name' if self.version_minor < 12 else 'label' # compatibility hack
         member = 'member' if self.version_minor < 12 else 'entry' # compatibility hack
 
         with h5py.File(self.fname,'r') as f:
@@ -1207,7 +1213,7 @@ class Result:
                 for label,dataset in outs.items():
                     v.add(dataset,' / '.join(['/'.join([ty,field,label]),dataset.dtype.metadata['unit']]))
 
-            v.save(f'{self.fname.stem}_inc{inc[ln:].zfill(N_digits)}')
+            v.save(f'{self.fname.stem}_inc{inc[ln:].zfill(N_digits)}',parallel=parallel)
 
 
     def read(self,output='*',flatten=True,prune=True):
@@ -1294,9 +1300,9 @@ class Result:
         suffixes = [''] if self.N_constituents == 1 or isinstance(constituents,int) else \
                    [f'#{c}' for c in constituents_]
 
         grp = 'mapping' if self.version_minor < 12 else 'cell_to' # compatibility hack
         name = 'Name' if self.version_minor < 12 else 'label' # compatibility hack
         member = 'member' if self.version_minor < 12 else 'entry' # compatibility hack
 
         with h5py.File(self.fname,'r') as f:

View File

@@ -230,7 +230,6 @@ class Table:
             f = fname
             f.seek(0)
-            f.seek(0)
 
         comments = []
         line = f.readline().strip()
         while line.startswith('#'):
@@ -515,7 +514,7 @@ class Table:
         """
         if set(self.shapes) & set(other.shapes) or self.data.shape[0] != other.data.shape[0]:
-            raise KeyError('Dublicated keys or row count mismatch')
+            raise KeyError('Duplicated keys or row count mismatch')
         else:
            dup = self.copy()
            dup.data = dup.data.join(other.data)
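Besides the spelling fix in the error message, the guard itself is worth spelling out: two tables can only be joined when their column labels are disjoint and their row counts match. A self-contained sketch of that check, using plain dicts and arrays instead of the Table class:

    import numpy as np

    shapes_a = {'F': (3,3), 'P': (3,3)}          # column labels and shapes of the first table
    shapes_b = {'P': (3,3), 'sigma': (3,3)}      # 'P' clashes with the first table
    data_a = np.zeros((10,18))
    data_b = np.zeros((10,18))

    # same guard as in the diff: disjoint keys and equal row counts are required
    if set(shapes_a) & set(shapes_b) or data_a.shape[0] != data_b.shape[0]:
        raise KeyError('Duplicated keys or row count mismatch')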

View File

@@ -9,7 +9,6 @@ from datetime import datetime
 import pytest
 import numpy as np
-import h5py
 
 from damask import Result
 from damask import Rotation