renamed result.read to get; polishing
parent 02277fb820
commit bf4c88a39e
@@ -1 +1 @@
-Subproject commit 90ad4d1c4e7ef9ccd8e6b30ee9b771dd6187f372
+Subproject commit 1298124143e7e2901d0b9c2e79ab6388cb78a1e3
@@ -49,7 +49,7 @@ def _match(requested,existing):
     return sorted(set(flatten_list([fnmatch.filter(existing,r) for r in requested_])),
                   key=util.natural_sort)

-def _empty(dataset,N_materialpoints,fill_float,fill_int):
+def _empty_like(dataset,N_materialpoints,fill_float,fill_int):
     """Create empty numpy.ma.MaskedArray."""
     return ma.array(np.empty((N_materialpoints,)+dataset.shape[1:],dataset.dtype),
                     fill_value = fill_float if dataset.dtype in np.sctypes['float'] else fill_int,
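For orientation, a minimal standalone sketch of the renamed helper's behavior; it assumes NumPy < 2.0 (where np.sctypes still exists) and closes the call with mask=True, a continuation this hunk cuts off:

    import numpy as np
    import numpy.ma as ma

    def _empty_like(dataset, N_materialpoints, fill_float, fill_int):
        """Fully masked array shaped per material point, with dtype-dependent fill value."""
        return ma.array(np.empty((N_materialpoints,)+dataset.shape[1:], dataset.dtype),
                        fill_value=fill_float if dataset.dtype in np.sctypes['float'] else fill_int,
                        mask=True)                    # assumed continuation, not shown above

    tensor_data = np.random.rand(4,3,3)               # e.g. a per-point tensor dataset
    buf = _empty_like(tensor_data, 10, np.nan, 0)     # room for 10 material points
    print(buf.shape, buf.fill_value)                  # (10, 3, 3) nan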
@@ -95,8 +95,8 @@ class Result:

             r=re.compile('inc[0-9]+' if self.version_minor < 12 else 'increment_[0-9]+')
             self.increments = sorted([i for i in f.keys() if r.match(i)],key=util.natural_sort)
-            self.times = [round(f[i].attrs['time/s'],12) for i in self.increments] if self.version_minor < 12 else \
-                         [round(f[i].attrs['t/s'],12) for i in self.increments]
+            self.times = [round(f[i].attrs['time/s' if self.version_minor < 12 else
+                                           't/s'],12) for i in self.increments]

             grp = 'mapping' if self.version_minor < 12 else 'cell_to'
@@ -159,9 +159,9 @@ class Result:
             Select from 'set', 'add', and 'del'.
         what : str
             Attribute to change (must be from self.visible).
-        datasets : str, int, list of str, list of int, or bool
-            Name of datasets as list; supports ? and * wildcards.
-            True is equivalent to [*], False is equivalent to [].
+        datasets : (list of) int (for increments), (list of) float (for times), (list of) str, or bool
+            Name of datasets; supports '?' and '*' wildcards.
+            True is equivalent to '*', False is equivalent to [].

         """
         # allow True/False and string arguments
@@ -270,9 +270,9 @@ class Result:
         ----------
         what : {'increments', 'times', 'phases', 'homogenizations', 'fields'}
             Attribute to change.
-        datasets : int (for increments), float (for times), str or list of, bool
-            Name of datasets as list; supports ? and * wildcards.
-            True is equivalent to *, False is equivalent to [].
+        datasets : (list of) int (for increments), (list of) float (for times), (list of) str, or bool
+            Name of datasets; supports '?' and '*' wildcards.
+            True is equivalent to '*', False is equivalent to [].

         """
         return self._manage_view('set',what,datasets)
@@ -286,9 +286,9 @@ class Result:
         ----------
         what : {'increments', 'times', 'phases', 'homogenizations', 'fields'}
             Attribute to change.
-        datasets : int (for increments), float (for times), str or list of, bool
-            Name of datasets as list; supports ? and * wildcards.
-            True is equivalent to *, False is equivalent to [].
+        datasets : (list of) int (for increments), (list of) float (for times), (list of) str, or bool
+            Name of datasets; supports '?' and '*' wildcards.
+            True is equivalent to '*', False is equivalent to [].

         """
         return self._manage_view('add',what,datasets)
@@ -302,9 +302,9 @@ class Result:
         ----------
         what : {'increments', 'times', 'phases', 'homogenizations', 'fields'}
             Attribute to change.
-        datasets : int (for increments), float (for times), str or list of, bool
-            Name of datasets as list; supports ? and * wildcards.
-            True is equivalent to *, False is equivalent to [].
+        datasets : (list of) int (for increments), (list of) float (for times), (list of) str, or bool
+            Name of datasets; supports '?' and '*' wildcards.
+            True is equivalent to '*', False is equivalent to [].

         """
         return self._manage_view('del',what,datasets)
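Taken together, the three docstrings above describe one selection API; a usage sketch (the file and phase names are hypothetical):

    import damask

    result = damask.Result('my_job.hdf5')              # hypothetical DADF5 result file

    full = result.view('increments', True)             # True is equivalent to '*'
    none = result.view('phases', False)                # False is equivalent to []
    some = result.view('times', [10.0, 20.0])          # floats select by simulation time
    more = none.view_more('phases', 'Alu*')            # '?' and '*' wildcards are supported
    less = full.view_less('increments', 'increment_0')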
@@ -448,7 +448,7 @@ class Result:
         label : str
             Label of resulting dataset.
         formula : str
-            Formula to calculate resulting dataset. Existing datasets are referenced by ‘#TheirLabel#‘.
+            Formula to calculate resulting dataset. Existing datasets are referenced by '#TheirLabel#'.
         unit : str, optional
             Physical unit of the result.
         description : str, optional
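A hedged example of the reference syntax; it assumes the default deformation-gradient label 'F' and that plain arithmetic is allowed in formulas, as in the add_calculation call used by the tests below:

    result.add_calculation('F_scaled', '2.0*#F#', '1', 'twice the deformation gradient')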
@@ -480,9 +480,9 @@ class Result:
         Parameters
         ----------
         P : str, optional
-            Label of the dataset containing the first Piola-Kirchhoff stress. Defaults to ‘P’.
+            Label of the dataset containing the first Piola-Kirchhoff stress. Defaults to 'P'.
         F : str, optional
-            Label of the dataset containing the deformation gradient. Defaults to ‘F’.
+            Label of the dataset containing the deformation gradient. Defaults to 'F'.

         """
         self._add_generic_pointwise(self._add_stress_Cauchy,{'P':P,'F':F})
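With these defaults the common call needs no arguments; the follow-up call assumes the Cauchy stress is stored under 'sigma', as the tests below imply:

    result.add_stress_Cauchy()               # equivalent to add_stress_Cauchy(P='P', F='F')
    result.add_equivalent_Mises('sigma')     # kind inferred from the unit ('Pa' -> stress)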
@@ -563,7 +563,7 @@ class Result:
         T_sym : str
             Label of symmetric tensor dataset.
         eigenvalue : str, optional
-            Eigenvalue. Select from ‘max’, ‘mid’, ‘min’. Defaults to ‘max’.
+            Eigenvalue. Select from 'max', 'mid', 'min'. Defaults to 'max'.

         """
         self._add_generic_pointwise(self._add_eigenvalue,{'T_sym':T_sym},{'eigenvalue':eigenvalue})
@@ -596,8 +596,8 @@ class Result:
         T_sym : str
             Label of symmetric tensor dataset.
         eigenvalue : str, optional
-            Eigenvalue to which the eigenvector corresponds. Select from
-            ‘max’, ‘mid’, ‘min’. Defaults to ‘max’.
+            Eigenvalue to which the eigenvector corresponds.
+            Select from 'max', 'mid', 'min'. Defaults to 'max'.

         """
         self._add_generic_pointwise(self._add_eigenvector,{'T_sym':T_sym},{'eigenvalue':eigenvalue})
@@ -696,7 +696,7 @@ class Result:
             Label of symmetric tensorial stress or strain dataset.
         kind : {'stress', 'strain', None}, optional
             Kind of the von Mises equivalent. Defaults to None, in which case
-            it is selected based on the unit of the dataset ('1' -> strain, 'Pa' -> stress').
+            it is selected based on the unit of the dataset ('1' -> strain, 'Pa' -> stress).

         """
         self._add_generic_pointwise(self._add_equivalent_Mises,{'T_sym':T_sym},{'kind':kind})
@@ -733,7 +733,7 @@ class Result:
         ----------
         x : str
             Label of vector or tensor dataset.
-        ord : {non-zero int, inf, -inf, ‘fro’, ‘nuc’}, optional
+        ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
            Order of the norm. inf means NumPy’s inf object. For details refer to numpy.linalg.norm.

         """
@@ -760,9 +760,9 @@ class Result:
         Parameters
         ----------
         P : str, optional
-            Label of first Piola-Kirchhoff stress dataset. Defaults to ‘P’.
+            Label of first Piola-Kirchhoff stress dataset. Defaults to 'P'.
         F : str, optional
-            Label of deformation gradient dataset. Defaults to ‘F’.
+            Label of deformation gradient dataset. Defaults to 'F'.

         """
         self._add_generic_pointwise(self._add_stress_second_Piola_Kirchhoff,{'P':P,'F':F})
@@ -874,17 +874,17 @@ class Result:
         """
         Add strain tensor of a deformation gradient.

-        For details refer to damask.mechanics.strain
+        For details, see damask.mechanics.strain.

         Parameters
         ----------
         F : str, optional
-            Label of deformation gradient dataset. Defaults to ‘F’.
-        t : {‘V’, ‘U’}, optional
-            Type of the polar decomposition, ‘V’ for left stretch tensor and ‘U’ for right stretch tensor.
-            Defaults to ‘V’.
+            Label of deformation gradient dataset. Defaults to 'F'.
+        t : {'V', 'U'}, optional
+            Type of the polar decomposition, 'V' for left stretch tensor and 'U' for right stretch tensor.
+            Defaults to 'V'.
         m : float, optional
-            Order of the strain calculation. Defaults to ‘0.0’.
+            Order of the strain calculation. Defaults to 0.0.

         """
         self._add_generic_pointwise(self._add_strain,{'F':F},{'t':t,'m':m})
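Sketch of the documented defaults and options:

    result.add_strain()                  # defaults: F='F', t='V', m=0.0
    result.add_strain(t='U', m=1.0)      # right stretch tensor, first-order measure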
@@ -909,10 +909,10 @@ class Result:
         Parameters
         ----------
         F : str, optional
-            Label of deformation gradient dataset. Defaults to ‘F’.
-        t : {‘V’, ‘U’}, optional
-            Type of the polar decomposition, ‘V’ for left stretch tensor and ‘U’ for right stretch tensor.
-            Defaults to ‘V’.
+            Label of deformation gradient dataset. Defaults to 'F'.
+        t : {'V', 'U'}, optional
+            Type of the polar decomposition, 'V' for left stretch tensor and 'U' for right stretch tensor.
+            Defaults to 'V'.

         """
         self._add_generic_pointwise(self._add_stretch_tensor,{'F':F},{'t':t})
@@ -947,8 +947,8 @@ class Result:
             Callback function that calculates a new dataset from one or
             more datasets per HDF5 group.
         datasets : dictionary
-            Details of the datasets to be used: label (in HDF5 file) and
-            arg (argument to which the data is parsed in func).
+            Details of the datasets to be used:
+            {arg (name to which the data is passed in func): label (in HDF5 file)}.
         args : dictionary, optional
             Arguments parsed to func.

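The clarified {arg: label} convention can be sketched as follows; load_dataset is a hypothetical stand-in for the actual HDF5 access:

    import numpy as np

    def load_dataset(label):
        """Hypothetical stand-in: fetch a dataset by its label in the HDF5 file."""
        return np.zeros((2,3,3))

    def add_generic_pointwise_sketch(func, datasets, args={}):
        # datasets maps callback argument names to file labels,
        # so each array is passed to func under the expected keyword
        loaded = {arg: load_dataset(label) for arg, label in datasets.items()}
        return func(**loaded, **args)

    out = add_generic_pointwise_sketch(lambda P, F: P + F, {'P': 'P', 'F': 'F'})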
@@ -1018,14 +1018,14 @@ class Result:

         Parameters
         ----------
-        output : str or list of str
-            Labels of the datasets to read. Defaults to '*', in which
-            case all datasets are considered.
+        output : (list of) str
+            Labels of the datasets to read.
+            Defaults to '*', in which case all datasets are considered.

         """
         u = 'Unit' if self.version_minor < 12 else 'unit' # compatibility hack
         if self.N_constituents != 1 or len(self.phases) != 1 or not self.structured:
-            raise TypeError('XDMF output requires homogeneous grid')
+            raise TypeError('XDMF output requires structured grid with single phase and single constituent.')


         attribute_type_map = defaultdict(lambda:'Matrix', ( ((),'Scalar'), ((3,),'Vector'), ((3,3),'Tensor')) )
@@ -1036,11 +1036,11 @@ class Result:
             if dtype in np.sctypes['float']: return 'Float'


-        xdmf=ET.Element('Xdmf')
+        xdmf = ET.Element('Xdmf')
         xdmf.attrib={'Version': '2.0',
                      'xmlns:xi': 'http://www.w3.org/2001/XInclude'}

-        domain=ET.SubElement(xdmf, 'Domain')
+        domain = ET.SubElement(xdmf, 'Domain')

         collection = ET.SubElement(domain, 'Grid')
         collection.attrib={'GridType': 'Collection',
@@ -1062,38 +1062,38 @@ class Result:
         with h5py.File(self.fname,'r') as f:
             for inc in self.visible['increments']:

-                grid=ET.SubElement(collection,'Grid')
+                grid = ET.SubElement(collection,'Grid')
                 grid.attrib = {'GridType': 'Uniform',
                                'Name': inc}

-                topology=ET.SubElement(grid, 'Topology')
-                topology.attrib={'TopologyType': '3DCoRectMesh',
-                                 'Dimensions': '{} {} {}'.format(*self.cells+1)}
+                topology = ET.SubElement(grid, 'Topology')
+                topology.attrib = {'TopologyType': '3DCoRectMesh',
+                                   'Dimensions': '{} {} {}'.format(*(self.cells+1))}

-                geometry=ET.SubElement(grid, 'Geometry')
-                geometry.attrib={'GeometryType':'Origin_DxDyDz'}
+                geometry = ET.SubElement(grid, 'Geometry')
+                geometry.attrib = {'GeometryType':'Origin_DxDyDz'}

-                origin=ET.SubElement(geometry, 'DataItem')
-                origin.attrib={'Format': 'XML',
+                origin = ET.SubElement(geometry, 'DataItem')
+                origin.attrib = {'Format': 'XML',
                                'NumberType': 'Float',
                                'Dimensions': '3'}
-                origin.text="{} {} {}".format(*self.origin)
+                origin.text = "{} {} {}".format(*self.origin)

-                delta=ET.SubElement(geometry, 'DataItem')
-                delta.attrib={'Format': 'XML',
+                delta = ET.SubElement(geometry, 'DataItem')
+                delta.attrib = {'Format': 'XML',
                               'NumberType': 'Float',
                               'Dimensions': '3'}
                 delta.text="{} {} {}".format(*(self.size/self.cells))

                 attributes.append(ET.SubElement(grid, 'Attribute'))
-                attributes[-1].attrib={'Name': 'u / m',
+                attributes[-1].attrib = {'Name': 'u / m',
                                        'Center': 'Node',
                                        'AttributeType': 'Vector'}
                 data_items.append(ET.SubElement(attributes[-1], 'DataItem'))
-                data_items[-1].attrib={'Format': 'HDF',
+                data_items[-1].attrib = {'Format': 'HDF',
                                        'Precision': '8',
                                        'Dimensions': '{} {} {} 3'.format(*(self.cells+1))}
-                data_items[-1].text=f'{os.path.split(self.fname)[1]}:/{inc}/geometry/u_n'
+                data_items[-1].text = f'{os.path.split(self.fname)[1]}:/{inc}/geometry/u_n'

                 for ty in ['phase','homogenization']:
                     for label in self.visible[ty+'s']:
@@ -1106,16 +1106,16 @@ class Result:
                         unit = f[name].attrs[u] if h5py3 else f[name].attrs[u].decode()

                         attributes.append(ET.SubElement(grid, 'Attribute'))
-                        attributes[-1].attrib={'Name': name.split('/',2)[2]+f' / {unit}',
+                        attributes[-1].attrib = {'Name': name.split('/',2)[2]+f' / {unit}',
                                                'Center': 'Cell',
                                                'AttributeType': attribute_type_map[shape]}
                         data_items.append(ET.SubElement(attributes[-1], 'DataItem'))
-                        data_items[-1].attrib={'Format': 'HDF',
+                        data_items[-1].attrib = {'Format': 'HDF',
                                                'NumberType': number_type_map(dtype),
                                                'Precision': f'{dtype.itemsize}',
                                                'Dimensions': '{} {} {} {}'.format(*self.cells,1 if shape == () else
                                                                                   np.prod(shape))}
-                        data_items[-1].text=f'{os.path.split(self.fname)[1]}:{name}'
+                        data_items[-1].text = f'{os.path.split(self.fname)[1]}:{name}'

         with open(self.fname.with_suffix('.xdmf').name,'w',newline='\n') as f:
             f.write(xml.dom.minidom.parseString(ET.tostring(xdmf).decode()).toprettyxml())
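Given the stricter precondition introduced above, a typical call (sketch):

    result.save_XDMF()    # writes a .xdmf file referencing the HDF5 data;
                          # raises TypeError unless the grid is structured with
                          # a single phase and a single constituent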
@@ -1146,22 +1146,22 @@ class Result:

     def save_VTK(self,output='*',mode='cell',constituents=None,fill_float=np.nan,fill_int=0,parallel=True):
         """
-        Export to vtk cell/point data.
+        Export to VTK cell/point data.

         Parameters
         ----------
-        output : str or list of, optional
-            Labels of the datasets to place. Defaults to '*', in which
-            case all datasets are exported.
-        mode : str, either 'cell' or 'point'
+        output : (list of) str, optional
+            Labels of the datasets to place.
+            Defaults to '*', in which case all datasets are exported.
+        mode : {'cell', 'point'}
             Export in cell format or point format.
             Defaults to 'cell'.
-        constituents : int or list of, optional
-            Constituents to consider. Defaults to 'None', in which case
-            all constituents are considered.
+        constituents : (list of) int, optional
+            Constituents to consider.
+            Defaults to None, in which case all constituents are considered.
         fill_float : float
             Fill value for non-existent entries of floating point type.
-            Defaults to 0.0.
+            Defaults to NaN.
         fill_int : int
             Fill value for non-existent entries of integer type.
             Defaults to 0.
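Example reflecting the corrected defaults (dataset labels are examples):

    result.save_VTK()                        # all datasets as cell data, floats filled with NaN
    result.save_VTK(output=['F','sigma'],
                    mode='point',
                    fill_int=-1)             # one file is written per visible increment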
@@ -1206,14 +1206,14 @@ class Result:
                         if out+suffixes[0] not in outs.keys():
                             for c,suffix in zip(constituents_,suffixes):
                                 outs[out+suffix] = \
-                                    _empty(data,self.N_materialpoints,fill_float,fill_int)
+                                    _empty_like(data,self.N_materialpoints,fill_float,fill_int)

                         for c,suffix in zip(constituents_,suffixes):
                             outs[out+suffix][at_cell_ph[c][label]] = data[in_data_ph[c][label]]

                     if ty == 'homogenization':
                         if out not in outs.keys():
-                            outs[out] = _empty(data,self.N_materialpoints,fill_float,fill_int)
+                            outs[out] = _empty_like(data,self.N_materialpoints,fill_float,fill_int)

                         outs[out][at_cell_ho[label]] = data[in_data_ho[label]]

@@ -1223,18 +1223,15 @@ class Result:
             v.save(f'{self.fname.stem}_inc{inc[ln:].zfill(N_digits)}',parallel=parallel)


-    def read(self,output='*',flatten=True,prune=True):
+    def get(self,output='*',flatten=True,prune=True):
         """
-        Export data per phase/homogenization.
-
-        The returned data structure reflects the group/folder structure
-        in the DADF5 file.
+        Collect data per phase/homogenization reflecting the group/folder structure in the DADF5 file.

         Parameters
         ----------
-        output : str or list of str
-            Labels of the datasets to read. Defaults to '*', in which
-            case all datasets are read.
+        output : (list of) str
+            Labels of the datasets to read.
+            Defaults to '*', in which case all datasets are read.
         flatten : bool
             Remove singular levels of the folder hierarchy.
             This might be beneficial in case of single increment,
@@ -1242,6 +1239,11 @@ class Result:
         prune : bool
             Remove branches with no data. Defaults to True.

+        Returns
+        -------
+        data : dict of numpy.ndarray
+            Datasets structured by phase/homogenization and according to selected view.
+
         """
         r = {}

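A migration sketch for the rename; semantics are unchanged:

    F = result.get('F')                      # was: result.read('F')
    everything = result.get()                # output defaults to '*'
    tree = result.get('F', flatten=False)    # keep the full increment/phase/field hierarchy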
@@ -1266,9 +1268,9 @@ class Result:
         return r


-    def place(self,output='*',flatten=True,prune=True,constituents=None,fill_float=0.0,fill_int=0):
+    def place(self,output='*',flatten=True,prune=True,constituents=None,fill_float=np.nan,fill_int=0):
         """
-        Export data in spatial order that is compatible with the damask.VTK geometry representation.
+        Merge data into spatial order that is compatible with the damask.VTK geometry representation.

         The returned data structure reflects the group/folder structure
         in the DADF5 file.
@@ -1279,21 +1281,21 @@ class Result:

         Parameters
         ----------
-        output : str or list of, optional
-            Labels of the datasets to place. Defaults to '*', in which
-            case all datasets are placed.
+        output : (list of) str, optional
+            Labels of the datasets to place.
+            Defaults to '*', in which case all datasets are placed.
         flatten : bool
             Remove singular levels of the folder hierarchy.
-            This might be beneficial in case of single increment
-            or field. Defaults to True.
+            This might be beneficial in case of single increment or field.
+            Defaults to True.
         prune : bool
             Remove branches with no data. Defaults to True.
-        constituents : int or list of, optional
-            Constituents to consider. Defaults to 'None', in which case
-            all constituents are considered.
+        constituents : (list of) int, optional
+            Constituents to consider.
+            Defaults to None, in which case all constituents are considered.
         fill_float : float
             Fill value for non-existent entries of floating point type.
-            Defaults to 0.0.
+            Defaults to NaN.
         fill_int : int
             Fill value for non-existent entries of integer type.
             Defaults to 0.
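Sketch of place with the new NaN default (labels are examples):

    F = result.place('F')                            # masked where no constituent provides data
    sigma = result.place('sigma', fill_float=0.0)    # override the NaN default for missing floats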
@@ -1330,7 +1332,7 @@ class Result:
                         if out+suffixes[0] not in r[inc][ty][field].keys():
                             for c,suffix in zip(constituents_,suffixes):
                                 r[inc][ty][field][out+suffix] = \
-                                    _empty(data,self.N_materialpoints,fill_float,fill_int)
+                                    _empty_like(data,self.N_materialpoints,fill_float,fill_int)

                         for c,suffix in zip(constituents_,suffixes):
                             r[inc][ty][field][out+suffix][at_cell_ph[c][label]] = data[in_data_ph[c][label]]
@@ -1338,7 +1340,7 @@ class Result:
                     if ty == 'homogenization':
                         if out not in r[inc][ty][field].keys():
                             r[inc][ty][field][out] = \
-                                _empty(data,self.N_materialpoints,fill_float,fill_int)
+                                _empty_like(data,self.N_materialpoints,fill_float,fill_int)

                         r[inc][ty][field][out][at_cell_ho[label]] = data[in_data_ho[label]]

@@ -56,19 +56,19 @@ class TestResult:


     def test_view_all(self,default):
-        a = default.view('increments',True).read('F')
+        a = default.view('increments',True).get('F')

-        assert dict_equal(a,default.view('increments','*').read('F'))
-        assert dict_equal(a,default.view('increments',default.increments_in_range(0,np.iinfo(int).max)).read('F'))
+        assert dict_equal(a,default.view('increments','*').get('F'))
+        assert dict_equal(a,default.view('increments',default.increments_in_range(0,np.iinfo(int).max)).get('F'))

-        assert dict_equal(a,default.view('times',True).read('F'))
-        assert dict_equal(a,default.view('times','*').read('F'))
-        assert dict_equal(a,default.view('times',default.times_in_range(0.0,np.inf)).read('F'))
+        assert dict_equal(a,default.view('times',True).get('F'))
+        assert dict_equal(a,default.view('times','*').get('F'))
+        assert dict_equal(a,default.view('times',default.times_in_range(0.0,np.inf)).get('F'))

     @pytest.mark.parametrize('what',['increments','times','phases']) # ToDo: discuss homogenizations
     def test_view_none(self,default,what):
-        a = default.view(what,False).read('F')
-        b = default.view(what,[]).read('F')
+        a = default.view(what,False).get('F')
+        b = default.view(what,[]).get('F')

         assert a == b == {}

@@ -76,8 +76,8 @@ class TestResult:
     def test_view_more(self,default,what):
         empty = default.view(what,False)

-        a = empty.view_more(what,'*').read('F')
-        b = empty.view_more(what,True).read('F')
+        a = empty.view_more(what,'*').get('F')
+        b = empty.view_more(what,True).get('F')

         assert dict_equal(a,b)

@@ -85,8 +85,8 @@ class TestResult:
     def test_view_less(self,default,what):
         full = default.view(what,True)

-        a = full.view_less(what,'*').read('F')
-        b = full.view_less(what,True).read('F')
+        a = full.view_less(what,'*').get('F')
+        b = full.view_less(what,True).get('F')

         assert a == b == {}

@@ -189,7 +189,7 @@ class TestResult:
         default.add_stress_Cauchy('P','F')
         default.add_calculation('sigma_y','#sigma#',unit='y')
         default.add_equivalent_Mises('sigma_y')
-        assert default.read('sigma_y_vM') == {}
+        assert default.get('sigma_y_vM') == {}

     def test_add_Mises_stress_strain(self,default):
         default.add_stress_Cauchy('P','F')
@@ -326,7 +326,7 @@ class TestResult:
         for i in range(10):
             if os.path.isfile(tmp_path/fname):
                 with open(fname) as f:
                     cur = hashlib.md5(f.read().encode()).hexdigest()
                 if cur == last:
                     break
                 else:
@@ -336,7 +336,7 @@ class TestResult:
             with open((ref_path/'save_VTK'/request.node.name).with_suffix('.md5'),'w') as f:
                 f.write(cur)
         with open((ref_path/'save_VTK'/request.node.name).with_suffix('.md5')) as f:
             assert cur == f.read()

     @pytest.mark.parametrize('mode',['point','cell'])
     def test_vtk_mode(self,tmp_path,single_phase,mode):
@@ -352,7 +352,7 @@ class TestResult:
         single_phase.save_XDMF()
         if update:
             shutil.copy(tmp_path/fname,ref_path/fname)
         assert sorted(open(tmp_path/fname).read()) == sorted(open(ref_path/fname).read()) # XML is not ordered

     def test_XDMF_invalid(self,default):
         with pytest.raises(TypeError):
@@ -374,7 +374,7 @@ class TestResult:
             result = result.view(key,value)

         fname = request.node.name
-        cur = result.read(output,compress,strip)
+        cur = result.get(output,compress,strip)
         if update:
             with bz2.BZ2File((ref_path/'read'/fname).with_suffix('.pbz2'),'w') as f:
                 pickle.dump(cur,f)