function renaming; rewording of help messages

This commit is contained in:
Philip Eisenlohr 2021-04-01 17:33:45 -04:00
parent 84e117c6b3
commit f6d6aefe1d
3 changed files with 66 additions and 61 deletions

View File

@ -345,7 +345,7 @@ class Result:
if self._allow_modification: if self._allow_modification:
with h5py.File(self.fname,'a') as f: with h5py.File(self.fname,'a') as f:
for path_old in self.get_dataset_location(name_old): for path_old in self.get_dataset_location(name_old):
path_new = os.path.join(os.path.dirname(path_old),name_new) path_new = '/'.join([os.path.dirname(path_old),name_new])
f[path_new] = f[path_old] f[path_new] = f[path_old]
f[path_new].attrs['Renamed'] = f'Original name: {name_old}' if h5py3 else \ f[path_new].attrs['Renamed'] = f'Original name: {name_old}' if h5py3 else \
f'Original name: {name_old}'.encode() f'Original name: {name_old}'.encode()
@ -1326,25 +1326,24 @@ class Result:
v.save(f'{self.fname.stem}_inc{inc[ln:].zfill(N_digits)}') v.save(f'{self.fname.stem}_inc{inc[ln:].zfill(N_digits)}')
def read(self,output,compress=True,strip=True): def read(self,output,flatten=True,prune=True):
""" """
Export data from file per phase/homogenization. Export data per phase/homogenization.
The returned data structure reflects the group/folder structure The returned data structure reflects the group/folder structure
in the DADF5 file. in the DADF5 file.
Parameters Parameters
---------- ----------
output : str or list of, optional output : str or list of str
Name of the datasets to include. Labels of the datasets to read.
compress : bool flatten : bool
Squeeze out dictionaries that are not needed for a unique Remove singular levels of the folder hierarchy.
structure. This might be beneficial in the case of single This might be beneficial in case of a
constituent or single phase simulations or if only one single constituent, phase, or increment.
time increment is considered. Defaults to 'True'. Defaults to True.
strip : bool prune : bool
Remove branches that contain no dataset. Defaults to Remove branches with no data. Defaults to True.
'True'.
""" """
r = {} r = {}
@ -1365,53 +1364,49 @@ class Result:
for out in output_.intersection(f['/'.join((inc,ty,label,field))].keys()): for out in output_.intersection(f['/'.join((inc,ty,label,field))].keys()):
r[inc][ty][label][field][out] = _read(f,'/'.join((inc,ty,label,field,out))) r[inc][ty][label][field][out] = _read(f,'/'.join((inc,ty,label,field,out)))
if strip: r = util.dict_strip(r) if prune: r = util.dict_prune(r)
if compress: r = util.dict_compress(r) if flatten: r = util.dict_flatten(r)
return r return r
def place(self,output,compress=True,strip=True,constituents=None,fill_float=0.0,fill_int=0): def place(self,output,flatten=True,prune=True,constituents=None,fill_float=0.0,fill_int=0):
""" """
Export data from file suitable sorted for spatial operations. Export data in spatial order that is compatible with the damask.VTK geometry representation.
The returned data structure reflects the group/folder structure The returned data structure reflects the group/folder structure
in the DADF5 file. In the case of multi phase simulations, the in the DADF5 file.
data is merged from the individual phases/homogenizations.
In the cases of a single constituent and single phase simulation Multi-phase data is fused into a single output.
this function is equivalent to `read`. `place` is equivalent to `read` if only one phase and one constituent are present.
Parameters Parameters
---------- ----------
output : str or list of, optional output : str or list of str, optional
Labels of the datasets to be read. Labels of the datasets to place.
compress : bool flatten : bool
Squeeze out dictionaries that are not needed for a unique Remove singular levels of the folder hierarchy.
structure. This might be beneficial in the case of single This might be beneficial in case of a
phase simulations or if only one time increment is single constituent, phase, or increment.
considered. Defaults to 'True'. Defaults to True.
strip : bool prune : bool
Remove branches that contain no dataset. Defaults to Remove branches with no data. Defaults to True.
'True'.
constituents : int or list of, optional constituents : int or list of, optional
Constituents to consider. Defaults to 'None', in which case Constituents to consider. Defaults to 'None', in which case
all constituents are considered. all constituents are considered.
fill_float : float fill_float : float
Fill value for non existent entries of floating point type. Fill value for non-existent entries of floating point type.
Defaults to 0.0. Defaults to 0.0.
fill_int : int fill_int : int
Fill value for non existent entries of integer type. Fill value for non-existent entries of integer type.
Defaults to 0. Defaults to 0.
""" """
r = {} r = {}
output_ = set([output] if isinstance(output,str) else output) output_ = set([output] if isinstance(output,str) else output)
if constituents is None: constituents_ = range(self.N_constituents) if constituents is None else \
constituents_ = range(self.N_constituents) constituents if isinstance(constituents,Iterable) else [constituents]
else:
constituents_ = constituents if isinstance(constituents,Iterable) else [constituents]
suffixes = [''] if self.N_constituents == 1 or isinstance(constituents,int) else \ suffixes = [''] if self.N_constituents == 1 or isinstance(constituents,int) else \
[f'#{c}' for c in constituents_] [f'#{c}' for c in constituents_]
@ -1425,30 +1420,30 @@ class Result:
at_cell_ph = [] at_cell_ph = []
in_data_ph = [] in_data_ph = []
for c in range(self.N_constituents): for c in range(self.N_constituents):
at_cell_ph.append({label: np.where(f[os.path.join(grp,'phase')][:,c][name] == label.encode())[0] \ at_cell_ph.append({label: np.where(f['/'.join((grp,'phase'))][:,c][name] == label.encode())[0] \
for label in self.visible['phases']}) for label in self.visible['phases']})
in_data_ph.append({label: f[os.path.join(grp,'phase')][member][at_cell_ph[c][label]][...,c] \ in_data_ph.append({label: f['/'.join((grp,'phase'))][member][at_cell_ph[c][label]][...,c] \
for label in self.visible['phases']}) for label in self.visible['phases']})
at_cell_ho = {label: np.where(f[os.path.join(grp,'homogenization')][:][name] == label.encode())[0] \ at_cell_ho = {label: np.where(f['/'.join((grp,'homogenization'))][:][name] == label.encode())[0] \
for label in self.visible['homogenizations']} for label in self.visible['homogenizations']}
in_data_ho = {label: f[os.path.join(grp,'homogenization')][member][at_cell_ho[label]] \ in_data_ho = {label: f['/'.join((grp,'homogenization'))][member][at_cell_ho[label]] \
for label in self.visible['homogenizations']} for label in self.visible['homogenizations']}
for inc in util.show_progress(self.visible['increments']): for inc in util.show_progress(self.visible['increments']):
r[inc] = {'phase':{},'homogenization':{},'geometry':{}} r[inc] = {'phase':{},'homogenization':{},'geometry':{}}
for out in output_.intersection(f[os.path.join(inc,'geometry')].keys()): for out in output_.intersection(f['/'.join((inc,'geometry'))].keys()):
r[inc]['geometry'][out] = _read(f,os.path.join(inc,'geometry',out)) r[inc]['geometry'][out] = _read(f,'/'.join((inc,'geometry',out)))
for ty in ['phase','homogenization']: for ty in ['phase','homogenization']:
for label in self.visible[ty+'s']: for label in self.visible[ty+'s']:
for field in f[os.path.join(inc,ty,label)].keys(): for field in f['/'.join((inc,ty,label))].keys():
if field not in r[inc][ty].keys(): if field not in r[inc][ty].keys():
r[inc][ty][field] = {} r[inc][ty][field] = {}
for out in output_.intersection(f[os.path.join(inc,ty,label,field)].keys()): for out in output_.intersection(f['/'.join((inc,ty,label,field))].keys()):
data = ma.array(_read(f,os.path.join(inc,ty,label,field,out))) data = ma.array(_read(f,'/'.join((inc,ty,label,field,out))))
if ty == 'phase': if ty == 'phase':
if out+suffixes[0] not in r[inc][ty][field].keys(): if out+suffixes[0] not in r[inc][ty][field].keys():
@ -1472,7 +1467,7 @@ class Result:
r[inc][ty][field][out][at_cell_ho[label]] = data[in_data_ho[label]] r[inc][ty][field][out][at_cell_ho[label]] = data[in_data_ho[label]]
if strip: r = util.dict_strip(r) if prune: r = util.dict_prune(r)
if compress: r = util.dict_compress(r) if flatten: r = util.dict_flatten(r)
return r return r

View File

@ -26,7 +26,7 @@ __all__=[
'shapeshifter', 'shapeblender', 'shapeshifter', 'shapeblender',
'extend_docstring', 'extended_docstring', 'extend_docstring', 'extended_docstring',
'DREAM3D_base_group', 'DREAM3D_cell_data_group', 'DREAM3D_base_group', 'DREAM3D_cell_data_group',
'dict_strip', 'dict_compress' 'dict_prune', 'dict_flatten'
] ]
# https://svn.blender.org/svnroot/bf-blender/trunk/blender/build_files/scons/tools/bcolors.py # https://svn.blender.org/svnroot/bf-blender/trunk/blender/build_files/scons/tools/bcolors.py
@ -404,41 +404,51 @@ def DREAM3D_cell_data_group(fname):
return cell_data_group return cell_data_group
def dict_strip(d): def dict_prune(d):
""" """
Remove recursively empty dictionaries. Recursively remove empty dictionaries.
Parameters Parameters
---------- ----------
d : dict d : dict
dictionary. Dictionary to prune.
Returns
-------
pruned : dict
Pruned dictionary.
""" """
# https://stackoverflow.com/questions/48151953 # https://stackoverflow.com/questions/48151953
new = {} new = {}
for k,v in d.items(): for k,v in d.items():
if isinstance(v, dict): if isinstance(v, dict):
v = dict_strip(v) v = dict_prune(v)
if not isinstance(v,dict) or v != {}: if not isinstance(v,dict) or v != {}:
new[k] = v new[k] = v
return new return new
def dict_compress(d): def dict_flatten(d):
""" """
Remove recursively dictionaries with one entry. Recursively remove keys of single-entry dictionaries.
Parameters Parameters
---------- ----------
d : dict d : dict
dictionary. Dictionary to flatten.
Returns
-------
flattened : dict
Flattened dictionary.
""" """
if isinstance(d,dict) and len(d) == 1: if isinstance(d,dict) and len(d) == 1:
entry = d[list(d.keys())[0]] entry = d[list(d.keys())[0]]
new = dict_compress(entry.copy()) if isinstance(entry,dict) else entry new = dict_flatten(entry.copy()) if isinstance(entry,dict) else entry
else: else:
new = {k: (dict_compress(v) if isinstance(v, dict) else v) for k,v in d.items()} new = {k: (dict_flatten(v) if isinstance(v, dict) else v) for k,v in d.items()}
return new return new

View File

@ -143,8 +143,8 @@ class TestUtil:
({'A':{'B':{}}}, {}), ({'A':{'B':{}}}, {}),
({'A':{'B':'C'}},)*2, ({'A':{'B':'C'}},)*2,
({'A':{'B':{},'C':'D'}}, {'A':{'C':'D'}})]) ({'A':{'B':{},'C':'D'}}, {'A':{'C':'D'}})])
def test_strip(self,full,reduced): def test_prune(self,full,reduced):
assert util.dict_strip(full) == reduced assert util.dict_prune(full) == reduced
@pytest.mark.parametrize('full,reduced',[({}, {}), @pytest.mark.parametrize('full,reduced',[({}, {}),
@ -156,5 +156,5 @@ class TestUtil:
({'A':{'B':'C','D':'E'}}, {'B':'C','D':'E'}), ({'A':{'B':'C','D':'E'}}, {'B':'C','D':'E'}),
({'B':'C','D':'E'},)*2, ({'B':'C','D':'E'},)*2,
({'A':{'B':{},'C':'D'}}, {'B':{},'C':'D'})]) ({'A':{'B':{},'C':'D'}}, {'B':{},'C':'D'})])
def test_compress(self,full,reduced): def test_flatten(self,full,reduced):
assert util.dict_compress(full) == reduced assert util.dict_flatten(full) == reduced