From 501465dfd1d6d456faed9c49032297dd474537d4 Mon Sep 17 00:00:00 2001
From: Martin Diehl
Date: Fri, 17 Dec 2021 09:42:10 +0100
Subject: [PATCH 1/6] more alpha releases than expected

hopefully, all DAMASK 2 users will have migrated once 3.0 is released
---
 python/damask/_grid.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/python/damask/_grid.py b/python/damask/_grid.py
index 135cc6b66..d852f0642 100644
--- a/python/damask/_grid.py
+++ b/python/damask/_grid.py
@@ -197,7 +197,7 @@ class Grid:
             Grid-based geometry from file.
 
         """
-        warnings.warn('Support for ASCII-based geom format will be removed in DAMASK 3.1.0', DeprecationWarning,2)
+        warnings.warn('Support for ASCII-based geom format will be removed in DAMASK 3.0.0', DeprecationWarning,2)
         try:
             f = open(fname)
         except TypeError:
@@ -629,7 +629,7 @@ class Grid:
         Compress geometry with 'x of y' and 'a to b'.
 
         """
-        warnings.warn('Support for ASCII-based geom format will be removed in DAMASK 3.1.0', DeprecationWarning,2)
+        warnings.warn('Support for ASCII-based geom format will be removed in DAMASK 3.0.0', DeprecationWarning,2)
         header = [f'{len(self.comments)+4} header'] + self.comments \
                + ['grid a {} b {} c {}'.format(*self.cells),
                   'size x {} y {} z {}'.format(*self.size),
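Usage sketch (illustrative only, not part of the patch series; it assumes the deprecated ASCII reader is Grid.load_ASCII and uses a placeholder file name). Since the warning is raised with stacklevel 2, it points at the calling script, which helps to locate remaining uses of the old geom format:

    >>> import warnings
    >>> import damask
    >>> warnings.simplefilter('always', DeprecationWarning)   # make the warning visible on every call
    >>> grid = damask.Grid.load_ASCII('my_geom.geom')         # warns: removal in DAMASK 3.0.0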
From 0b6af19e5468d4867baba1e7b512363eb71fbffb Mon Sep 17 00:00:00 2001
From: Martin Diehl
Date: Fri, 17 Dec 2021 09:47:58 +0100
Subject: [PATCH 2/6] use of keywords is more intuitive

code needs cleaning after removal of 'what' and 'datasets'. For the moment,
keep the old style for compatibility with existing evaluation scripts
---
 python/damask/_result.py    | 115 +++++++++++++++++++++++++++++-------
 python/tests/test_Result.py |  22 +++----
 2 files changed, 104 insertions(+), 33 deletions(-)

diff --git a/python/damask/_result.py b/python/damask/_result.py
index 141b795c2..981b0dcb3 100644
--- a/python/damask/_result.py
+++ b/python/damask/_result.py
@@ -4,6 +4,7 @@ import fnmatch
 import os
 import copy
 import datetime
+import warnings
 import xml.etree.ElementTree as ET
 import xml.dom.minidom
 from pathlib import Path
@@ -27,6 +28,20 @@ h5py3 = h5py.__version__[0] == '3'
 
 chunk_size = 1024**2//8  # for compression in HDF5
 
+def _view_transition(what,datasets,increments,times,phases,homogenizations,fields):
+    if (datasets is not None and what is None) or (what is not None and datasets is None):
+        raise ValueError('"what" and "datasets" need to be used as a pair')
+    if datasets is not None or what is not None:
+        warnings.warn('Arguments "what" and "datasets" will be removed in DAMASK v3.0.0-alpha7', DeprecationWarning,2)
+        return what,datasets
+    if sum(1 for _ in filter(None.__ne__, [increments,times,phases,homogenizations,fields])) > 1:
+        raise ValueError('Only one out of "increments", "times", "phases", "homogenizations", and "fields" can be used')
+    else:
+        if increments is not None: return "increments", increments
+        if times is not None: return "times", times
+        if phases is not None: return "phases", phases
+        if homogenizations is not None: return "homogenizations", homogenizations
+        if fields is not None: return "fields", fields
 
 def _read(dataset):
     """Read a dataset and its metadata into a numpy.ndarray."""
@@ -79,7 +94,7 @@ class Result:
     >>> r.add_Cauchy()
     >>> r.add_equivalent_Mises('sigma')
     >>> r.export_VTK()
-    >>> r_last = r.view('increments',-1)
+    >>> r_last = r.view(increments=-1)
     >>> sigma_vM_last = r_last.get('sigma_vM')
 
     """
@@ -155,10 +170,10 @@ class Result:
         """Show summary of file content."""
         visible_increments = self.visible['increments']
 
-        first = self.view('increments',visible_increments[0:1]).list_data()
+        first = self.view(increments=visible_increments[0:1]).list_data()
 
         last = '' if len(visible_increments) < 2 else \
-               self.view('increments',visible_increments[-1:]).list_data()
+               self.view(increments=visible_increments[-1:]).list_data()
 
         in_between = '' if len(visible_increments) < 3 else \
                      ''.join([f'\n{inc}\n ...\n' for inc in visible_increments[1:-1]])
@@ -285,7 +300,6 @@ class Result:
                 selected.append(self.increments[i])
         return selected
 
-
     def times_in_range(self,start,end):
         """
         Get all increments within a given time range.
@@ -310,17 +324,35 @@ class Result:
 
         return selected
 
-    def view(self,what,datasets):
+    def view(self,what=None,datasets=None,*,
+             increments=None,
+             times=None,
+             phases=None,
+             homogenizations=None,
+             fields=None):
         """
         Set view.
 
+        Wildcard matching with '?' and '*' is supported.
+        True is equivalent to '*', False is equivalent to [].
+
         Parameters
         ----------
         what : {'increments', 'times', 'phases', 'homogenizations', 'fields'}
-            Attribute to change.
+            Attribute to change. DEPRECATED.
         datasets : (list of) int (for increments), (list of) float (for times),
             (list of) str, or bool
-            Name of datasets; supports '?' and '*' wildcards.
+            Name of datasets; supports '?' and '*' wildcards. DEPRECATED.
             True is equivalent to '*', False is equivalent to [].
+        increments: (list of) int, (list of) str, or bool, optional.
+            Number(s) of increments to select.
+        times: (list of) float, (list of) str, or bool, optional.
+            Simulation time(s) of increments to select.
+        phases: (list of) str, or bool, optional.
+            Name(s) of phases to select.
+        homogenizations: (list of) str, or bool, optional.
+            Name(s) of homogenizations to select.
+        fields: (list of) str, or bool, optional.
+            Name(s) of fields to select.
 
         Returns
         -------
         view : damask.Result
             View with only the selected attributes being visible.
 
         Examples
         --------
         Get a view that shows only results from the initial configuration:
 
@@ -333,29 +365,48 @@ class Result:
         >>> import damask
         >>> r = damask.Result('my_file.hdf5')
-        >>> r_first = r.view('increment',0)
+        >>> r_first = r.view(increment=0)
 
         Get a view that shows all results between simulation times of 10 to 40:
 
         >>> import damask
         >>> r = damask.Result('my_file.hdf5')
-        >>> r_t10to40 = r.view('times',r.times_in_range(10.0,40.0))
+        >>> r_t10to40 = r.view(times=r.times_in_range(10.0,40.0))
 
         """
-        return self._manage_view('set',what,datasets)
+        what_, datasets_ = _view_transition(what,datasets,increments,times,phases,homogenizations,fields)
+        return self._manage_view('set',what_,datasets_)
 
 
-    def view_more(self,what,datasets):
+    def view_more(self,what=None,datasets=None,*,
+                  increments=None,
+                  times=None,
+                  phases=None,
+                  homogenizations=None,
+                  fields=None):
         """
         Add to view.
 
+        Wildcard matching with '?' and '*' is supported.
+        True is equivalent to '*', False is equivalent to [].
+
         Parameters
         ----------
         what : {'increments', 'times', 'phases', 'homogenizations', 'fields'}
-            Attribute to change.
+            Attribute to change. DEPRECATED.
         datasets : (list of) int (for increments), (list of) float (for times),
             (list of) str, or bool
-            Name of datasets; supports '?' and '*' wildcards.
+            Name of datasets; supports '?' and '*' wildcards. DEPRECATED.
             True is equivalent to '*', False is equivalent to [].
+        increments: (list of) int, (list of) str, or bool, optional.
+            Number(s) of increments to select.
+        times: (list of) float, (list of) str, or bool, optional.
+            Simulation time(s) of increments to select.
+        phases: (list of) str, or bool, optional.
+            Name(s) of phases to select.
+        homogenizations: (list of) str, or bool, optional.
+            Name(s) of homogenizations to select.
+        fields: (list of) str, or bool, optional.
+            Name(s) of fields to select.
 
         Returns
         -------
         view : damask.Result
             View with only the selected attributes being visible.
 
         Examples
         --------
         Get a view that shows only results from first and last increment:
 
@@ -367,25 +418,44 @@ class Result:
         >>> import damask
-        >>> r_empty = damask.Result('my_file.hdf5').view('increments',False)
-        >>> r_first = r_empty.view_more('increments',0)
-        >>> r_first_and_last = r.first.view_more('increments',-1)
+        >>> r_empty = damask.Result('my_file.hdf5').view(increments=False)
+        >>> r_first = r_empty.view_more(increments=0)
+        >>> r_first_and_last = r.first.view_more(increments=-1)
 
         """
-        return self._manage_view('add',what,datasets)
+        what_, datasets_ = _view_transition(what,datasets,increments,times,phases,homogenizations,fields)
+        return self._manage_view('add',what_,datasets_)
 
 
-    def view_less(self,what,datasets):
+    def view_less(self,what=None,datasets=None,*,
+                  increments=None,
+                  times=None,
+                  phases=None,
+                  homogenizations=None,
+                  fields=None):
         """
         Remove from view.
 
+        Wildcard matching with '?' and '*' is supported.
+        True is equivalent to '*', False is equivalent to [].
+
         Parameters
        ----------
         what : {'increments', 'times', 'phases', 'homogenizations', 'fields'}
-            Attribute to change.
+            Attribute to change. DEPRECATED.
         datasets : (list of) int (for increments), (list of) float (for times),
             (list of) str, or bool
-            Name of datasets; supports '?' and '*' wildcards.
+            Name of datasets; supports '?' and '*' wildcards. DEPRECATED.
             True is equivalent to '*', False is equivalent to [].
+        increments: (list of) int, (list of) str, or bool, optional.
+            Number(s) of increments to select.
+        times: (list of) float, (list of) str, or bool, optional.
+            Simulation time(s) of increments to select.
+        phases: (list of) str, or bool, optional.
+            Name(s) of phases to select.
+        homogenizations: (list of) str, or bool, optional.
+            Name(s) of homogenizations to select.
+        fields: (list of) str, or bool, optional.
+            Name(s) of fields to select.
 
         Returns
         -------
         view : damask.Result
             View with only the selected attributes being visible.
 
         Examples
         --------
         Get a view that shows all results except the undeformed configuration:
 
@@ -398,10 +468,11 @@ class Result:
         >>> import damask
         >>> r_all = damask.Result('my_file.hdf5')
-        >>> r_deformed = r_all.view_less('increments',0)
+        >>> r_deformed = r_all.view_less(increments=0)
 
         """
-        return self._manage_view('del',what,datasets)
+        what_, datasets_ = _view_transition(what,datasets,increments,times,phases,homogenizations,fields)
+        return self._manage_view('del',what_,datasets_)
 
 
     def rename(self,name_src,name_dst):
diff --git a/python/tests/test_Result.py b/python/tests/test_Result.py
index 3f2555aec..3bc363ebc 100644
--- a/python/tests/test_Result.py
+++ b/python/tests/test_Result.py
@@ -25,7 +25,7 @@ def default(tmp_path,ref_path):
     fname = '12grains6x7x8_tensionY.hdf5'
     shutil.copy(ref_path/fname,tmp_path)
     f = Result(tmp_path/fname)
-    return f.view('times',20.0)
+    return f.view(times=20.0)
 
 @pytest.fixture
 def single_phase(tmp_path,ref_path):
@@ -58,14 +58,14 @@ class TestResult:
 
     def test_view_all(self,default):
-        a = default.view('increments',True).get('F')
+        a = default.view(increments=True).get('F')
 
-        assert dict_equal(a,default.view('increments','*').get('F'))
-        assert dict_equal(a,default.view('increments',default.increments_in_range(0,np.iinfo(int).max)).get('F'))
+        assert dict_equal(a,default.view(increments='*').get('F'))
+        assert dict_equal(a,default.view(increments=default.increments_in_range(0,np.iinfo(int).max)).get('F'))
 
-        assert dict_equal(a,default.view('times',True).get('F'))
-        assert dict_equal(a,default.view('times','*').get('F'))
-        assert dict_equal(a,default.view('times',default.times_in_range(0.0,np.inf)).get('F'))
+        assert dict_equal(a,default.view(times=True).get('F'))
+        assert dict_equal(a,default.view(times='*').get('F'))
+        assert dict_equal(a,default.view(times=default.times_in_range(0.0,np.inf)).get('F'))
 
     @pytest.mark.parametrize('what',['increments','times','phases','fields'])  # ToDo: discuss homogenizations
     def test_view_none(self,default,what):
@@ -314,7 +314,7 @@ class TestResult:
 
     @pytest.mark.parametrize('overwrite',['off','on'])
     def test_add_overwrite(self,default,overwrite):
-        last = default.view('increments',-1)
+        last = default.view(increments=-1)
 
         last.add_stress_Cauchy()
 
@@ -377,7 +377,7 @@ class TestResult:
     @pytest.mark.parametrize('inc',[4,0],ids=range(2))
     @pytest.mark.xfail(int(vtk.vtkVersion.GetVTKVersion().split('.')[0])<9, reason='missing "Direction" attribute')
     def test_vtk(self,request,tmp_path,ref_path,update,patch_execution_stamp,patch_datetime_now,output,fname,inc):
-        result = Result(ref_path/fname).view('increments',inc)
+        result = Result(ref_path/fname).view(increments=inc)
         os.chdir(tmp_path)
         result.export_VTK(output,parallel=False)
         fname = fname.split('.')[0]+f'_inc{(inc if type(inc) == int else inc[0]):0>2}.vti'
@@ -400,7 +400,7 @@ class TestResult:
             result.export_VTK(output,mode)
 
     def test_marc_coordinates(self,ref_path):
-        result = Result(ref_path/'check_compile_job1.hdf5').view('increments',-1)
+        result = Result(ref_path/'check_compile_job1.hdf5').view(increments=-1)
         c_n = result.coordinates0_node + result.get('u_n')
         c_p = result.coordinates0_point + result.get('u_p')
         assert len(c_n) > len(c_p)
@@ -440,7 +440,7 @@ class TestResult:
         dim_xdmf = reader_xdmf.GetOutput().GetDimensions()
         bounds_xdmf = reader_xdmf.GetOutput().GetBounds()
 
-        single_phase.view('increments',0).export_VTK(parallel=False)
+        single_phase.view(increments=0).export_VTK(parallel=False)
         fname = os.path.splitext(os.path.basename(single_phase.fname))[0]+'_inc00.vti'
         reader_vti = vtk.vtkXMLImageDataReader()
         reader_vti.SetFileName(fname)
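Usage sketch for the new keyword interface (illustrative only, not part of the committed changes; 'my_file.hdf5' is a placeholder). The old what/datasets pair still works but is routed through _view_transition and emits a DeprecationWarning, while combining two of the new selector keywords is rejected:

    >>> import damask
    >>> r = damask.Result('my_file.hdf5')
    >>> r_sel = r.view(increments=[0,10]).view_more(phases='*')   # new keyword style
    >>> r_old = r.view('increments',[0,10])                       # old pair still accepted, emits DeprecationWarning
    >>> r.view(increments=0,times=10.0)
    Traceback (most recent call last):
        ...
    ValueError: Only one out of "increments", "times", "phases", "homogenizations", and "fields" can be used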
From c1eabc33846df7a1e77e366fe443b1df29576cb3 Mon Sep 17 00:00:00 2001
From: Martin Diehl
Date: Fri, 17 Dec 2021 10:31:41 +0100
Subject: [PATCH 3/6] simplified interface

---
 python/damask/_result.py    | 58 +++++++++++++++----------------
 python/tests/test_Result.py | 10 +++----
 2 files changed, 28 insertions(+), 40 deletions(-)

diff --git a/python/damask/_result.py b/python/damask/_result.py
index 981b0dcb3..2453001a6 100644
--- a/python/damask/_result.py
+++ b/python/damask/_result.py
@@ -246,36 +246,6 @@ class Result:
         return dup
 
 
-    def modification_enable(self):
-        """
-        Allow modification of existing data.
-
-        Returns
-        -------
-        modified_view : damask.Result
-            View without write-protection of existing data.
-
-        """
-        print(util.warn('Warning: Modification of existing datasets allowed!'))
-        dup = self.copy()
-        dup._allow_modification = True
-        return dup
-
-    def modification_disable(self):
-        """
-        Prevent modification of existing data (default case).
-
-        Returns
-        -------
-        modified_view : damask.Result
-            View with write-protection of existing data.
-
-        """
-        dup = self.copy()
-        dup._allow_modification = False
-        return dup
-
-
     def increments_in_range(self,start,end):
         """
         Get all increments within a given range.
@@ -329,7 +299,8 @@ class Result:
              times=None,
              phases=None,
             homogenizations=None,
-             fields=None):
+             fields=None,
+             protected=None):
         """
         Set view.
 
@@ -353,6 +324,8 @@ class Result:
             Name(s) of homogenizations to select.
         fields: (list of) str, or bool, optional.
             Name(s) of fields to select.
+        protected: bool, optional.
+            Protection status of existing data.
 
         Returns
         -------
@@ -374,8 +347,23 @@ class Result:
         >>> r_t10to40 = r.view(times=r.times_in_range(10.0,40.0))
 
         """
-        what_, datasets_ = _view_transition(what,datasets,increments,times,phases,homogenizations,fields)
-        return self._manage_view('set',what_,datasets_)
+        v = _view_transition(what,datasets,increments,times,phases,homogenizations,fields)
+        if protected is not None:
+            if v == None:
+                dup = self.copy()
+            else:
+                what_,datasets_ = v
+                dup = self._manage_view('set',what_,datasets_)
+        else:
+            what_,datasets_ = v
+            dup = self._manage_view('set',what_,datasets_)
+
+        if protected is False:
+            dup._allow_modification = True
+        if protected is True:
+            print(util.warn('Warning: Modification of existing datasets allowed!'))
+            dup._allow_modification = False
+        return dup
 
 
     def view_more(self,what=None,datasets=None,*,
@@ -495,7 +483,7 @@ class Result:
 
         >>> import damask
         >>> r = damask.Result('my_file.hdf5')
-        >>> r_unprotected = r.modification_enable()
+        >>> r_unprotected = r.view(protected=False)
         >>> r_unprotected.rename('F','def_grad')
 
         """
@@ -534,7 +522,7 @@ class Result:
 
         >>> import damask
         >>> r = damask.Result('my_file.hdf5')
-        >>> r_unprotected = r.modification_enable()
+        >>> r_unprotected = r.view(protected=False)
         >>> r_unprotected.remove('F')
 
         """
diff --git a/python/tests/test_Result.py b/python/tests/test_Result.py
index 3bc363ebc..4fea08eb3 100644
--- a/python/tests/test_Result.py
+++ b/python/tests/test_Result.py
@@ -322,9 +322,9 @@ class TestResult:
         created_first = datetime.strptime(created_first,'%Y-%m-%d %H:%M:%S%z')
 
         if overwrite == 'on':
-            last = last.modification_enable()
+            last = last.view(protected=False)
         else:
-            last = last.modification_disable()
+            last = last.view(protected=True)
 
         time.sleep(2.)
         try:
@@ -344,10 +344,10 @@ class TestResult:
     def test_rename(self,default,allowed):
         if allowed == 'on':
             F = default.place('F')
-            default = default.modification_enable()
+            default = default.view(protected=False)
             default.rename('F','new_name')
             assert np.all(F == default.place('new_name'))
-            default = default.modification_disable()
+            default = default.view(protected=True)
 
         with pytest.raises(PermissionError):
             default.rename('P','another_new_name')
 
     @pytest.mark.parametrize('allowed',['off','on'])
     def test_remove(self,default,allowed):
         if allowed == 'on':
-            unsafe = default.modification_enable()
+            unsafe = default.view(protected=False)
             unsafe.remove('F')
             assert unsafe.get('F') is None
         else:

From a1e42af8602a63f7c5d8fbaa1b71cdd5028f117a Mon Sep 17 00:00:00 2001
From: Martin Diehl
Date: Fri, 17 Dec 2021 10:37:45 +0100
Subject: [PATCH 4/6] easier to understand

---
 python/damask/_result.py | 18 ++++++++----------
 1 file changed, 8 insertions(+), 10 deletions(-)

diff --git a/python/damask/_result.py b/python/damask/_result.py
index 2453001a6..019f9c87d 100644
--- a/python/damask/_result.py
+++ b/python/damask/_result.py
@@ -156,7 +156,7 @@ class Result:
 
         self.fname = Path(fname).absolute()
 
-        self._allow_modification = False
+        self._protected = True
 
 
     def __copy__(self):
@@ -349,20 +349,18 @@ class Result:
 
         """
         v = _view_transition(what,datasets,increments,times,phases,homogenizations,fields)
         if protected is not None:
-            if v == None:
+            if v is None:
                 dup = self.copy()
             else:
                 what_,datasets_ = v
                 dup = self._manage_view('set',what_,datasets_)
+            if protected is True:
+                print(util.warn('Warning: Modification of existing datasets allowed!'))
+            dup._protected = protected
         else:
             what_,datasets_ = v
             dup = self._manage_view('set',what_,datasets_)
 
-        if protected is False:
-            dup._allow_modification = True
-        if protected is True:
-            print(util.warn('Warning: Modification of existing datasets allowed!'))
-            dup._allow_modification = False
         return dup
 
@@ -487,7 +485,7 @@ class Result:
         >>> r_unprotected.rename('F','def_grad')
 
         """
-        if not self._allow_modification:
+        if self._protected:
             raise PermissionError('Renaming datasets not permitted')
 
         with h5py.File(self.fname,'a') as f:
@@ -526,7 +524,7 @@ class Result:
         >>> r_unprotected.remove('F')
 
         """
-        if not self._allow_modification:
+        if self._protected:
             raise PermissionError('Removing datasets not permitted')
 
         with h5py.File(self.fname,'a') as f:
@@ -1417,7 +1415,7 @@ class Result:
             lock.acquire()
             with h5py.File(self.fname, 'a') as f:
                 try:
-                    if self._allow_modification and '/'.join([group,result['label']]) in f:
+                    if not self._protected and '/'.join([group,result['label']]) in f:
                         dataset = f['/'.join([group,result['label']])]
                         dataset[...] = result['data']
                         dataset.attrs['overwritten'] = True
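Usage sketch for the reworked write protection of patches 3 and 4 (illustrative only, not part of the committed changes; 'my_file.hdf5' is a placeholder). Existing datasets stay protected by default; write access is now requested through view(protected=False) instead of modification_enable():

    >>> import damask
    >>> r = damask.Result('my_file.hdf5')
    >>> r.rename('F','def_grad')                  # protected by default
    Traceback (most recent call last):
        ...
    PermissionError: Renaming datasets not permitted
    >>> r_unprotected = r.view(protected=False)   # copy of the view that allows modification
    >>> r_unprotected.rename('F','def_grad')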
From 2fc25c6fa9d81783a15b2ab243e706954f3259ca Mon Sep 17 00:00:00 2001
From: Martin Diehl
Date: Fri, 17 Dec 2021 14:28:12 +0100
Subject: [PATCH 5/6] correct reporting

---
 python/damask/_result.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/damask/_result.py b/python/damask/_result.py
index 019f9c87d..f47a7da80 100644
--- a/python/damask/_result.py
+++ b/python/damask/_result.py
@@ -354,7 +354,7 @@ class Result:
             else:
                 what_,datasets_ = v
                 dup = self._manage_view('set',what_,datasets_)
-            if protected is True:
+            if not protected:
                 print(util.warn('Warning: Modification of existing datasets allowed!'))
             dup._protected = protected
         else:

From ac7caa0a643337403f45bf212442d404129f759c Mon Sep 17 00:00:00 2001
From: Test User
Date: Sat, 25 Dec 2021 08:39:13 +0100
Subject: [PATCH 6/6] [skip ci] updated version information after successful
 test of v3.0.0-alpha5-323-g2ea293063

---
 python/damask/VERSION | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/damask/VERSION b/python/damask/VERSION
index 852dd348b..56ff18f14 100644
--- a/python/damask/VERSION
+++ b/python/damask/VERSION
@@ -1 +1 @@
-v3.0.0-alpha5-313-g8c2266bc5
+v3.0.0-alpha5-323-g2ea293063