From 002383afc2902b95238a9e520e44b1d47118d6b7 Mon Sep 17 00:00:00 2001
From: Martin Diehl
Date: Fri, 13 Sep 2019 16:01:30 -0700
Subject: [PATCH] solved problem with postprocessing

do not wrap the h5py access in 'try': the h5py library might have another
'try' internally, so check explicitly for an empty result instead.
Also some polishing.
---
 processing/post/DADF5_postResults.py |  5 ++--
 processing/post/DADF5_vtk_cells.py   | 10 ++++---
 python/damask/dadf5.py               | 40 ++++++++++++----------------
 3 files changed, 26 insertions(+), 29 deletions(-)

diff --git a/processing/post/DADF5_postResults.py b/processing/post/DADF5_postResults.py
index fa47805bb..136824282 100755
--- a/processing/post/DADF5_postResults.py
+++ b/processing/post/DADF5_postResults.py
@@ -1,9 +1,10 @@
 #!/usr/bin/env python3
-# -*- coding: UTF-8 no BOM -*-
 
 import os
-import numpy as np
 import argparse
+
+import numpy as np
+
 import damask
 
 scriptName = os.path.splitext(os.path.basename(__file__))[0]
diff --git a/processing/post/DADF5_vtk_cells.py b/processing/post/DADF5_vtk_cells.py
index 75301386c..aaf8eff26 100755
--- a/processing/post/DADF5_vtk_cells.py
+++ b/processing/post/DADF5_vtk_cells.py
@@ -1,12 +1,14 @@
 #!/usr/bin/env python3
-# -*- coding: UTF-8 no BOM -*-
 
-import os,vtk
-import numpy as np
+import os
 import argparse
-import damask
+
+import numpy as np
+import vtk
 from vtk.util import numpy_support
 
+import damask
+
 scriptName = os.path.splitext(os.path.basename(__file__))[0]
 scriptID   = ' '.join([scriptName,damask.version])
 
diff --git a/python/damask/dadf5.py b/python/damask/dadf5.py
index 44238572f..d428cfb59 100644
--- a/python/damask/dadf5.py
+++ b/python/damask/dadf5.py
@@ -100,18 +100,14 @@ class DADF5():
     Get groups that are currently considered for evaluation.
     """
     groups = []
-    for i,x in enumerate(self.active['increments']):
-      group_inc = 'inc{:05}'.format(self.active['increments'][i]['inc'])  #ToDo: Merge path only once at the end '/'.join(listE)
+    for i in self.active['increments']:
+      group_inc = 'inc{:05}'.format(i['inc'])                             #ToDo: Merge path only once at the end '/'.join(listE)
       for c in self.active['constituents']:
-        group_constituent = group_inc+'/constituent/'+c
         for t in self.active['c_output_types']:
-          group_output_types = group_constituent+'/'+t
-          groups.append(group_output_types)
+          groups.append('/'.join([group_inc,'constituent',c,t]))
       for m in self.active['materialpoints']:
-        group_materialpoint = group_inc+'/materialpoint/'+m
         for t in self.active['m_output_types']:
-          group_output_types = group_materialpoint+'/'+t
-          groups.append(group_output_types)
+          groups.append('/'.join([group_inc,'materialpoint',m,t]))
     return groups
 
 
@@ -150,20 +146,20 @@ class DADF5():
       group_inc = 'inc{:05}'.format(i['inc'])
 
       for c in self.active['constituents']:
-        group_constituent = group_inc+'/constituent/'+c
         for t in self.active['c_output_types']:
           try:
-            f[group_constituent+'/'+t+'/'+label]
-            path.append(group_constituent+'/'+t+'/'+label)
+            p = '/'.join([group_inc,'constituent',c,t,label])
+            f[p]
+            path.append(p)
           except KeyError as e:
             print('unable to locate constituents dataset: '+ str(e))
 
       for m in self.active['materialpoints']:
-        group_materialpoint = group_inc+'/materialpoint/'+m
         for t in self.active['m_output_types']:
           try:
-            f[group_materialpoint+'/'+t+'/'+label]
-            path.append(group_materialpoint+'/'+t+'/'+label)
+            p = '/'.join([group_inc,'materialpoint',m,t,label])
+            f[p]
+            path.append(p)
           except KeyError as e:
             print('unable to locate materialpoints dataset: '+ str(e))
 
@@ -182,24 +178,22 @@
       dataset = np.full(shape,np.nan)
       for pa in path:
         label = pa.split('/')[2]
-        try:
-          p = np.where(f['mapping/cellResults/constituent'][:,c]['Name'] == str.encode(label))[0]
+
+        p = np.where(f['mapping/cellResults/constituent'][:,c]['Name'] == str.encode(label))[0]
+        if len(p)>0:
           u = (f['mapping/cellResults/constituent'][p,c]['Position'])
           a = np.array(f[pa])
           if len(a.shape) == 1: a=a.reshape([a.shape[0],1])
           dataset[p,:] = a[u,:]
-        except KeyError as e:
-          print('unable to read constituent: '+ str(e))
-        try:
-          p = np.where(f['mapping/cellResults/materialpoint']['Name'] == str.encode(label))[0]
+
+        p = np.where(f['mapping/cellResults/materialpoint']['Name'] == str.encode(label))[0]
+        if len(p)>0:
          u = (f['mapping/cellResults/materialpoint'][p.tolist()]['Position'])
           a = np.array(f[pa])
           if len(a.shape) == 1: a=a.reshape([a.shape[0],1])
           dataset[p,:] = a[u,:]
-        except KeyError as e:
-          print('unable to read materialpoint: '+ str(e))
 
     return dataset
 
@@ -424,7 +418,7 @@ class DADF5():
     N_not_calculated = len(todo)
     while N_not_calculated > 0:
       result = results.get()
-      with h5py.File(self.filename,self.mode) as f:                            # write to file
+      with h5py.File(self.filename,'a') as f:                                  # write to file
         dataset_out = f[result['group']].create_dataset(result['label'],data=result['data'])
         for k in result['meta'].keys():
           dataset_out.attrs[k] = result['meta'][k]
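
Note (illustration, not part of the patch): the core change in python/damask/dadf5.py
builds HDF5 group paths with '/'.join and replaces the try/except around the h5py
lookup by an explicit test of the np.where selection. A minimal standalone sketch of
that pattern follows; the file name 'demo.hdf5', the group and dataset names, and the
stand-in 'Name' column are invented for this example.

import h5py
import numpy as np

# create a toy file so the sketch runs on its own
with h5py.File('demo.hdf5','w') as f:
    f['inc00000/constituent/1_Al/generic/sigma'] = np.random.rand(4,9)

group = '/'.join(['inc00000','constituent','1_Al','generic'])   # one join instead of repeated '+'
label = 'sigma'
names = np.array([b'sigma',b'epsilon',b'sigma',b'F'])           # stand-in for the 'Name' mapping column

with h5py.File('demo.hdf5','r') as f:
    p = np.where(names == str.encode(label))[0]                 # rows that refer to this label
    if len(p) > 0:                                              # explicit check instead of try/except
        a = np.array(f['/'.join([group,label])])
        if len(a.shape) == 1:
            a = a.reshape([a.shape[0],1])                       # promote 1D data to a column
        print(a[p,:].shape)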
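
Note (illustration, not part of the patch): the last hunk opens the DADF5 file in
append mode ('a') when storing derived results. A hedged sketch of that write pattern
follows; the group, label, data, and metadata are invented, and require_group is added
only to keep the sketch self-contained.

import h5py
import numpy as np

result = {'group': 'inc00000/constituent/1_Al/generic',
          'label': 'norm_sigma',
          'data' : np.random.rand(4,1),
          'meta' : {'Unit': 'Pa', 'Description': 'illustrative norm of sigma'}}

with h5py.File('demo.hdf5','a') as f:                           # 'a': read/write, create if missing
    f.require_group(result['group'])                            # no-op if the group already exists
    dataset_out = f[result['group']].create_dataset(result['label'],data=result['data'])
    for k in result['meta'].keys():
        dataset_out.attrs[k] = result['meta'][k]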