implemented ideas from discussion with Philip

unified group matching, now with wildcard support
time step handling should become more convenient (WIP)
add_norm cannot compute the absolute value of a scalar; added a dedicated function for that
general polishing here and there
Martin Diehl 2019-09-14 08:52:07 -07:00
parent c13db4b3ca
commit b85ac11c49
2 changed files with 110 additions and 57 deletions

File 1 of 2: post-processing script

@@ -24,9 +24,9 @@ parser.add_argument('filenames', nargs='+',
 parser.add_argument('-d','--dir', dest='dir',default='postProc',metavar='string',
                     help='name of subdirectory to hold output')
 parser.add_argument('--mat', nargs='+',
-                    help='labels for materialpoint/homogenization',dest='mat')
+                    help='labels for materialpoint',dest='mat')
 parser.add_argument('--con', nargs='+',
-                    help='labels for constituent/crystallite/constitutive',dest='con')
+                    help='labels for constituent',dest='con')

 options = parser.parse_args()
@@ -67,11 +67,9 @@ for filename in options.filenames:
       x = results.get_dataset_location(label)
       if len(x) == 0:
         continue
-      label = x[0].split('/')[-1]
       array = results.read_dataset(x,0)
       d = int(np.product(np.shape(array)[1:]))
-      array = np.reshape(array,[np.product(results.grid),d])
-      data = np.concatenate((data,array),1)
+      data = np.concatenate((data,np.reshape(array,[np.product(results.grid),d])),1)

       if d>1:
         header+= ''.join([' {}_{}'.format(j+1,label) for j in range(d)])
@@ -84,11 +82,9 @@ for filename in options.filenames:
       x = results.get_dataset_location(label)
       if len(x) == 0:
         continue
-      label = x[0].split('/')[-1]
       array = results.read_dataset(x,0)
       d = int(np.product(np.shape(array)[1:]))
-      array = np.reshape(array,[np.product(results.grid),d])
-      data = np.concatenate((data,array),1)
+      data = np.concatenate((data,np.reshape(array,[np.product(results.grid),d])),1)

       if d>1:
         header+= ''.join([' {}_{}'.format(j+1,label) for j in range(d)])
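
Both hunks switch to building the output table in one step: each dataset is flattened to one row per grid cell and appended as extra columns. A minimal standalone sketch with made-up sizes:

    import numpy as np

    grid  = (2,2,2)                                       # made-up grid dimensions
    N     = np.prod(grid)                                 # 8 cells
    data  = np.zeros((N,0))                               # table starts without columns

    array = np.random.rand(N,3,3)                         # e.g. a per-cell 3x3 tensor dataset
    d     = int(np.prod(array.shape[1:]))                 # 9 columns per tensor
    data  = np.concatenate((data,array.reshape(N,d)),1)   # append as columns
    print(data.shape)                                     # (8, 9)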

File 2 of 2: dadf5.py

@@ -1,5 +1,6 @@
 from queue import Queue
 import re
+import glob

 import h5py
 import numpy as np
@@ -37,10 +38,12 @@ class DADF5():
       self.size = f['geometry'].attrs['size']

       r=re.compile('inc[0-9]+')
-      self.increments = [{'inc':  int(u[3:]),
+      self.time_information = [{'inc':  int(u[3:]),
                           'time': round(f[u].attrs['time/s'],12),
                          } for u in f.keys() if r.match(u)]
+      self.increments = self.time_information.copy()                                             # unify later

       self.Nmaterialpoints, self.Nconstituents = np.shape(f['mapping/cellResults/constituent'])
       self.materialpoints  = [m.decode() for m in np.unique(f['mapping/cellResults/materialpoint']['Name'])]
       self.constituents    = [c.decode() for c in np.unique(f['mapping/cellResults/constituent']['Name'])]
@@ -58,7 +61,7 @@ class DADF5():
           self.m_output_types.append(o)
       self.m_output_types = list(set(self.m_output_types))                                       # make unique

-      self.visible= {'increments':     self.increments,                                          # ToDo: simplify, activity only positions that translate into (no complex types)
+      self.visible= {'increments':     self.increments,
                      'constituents':   self.constituents,
                      'materialpoints': self.materialpoints,
                      'constituent':    range(self.Nconstituents),                                # ToDo: stupid naming
@@ -99,6 +102,23 @@ class DADF5():
       self.__visible_set(a,t,a)


+  def increment_set_by_time(self,start,end):
+    for t in self.time_information:
+      if start <= t['time'] < end:
+        print(t)
+
+
+  def increment_set_by_position(self,start,end):
+    for t in self.time_information[start:end]:
+      print(t)
+
+
+  def increment_set(self,start,end):
+    for t in self.time_information:
+      if start <= t['inc'] < end:
+        print(t)
+
+
   def constituent_output_iter(self):
     return self.__visible_iter('c_output_types')
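
These setters are WIP (cf. the commit message): for now they only print the matching entries instead of narrowing self.visible. Their three selection criteria, distilled into a standalone sketch over a made-up time_information list:

    time_information = [{'inc':  0, 'time': 0.0},
                        {'inc': 10, 'time': 0.5},
                        {'inc': 20, 'time': 1.0}]

    by_time     = [t for t in time_information if 0.4 <= t['time'] < 1.0]  # half-open time window
    by_position = time_information[0:2]                                    # plain list slice
    by_inc      = [t for t in time_information if 5 <= t['inc'] < 25]      # increment-number window

    print(by_time)       # [{'inc': 10, 'time': 0.5}]
    print(by_position)   # first two increments
    print(by_inc)        # [{'inc': 10, 'time': 0.5}, {'inc': 20, 'time': 1.0}]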
@@ -167,37 +187,58 @@ class DADF5():
   # ToDo: store increments, select increments (trivial), position, and time


-  def get_groups(self,l):                                                                        # group_with_data(datasets)
+  def groups_with_datasets(self,datasets):
     """
     Get groups that contain all requested datasets.

+    Only groups within inc?????/constituent/*_*/* and inc?????/materialpoint/*_*/*
+    are considered as they contain the data.
+    Single strings will be treated as a list with one entry.
+    Wildcard matching is allowed, but the number of arguments needs to fit.
+
     Parameters
     ----------
-    l : list of str
+    datasets : iterable or str or boolean
+      Names of datasets that need to be located in the group.
+
+    Examples
+    --------
+      datasets = False            matches no group
+      datasets = True             matches all groups
+      datasets = ['F','P']        matches a group with ['F','P','sigma']
+      datasets = ['*','P']        matches a group with ['F','P','sigma']
+      datasets = ['*']            does not match a group with ['F','P','sigma']
+      datasets = ['*','*']        does not match a group with ['F','P','sigma']
+      datasets = ['*','*','*']    matches a group with ['F','P','sigma']

     """
+    if datasets is False: return []
+    if isinstance(datasets,str):
+      s = [datasets]
+    else:
+      s = datasets
+
     groups = []
-    if type(l) is not list:
-      raise TypeError('Candidates should be given as a list')
     with h5py.File(self.filename,'r') as f:
-      for g in self.get_active_groups():
-        if set(l).issubset(f[g].keys()): groups.append(g)
-    return groups
-
-
-  def get_active_groups(self):                                                                   # rename: get_groups needed? merge with datasets and have [] and ['*']
-    """Get groups that are currently considered for evaluation."""
-    groups = []
-    for i in self.visible['increments']:
-      group_inc = 'inc{:05}'.format(i['inc'])
-      for c in self.visible['constituents']:
-        for t in self.visible['c_output_types']:
-          groups.append('/'.join([group_inc,'constituent',c,t]))
-      for m in self.visible['materialpoints']:
-        for t in self.visible['m_output_types']:
-          groups.append('/'.join([group_inc,'materialpoint',m,t]))
+      for i in self.visible['increments']:
+        group_inc = 'inc{:05}'.format(i['inc'])                                                  # ToDo: merge path only once at the end: '/'.join(listE)
+        for c in self.constituent_iter():
+          for t in self.constituent_output_iter():
+            group = '/'.join([group_inc,'constituent',c,t])
+            if datasets is True:
+              groups.append(group)
+            else:
+              match = [e for e_ in [glob.fnmatch.filter(f[group].keys(),w) for w in s] for e in e_]
+              if len(set(match)) == len(s): groups.append(group)
+        for m in self.materialpoint_iter():
+          for t in self.materialpoint_output_iter():
+            group = '/'.join([group_inc,'materialpoint',m,t])
+            if datasets is True:
+              groups.append(group)
+            else:
+              match = [e for e_ in [glob.fnmatch.filter(f[group].keys(),w) for w in s] for e in e_]
+              if len(set(match)) == len(s): groups.append(group)
     return groups
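
For reference, the counting rule behind the new wildcard matching, distilled into a standalone sketch (dataset names made up): every pattern is expanded with fnmatch, and the group qualifies when the distinct matched names are exactly as many as the requested patterns.

    import fnmatch                                    # glob.fnmatch above works because glob imports fnmatch internally

    def matches_all(available,requested):
        """True if the patterns jointly pin down as many distinct datasets as there are patterns."""
        match = [m for pattern in requested
                   for m in fnmatch.filter(available,pattern)]
        return len(set(match)) == len(requested)

    available = ['F','P','sigma']
    print(matches_all(available,['F','P']))           # True:  two patterns, two distinct hits
    print(matches_all(available,['*']))               # False: one pattern, three distinct hits
    print(matches_all(available,['*','*','*']))       # True:  three patterns, three distinct hits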
@@ -316,14 +357,17 @@ class DADF5():
   def add_Mises(self,x):
     """Adds the equivalent Mises stress or strain of a tensor."""
-    def deviator(x):
+    def Mises(x):

-      if x['meta']['Unit'] == 'Pa':                                                              # ToDo: Should we use this? Then add_Cauchy and add_strain_tensors also should perform sanity checks
+      if x['meta']['Unit'] == b'Pa':
         factor = 3.0/2.0
-      elif x['meta']['Unit'] == '-':
+        t = 'stress'
+      elif x['meta']['Unit'] == b'1':
         factor = 2.0/3.0
+        t = 'strain'
       else:
-        ValueError
+        print(x['meta']['Unit'])
+        raise ValueError

       d = x['data']
       dev = d - np.einsum('ijk,i->ijk',np.broadcast_to(np.eye(3),[d.shape[0],3,3]),np.trace(d,axis1=1,axis2=2)/3.0)
@@ -331,35 +375,29 @@ class DADF5():
       return {
         'data'  : np.sqrt(np.einsum('ijk->i',dev**2)*factor),
-        'label' : 'Mises({})'.format(x['label']),
+        'label' : '{}_vM'.format(x['label']),
         'meta'  : {
           'Unit'        : x['meta']['Unit'],
-          'Description' : 'Mises equivalent stress of {} ({})'.format(x['label'],x['meta']['Description']),
+          'Description' : 'Mises equivalent {} of {} ({})'.format(t,x['label'],x['meta']['Description']),
           'Creator'     : 'dadf5.py:add_Mises_stress vXXXXX'
         }
       }

     requested = [{'label':x,'arg':'x'}]

-    self.__add_generic_pointwise(deviator,requested)
+    self.__add_generic_pointwise(Mises,requested)


   def add_norm(self,x,ord=None):
     """
-    Adds norm of vector or tensor or magnitude of a scalar.
+    Adds norm of vector or tensor.

     See numpy.linalg.norm manual for details.
     """
     def norm(x,ord):

       o = ord
-      if   len(x['data'].shape) == 1:
-        axis = 0
-        t = 'scalar'
-        if o is None: o = 2
-      elif len(x['data'].shape) == 2:
+      if   len(x['data'].shape) == 2:
         axis = 1
         t = 'vector'
         if o is None: o = 2
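
The deviator line computes dev(sigma) = sigma - tr(sigma)/3 * I per cell; squaring, summing, and scaling by 3/2 (stress) or 2/3 (strain) gives the Mises equivalent. A standalone numpy check with a made-up stress field:

    import numpy as np

    sigma = np.broadcast_to(np.diag([2.0,-1.0,-1.0]),(4,3,3))      # four cells, same made-up stress (Pa)
    dev   = sigma - np.einsum('ijk,i->ijk',
                              np.broadcast_to(np.eye(3),(4,3,3)),
                              np.trace(sigma,axis1=1,axis2=2)/3.0)  # subtract hydrostatic part per cell
    vM    = np.sqrt(np.einsum('ijk->i',dev**2)*3.0/2.0)            # stress variant: factor 3/2
    print(vM)                                                      # [3. 3. 3. 3.] for principal values (2,-1,-1)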
@@ -372,7 +410,7 @@ class DADF5():
       return {
         'data'  : np.linalg.norm(x['data'],ord=o,axis=axis,keepdims=True),
-        'label' : '|{}|_{}'.format(x['label'],ord),
+        'label' : '|{}|_{}'.format(x['label'],o),
         'meta'  : {
           'Unit'        : x['meta']['Unit'],
           'Description' : '{}-Norm of {} {} ({})'.format(ord,t,x['label'],x['meta']['Description']),
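
After dropping the scalar branch, norm() starts at the vector case: per-cell data of shape (N,3) is reduced along axis 1. A quick illustration:

    import numpy as np

    v = np.array([[3.0,4.0,0.0],
                  [1.0,0.0,0.0]])                         # two cells, one 3-vector each
    print(np.linalg.norm(v,ord=2,axis=1,keepdims=True))   # [[5.] [1.]] -- one 2-norm per cell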
@@ -385,6 +423,25 @@ class DADF5():
     self.__add_generic_pointwise(norm,requested,{'ord':ord})


+  def add_absolute(self,x):
+    """Adds absolute value."""
+    def absolute(x):
+
+      return {
+        'data'  : np.abs(x['data']),
+        'label' : '|{}|'.format(x['label']),
+        'meta'  : {
+          'Unit'        : x['meta']['Unit'],
+          'Description' : 'Absolute value of {} ({})'.format(x['label'],x['meta']['Description']),
+          'Creator'     : 'dadf5.py:add_abs vXXXXX'
+        }
+      }
+
+    requested = [{'label':x,'arg':'x'}]
+
+    self.__add_generic_pointwise(absolute,requested)
+
+
   def add_determinant(self,x):
     """Adds the determinant component of a tensor."""
     def determinant(x):
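
add_absolute covers the case the commit message mentions: add_norm could not produce a per-cell magnitude for scalar data, because np.linalg.norm over the lone axis of a 1-D array collapses all cells into one number. A minimal demonstration with a made-up scalar field:

    import numpy as np

    phi = np.array([-0.3,0.7,-1.2])            # one scalar per cell

    print(np.linalg.norm(phi,ord=2,axis=0))    # 1.42...: a single number for the whole field
    print(np.abs(phi))                         # [0.3 0.7 1.2]: one magnitude per cell, what add_absolute stores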
@@ -516,7 +573,7 @@ class DADF5():
     todo = []
     # ToDo: It would be more memory efficient to read only from file when required, i.e. to do it in pool.add_task
-    for group in self.get_groups([d['label'] for d in datasets_requested]):
+    for group in self.groups_with_datasets([d['label'] for d in datasets_requested]):
       with h5py.File(self.filename,'r') as f:
         datasets_in = {}
         for d in datasets_requested: