from queue import Queue
import re

import h5py
import numpy as np

from . import util

# ------------------------------------------------------------------
class DADF5():
"""
Read and write to DADF5 files.

  DADF5 files contain DAMASK results.
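
  Examples
  --------
  A minimal usage sketch; the file name is hypothetical and the class is
  assumed to be re-exported at package level as damask.DADF5:

  >>> import damask
  >>> results = damask.DADF5('my_job.hdf5')
  >>> results.list_data()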
"""

# ------------------------------------------------------------------
  def __init__(self,
               filename,
               mode = 'r',
              ):
"""
Opens an existing DADF5 file.

    Parameters
    ----------
    filename : str
        name of the DADF5 file to be opened.
    mode : str, optional
        file mode for opening, either 'r' or 'a'.

    """
    if mode not in ['a','r']:
      raise ValueError('invalid file access mode "{}"'.format(mode))
    else:
      with h5py.File(filename,mode):
        pass

    with h5py.File(filename,'r') as f:

      if f.attrs['DADF5-major'] != 0 or f.attrs['DADF5-minor'] != 2:
        raise TypeError('Unsupported DADF5 version {}'.format(f.attrs['DADF5-version']))

      self.structured = 'grid' in f['geometry'].attrs.keys()

      if self.structured:
        self.grid = f['geometry'].attrs['grid']
        self.size = f['geometry'].attrs['size']

      r = re.compile('inc[0-9]+')
      self.increments = [{'inc':  int(u[3:]),
                          'time': round(f[u].attrs['time/s'],12),
                         } for u in f.keys() if r.match(u)]

      self.constituents   = np.unique(f['mapping/cellResults/constituent']['Name']).tolist()    # ToDo: I am not too happy with the name
      self.constituents   = [c.decode() for c in self.constituents]

      self.materialpoints = np.unique(f['mapping/cellResults/materialpoint']['Name']).tolist()  # ToDo: I am not too happy with the name
      self.materialpoints = [m.decode() for m in self.materialpoints]

      self.Nconstituents   = [i for i in range(np.shape(f['mapping/cellResults/constituent'])[1])] # list of constituent slots per cell
      self.Nmaterialpoints = np.shape(f['mapping/cellResults/constituent'])[0]                     # number of cells

      self.c_output_types = []
      for c in self.constituents:
        for o in f['inc{:05}/constituent/{}'.format(self.increments[0]['inc'],c)].keys():
          self.c_output_types.append(o)
      self.c_output_types = list(set(self.c_output_types))                # make unique

      self.m_output_types = []
      for m in self.materialpoints:
        for o in f['inc{:05}/materialpoint/{}'.format(self.increments[0]['inc'],m)].keys():
          self.m_output_types.append(o)
      self.m_output_types = list(set(self.m_output_types))                # make unique

    self.active = {'increments':     self.increments,
                   'constituents':   self.constituents,
                   'materialpoints': self.materialpoints,
                   'constituent':    self.Nconstituents,
                   'c_output_types': self.c_output_types,
                   'm_output_types': self.m_output_types}

    self.filename = filename
    self.mode     = mode


  def get_candidates(self,l):
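    """Return groups that contain all requested dataset labels."""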
    groups = []
    if type(l) is not list:
      raise TypeError('expects a list of labels')
    with h5py.File(self.filename,'r') as f:
      for g in self.get_active_groups():
        if set(l).issubset(f[g].keys()): groups.append(g)
    return groups


  def get_active_groups(self):
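    """Return the HDF5 group paths matching the active increments, constituents, materialpoints, and output types."""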
    groups = []
    for i in self.active['increments']:
      group_inc = 'inc{:05}'.format(i['inc'])

      for c in self.active['constituents']:
        group_constituent = group_inc+'/constituent/'+c
        for t in self.active['c_output_types']:
          group_output_types = group_constituent+'/'+t
          groups.append(group_output_types)

      for m in self.active['materialpoints']:
        group_materialpoint = group_inc+'/materialpoint/'+m
        for t in self.active['m_output_types']:
          group_output_types = group_materialpoint+'/'+t
          groups.append(group_output_types)

    return groups


  def list_data(self):
"""Shows information on all datasets in the file."""
    with h5py.File(self.filename,'r') as f:
      group_inc = 'inc{:05}'.format(self.active['increments'][0]['inc'])

      for c in self.active['constituents']:
        print('\n'+c)
        group_constituent = group_inc+'/constituent/'+c
        for t in self.active['c_output_types']:
          print('  {}'.format(t))
          group_output_types = group_constituent+'/'+t
          try:
            for x in f[group_output_types].keys():
              print('    {} ({})'.format(x,f[group_output_types+'/'+x].attrs['Description'].decode()))
          except KeyError:
            pass

      for m in self.active['materialpoints']:
        print('\n'+m)
        group_materialpoint = group_inc+'/materialpoint/'+m
        for t in self.active['m_output_types']:
          print('  {}'.format(t))
          group_output_types = group_materialpoint+'/'+t
          try:
            for x in f[group_output_types].keys():
              print('    {} ({})'.format(x,f[group_output_types+'/'+x].attrs['Description'].decode()))
          except KeyError:
            pass


  def get_dataset_location(self,label):
    """Returns the location of all active datasets with the given label."""
    path = []
    with h5py.File(self.filename,'r') as f:
      for i in self.active['increments']:
        group_inc = 'inc{:05}'.format(i['inc'])

        for c in self.active['constituents']:
          group_constituent = group_inc+'/constituent/'+c
          for t in self.active['c_output_types']:
            try:
              f[group_constituent+'/'+t+'/'+label]
              path.append(group_constituent+'/'+t+'/'+label)
            except KeyError as e:
              print('unable to locate constituents dataset: '+str(e))

        for m in self.active['materialpoints']:
          group_materialpoint = group_inc+'/materialpoint/'+m
          for t in self.active['m_output_types']:
            try:
              f[group_materialpoint+'/'+t+'/'+label]
              path.append(group_materialpoint+'/'+t+'/'+label)
            except KeyError as e:
              print('unable to locate materialpoints dataset: '+str(e))

    return path


  def read_dataset(self,path,c):
    """
    Dataset for all points/cells.

    If more than one path is given, the dataset is composed of the individual contributions.
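
    Examples
    --------
    A usage sketch; the dataset label 'sigma' is hypothetical:

    >>> loc = results.get_dataset_location('sigma')
    >>> sigma = results.read_dataset(loc,0)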

    """
    with h5py.File(self.filename,'r') as f:
      shape = (self.Nmaterialpoints,) + np.shape(f[path[0]])[1:]
      if len(shape) == 1: shape = shape + (1,)
      dataset = np.full(shape,np.nan)
      for pa in path:
        label = pa.split('/')[2]
        try:
          p = np.where(f['mapping/cellResults/constituent'][:,c]['Name'] == str.encode(label))[0]
          u = (f['mapping/cellResults/constituent'][p,c]['Position'])
          a = np.array(f[pa])
          if len(a.shape) == 1:
            a = a.reshape([a.shape[0],1])
          dataset[p,:] = a[u,:]
        except KeyError as e:
          print('unable to read constituent: '+str(e))
        try:
          p = np.where(f['mapping/cellResults/materialpoint']['Name'] == str.encode(label))[0]
          u = (f['mapping/cellResults/materialpoint'][p.tolist()]['Position'])
          a = np.array(f[pa])
          if len(a.shape) == 1:
            a = a.reshape([a.shape[0],1])
          dataset[p,:] = a[u,:]
        except KeyError as e:
          print('unable to read materialpoint: '+str(e))

    return dataset


  def add_Cauchy(self,P='P',F='F'):
    """
    Adds Cauchy stress calculated from 1st Piola-Kirchhoff stress and deformation gradient.

    Todo
    ----
    The einsum formula is completely untested!

    """
    def Cauchy(F,P):
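      # per point i: out[i] = F[i] @ P[i].T / det(F[i]), i.e. the transpose
      # of sigma = P.F^T/det(F); both agree as long as sigma is symmetric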
return np.einsum('i,ijk,ilk->ijl',1.0/np.linalg.det(F),F,P)

    args   = [{'label':F,'shape':[3,3],'unit':'-'},
              {'label':P,'shape':[3,3],'unit':'Pa'} ]
    result = {'label':'sigma',
              'unit':'Pa',
              'Description': 'Cauchy stress calculated from 1st Piola-Kirchhoff stress and deformation gradient'}

    self.add_generic_pointwise_vectorized(Cauchy,args,result)


  def add_Mises_stress(self,stress='sigma'):
"""Adds equivalent von Mises stress."""
def Mises_stress(stress):
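      # von Mises equivalent: sqrt(3/2 * s:s) with s the symmetrized deviator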
      dev = stress - np.trace(stress)/3.0*np.eye(3)
      symdev = 0.5*(dev+dev.T)
      return np.sqrt(np.sum(symdev*symdev.T)*3.0/2.0)

    args   = [{'label':stress,'shape':[3,3],'unit':'Pa'}]
    result = {'label':'Mises({})'.format(stress),
              'unit':'Pa',
              'Description': 'Equivalent Mises stress'}

    self.add_generic_pointwise(Mises_stress,args,result)


  def add_norm(self,x,ord=None):
    """
    Adds norm of vector or tensor or magnitude of a scalar.

    Todo
    ----
    The output unit should be the input unit.
    The ord parameter should be taken into account.
    The whole thing should be vectorized. This requires passing optional arguments to func.

    """
    args   = [{'label':x,'shape':None,'unit':None}]
    result = {'label':'norm_{}({})'.format(str(ord),x),
              'unit':'n/a',
              'Description': 'Norm of vector or tensor or magnitude of a scalar. See numpy.linalg.norm manual for details'}

    self.add_generic_pointwise(np.linalg.norm,args,result)


  def add_determinant(self,a):
"""Adds the determinant of a tensor."""
    # ToDo: The output unit should be the input unit
    args   = [{'label':a,'shape':[3,3],'unit':None}]
    result = {'label':'det({})'.format(a),
              'unit':'n/a',
              'Description': 'Determinant of a tensor'}

    self.add_generic_pointwise_vectorized(np.linalg.det,args,result)


  def add_spherical(self,a):
"""Adds the spherical component of a tensor."""
def spherical(m):
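      # hydrostatic part: tr(m)/3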
      return (m[0,0]+m[1,1]+m[2,2])/3.0

    # ToDo: The output unit should be the input unit
    args   = [{'label':a,'shape':[3,3],'unit':None}]
    result = {'label':'sph({})'.format(a),
              'unit':'n/a',
              'Description': 'Spherical component of a tensor'}

    self.add_generic_pointwise(spherical,args,result)


  def add_deviator(self,a):
"""Adds the deviator of a tensor."""
def deviator(m):
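      # tensor minus its spherical part: m - tr(m)/3 * I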
      return m - np.eye(3)*(m[0,0]+m[1,1]+m[2,2])/3.0

    # ToDo: The output unit should be the input unit
    args   = [{'label':a,'shape':[3,3],'unit':'Pa'}]
    result = {'label':'dev({})'.format(a),
              'unit':'n/a',
              'Description': 'Deviatoric component of a tensor'}

    self.add_generic_pointwise(deviator,args,result)


  def add_strain_tensors(self,defgrad='F'):
"""Adds a strain definition."""
def strain(defgrad):
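      # logarithmic strain based on the right stretch tensor U of the polar decomposition F = RU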
      (U,S,Vh) = np.linalg.svd(defgrad)                                   # singular value decomposition
      R_inv    = np.dot(U,Vh).T                                           # inverse rotation of polar decomposition
      U        = np.dot(R_inv,defgrad)                                    # F = RU
      U        = np.where(abs(U) < 1e-12, 0, U)                           # kill nasty noisy data
      (D,V)    = np.linalg.eig(U)                                         # eigen decomposition (of symmetric matrix)
      neg      = np.where(D < 0.0)                                        # find negative eigenvalues ...
      D[neg]   *= -1.                                                     # ... flip value ...
      V[:,neg] *= -1.                                                     # ... and vector
      for i,eigval in enumerate(D):
        if np.dot(V[:,i],V[:,(i+1)%3]) != 0.0:                            # check each vector for orthogonality
          V[:,(i+1)%3] = np.cross(V[:,(i+2)%3],V[:,i])                    # correct next vector
          V[:,(i+1)%3] /= np.sqrt(np.dot(V[:,(i+1)%3],V[:,(i+1)%3].conj())) # and renormalize (hyperphobic?)
      d = np.log(D)                                                       # operate on eigenvalues of U or V
      return np.dot(V,np.dot(np.diag(d),V.T)).real                        # build tensor back from eigenvalue/vector basis

    # ToDo: The output unit should be the input unit
    args   = [{'label':defgrad,'shape':[3,3],'unit':None}]
    result = {'label':'strain({})'.format(defgrad),
              'unit':'-',
              'Description': 'strain (ln(U)) of a deformation gradient'}

    self.add_generic_pointwise(strain,args,result)


  def get_fitting(self,data):
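    """Return groups whose datasets match the requested labels and shapes."""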
    groups = []
    if type(data) is not list:
      raise TypeError('expects a list of dataset descriptions')
    with h5py.File(self.filename,'r') as f:
      for g in self.get_candidates([l['label'] for l in data]):
        print(g)
        fits = True
        for d in data:                                                    # ToDo: check for unit
          if d['shape'] is not None:
            fits = fits and np.all(np.array(f[g+'/'+d['label']].shape[1:]) == np.array(d['shape']))
        if fits: groups.append(g)
    return groups


  def add_generic_pointwise(self,func,args,result):
    """
    General function to add pointwise data.

    The function 'func' needs to take its data arguments first, before any other arguments.
    Works for functions that are defined pointwise.

    """
    groups = self.get_fitting(args)

    def job(args):
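      # worker: evaluate func point by point and enqueue the result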
      out         = args['out']
      datasets_in = args['dat']
      func        = args['fun']
      for i in range(out.shape[0]):
        arg = tuple([d[i,] for d in datasets_in])
        out[i,] = func(*arg)
      args['results'].put({'out':out,'group':args['group']})

    Nthreads = 4                                                          # ToDo: should be a parameter
    results  = Queue(Nthreads+1)

    todo = []

    for g in groups:
      with h5py.File(self.filename,'r') as f:
        datasets_in = [f[g+'/'+u['label']][()] for u in args]

      # figure out dimension of results
      testArg = tuple([d[0,] for d in datasets_in])                       # to call function with first point
      out = np.empty([datasets_in[0].shape[0]] + list(func(*testArg).shape)) # shape is Npoints x shape of the results for one point
      todo.append({'dat':datasets_in,'fun':func,'out':out,'group':g,'results':results})

    # Instantiate a thread pool with worker threads
    pool = util.ThreadPool(Nthreads)
    missingResults = len(todo)

    # Add the jobs in bulk to the thread pool. Alternatively you could use
    # `pool.add_task` to add single jobs. The code will block here, which
    # makes it possible to cancel the thread pool with an exception when
    # the currently running batch of workers is finished.
    pool.map(job, todo[:Nthreads+1])
    i = 0
    while missingResults > 0:
      r = results.get()                                                   # noqa
      print(r['group'])
      with h5py.File(self.filename,'r+') as f:
        dataset_out = f[r['group']].create_dataset(result['label'],data=r['out'])
        dataset_out.attrs['Unit']        = result['unit']
        dataset_out.attrs['Description'] = result['Description']
        dataset_out.attrs['Creator']     = 'dadf5.py v{}'.format('n/a')
      missingResults -= 1
      try:
        pool.add_task(job,todo[Nthreads+1+i])
      except IndexError:
        pass
      i += 1

    pool.wait_completion()


  def add_generic_pointwise_vectorized(self,func,args,result):
    """
    General function to add pointwise data.

    The function 'func' needs to take its data arguments first, before any other arguments.
    Works for vectorized functions.

    """
    groups = self.get_fitting(args)

    def job(args):
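      # worker: evaluate func on the whole dataset at once and enqueue the result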
      datasets_in = args['dat']
      func        = args['fun']
      out         = func(*datasets_in)                                    # the preallocated 'out' is not needed here
      args['results'].put({'out':out,'group':args['group']})

    Nthreads = 4                                                          # ToDo: should be a parameter
    results  = Queue(Nthreads+1)

    todo = []

    for g in groups:
      with h5py.File(self.filename,'r') as f:
        datasets_in = [f[g+'/'+u['label']][()] for u in args]

      # figure out dimension of results
      testArg = tuple([d[0:1,] for d in datasets_in])                     # to call function with first point
      out = np.empty([datasets_in[0].shape[0]] + list(func(*testArg).shape[1:])) # shape is Npoints x shape of the results for one point
      todo.append({'dat':datasets_in,'fun':func,'out':out,'group':g,'results':results})

    # Instantiate a thread pool with worker threads
    pool = util.ThreadPool(Nthreads)
    missingResults = len(todo)

    # Add the jobs in bulk to the thread pool. Alternatively you could use
    # `pool.add_task` to add single jobs. The code will block here, which
    # makes it possible to cancel the thread pool with an exception when
    # the currently running batch of workers is finished.
    pool.map(job, todo[:Nthreads+1])
    i = 0
    while missingResults > 0:
      r = results.get()                                                   # noqa
      print(r['group'])
      with h5py.File(self.filename,'r+') as f:
        dataset_out = f[r['group']].create_dataset(result['label'],data=r['out'])
        dataset_out.attrs['Unit']        = result['unit']
        dataset_out.attrs['Description'] = result['Description']
        dataset_out.attrs['Creator']     = 'dadf5.py v{}'.format('n/a')
      missingResults -= 1
      try:
        pool.add_task(job,todo[Nthreads+1+i])
      except IndexError:
        pass
      i += 1

    pool.wait_completion()