Merge branch 'even-more-HDF5-postprocessing' into development

Philip Eisenlohr 2019-09-18 21:14:19 -04:00
commit b06c5bd686
9 changed files with 634 additions and 136 deletions

@@ -1 +1 @@
Subproject commit 5c5adbd8ccc0210fd6507431db8ec82ecec75352
Subproject commit 93564092d20e0c9553245418874ddc3b4484f3dd

View File

@@ -1,9 +1,10 @@
#!/usr/bin/env python3
# -*- coding: UTF-8 no BOM -*-
import os
import numpy as np
import argparse
import numpy as np
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
@@ -23,9 +24,9 @@ parser.add_argument('filenames', nargs='+',
parser.add_argument('-d','--dir', dest='dir',default='postProc',metavar='string',
help='name of subdirectory to hold output')
parser.add_argument('--mat', nargs='+',
help='labels for materialpoint/homogenization',dest='mat')
help='labels for materialpoint',dest='mat')
parser.add_argument('--con', nargs='+',
help='labels for constituent/crystallite/constitutive',dest='con')
help='labels for constituent',dest='con')
options = parser.parse_args()
@@ -46,32 +47,27 @@ for filename in options.filenames:
coords = np.concatenate((z[:,:,:,None],y[:,:,:,None],x[:,:,:,None]),axis = 3)
for i,inc in enumerate(results.increments):
for i,inc in enumerate(results.iter_visible('increments')):
print('Output step {}/{}'.format(i+1,len(results.increments)))
header = '1 header\n'
data = np.array([inc['inc'] for j in range(np.product(results.grid))]).reshape([np.product(results.grid),1])
data = np.array([int(inc[3:]) for j in range(np.product(results.grid))]).reshape([np.product(results.grid),1])
header+= 'inc'
coords = coords.reshape([np.product(results.grid),3])
data = np.concatenate((data,coords),1)
header+=' 1_pos 2_pos 3_pos'
results.active['increments'] = [inc]
for label in options.con:
for o in results.c_output_types:
results.active['c_output_types'] = [o]
for c in results.constituents:
results.active['constituents'] = [c]
for p in results.iter_visible('con_physics'):
for c in results.iter_visible('constituents'):
x = results.get_dataset_location(label)
if len(x) == 0:
continue
label = x[0].split('/')[-1]
array = results.read_dataset(x,0)
d = int(np.product(np.shape(array)[1:]))
array = np.reshape(array,[np.product(results.grid),d])
data = np.concatenate((data,array),1)
data = np.concatenate((data,np.reshape(array,[np.product(results.grid),d])),1)
if d>1:
header+= ''.join([' {}_{}'.format(j+1,label) for j in range(d)])
@@ -79,18 +75,14 @@ for filename in options.filenames:
header+=' '+label
for label in options.mat:
for o in results.m_output_types:
results.active['m_output_types'] = [o]
for m in results.materialpoints:
results.active['materialpoints'] = [m]
for p in results.iter_visible('mat_physics'):
for m in results.iter_visible('materialpoints'):
x = results.get_dataset_location(label)
if len(x) == 0:
continue
label = x[0].split('/')[-1]
array = results.read_dataset(x,0)
d = int(np.product(np.shape(array)[1:]))
array = np.reshape(array,[np.product(results.grid),d])
data = np.concatenate((data,array),1)
data = np.concatenate((data,np.reshape(array,[np.product(results.grid),d])),1)
if d>1:
header+= ''.join([' {}_{}'.format(j+1,label) for j in range(d)])
@@ -102,5 +94,5 @@ for filename in options.filenames:
os.mkdir(dirname)
except FileExistsError:
pass
file_out = '{}_inc{:04d}.txt'.format(filename.split('.')[0],inc['inc'])
file_out = '{}_{}.txt'.format(filename.split('.')[0],inc)
np.savetxt(os.path.join(dirname,file_out),data,header=header,comments='')
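
The script now drives its loops through the iter_visible generator instead of assigning lists to results.active; the generator yields one item at a time, narrows visibility to it, and restores the previous selection when exhausted. A minimal sketch of that pattern, assuming a hypothetical DADF5 results file 'job.hdf5':

import damask

results = damask.DADF5('job.hdf5')                         # hypothetical file name
for inc in results.iter_visible('increments'):             # e.g. 'inc00000'; narrows visibility to one increment
    for p in results.iter_visible('con_physics'):          # physics groups, e.g. 'generic'
        for c in results.iter_visible('constituents'):
            for loc in results.get_dataset_location('F'):  # all visible locations of dataset 'F'
                data = results.read_dataset([loc],0)       # per-cell data for constituent 0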

View File

@@ -1,12 +1,14 @@
#!/usr/bin/env python3
# -*- coding: UTF-8 no BOM -*-
import os,vtk
import numpy as np
import os
import argparse
import damask
import numpy as np
import vtk
from vtk.util import numpy_support
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
@@ -24,9 +26,9 @@ parser.add_argument('filenames', nargs='+',
parser.add_argument('-d','--dir', dest='dir',default='postProc',metavar='string',
help='name of subdirectory to hold output')
parser.add_argument('--mat', nargs='+',
help='labels for materialpoint/homogenization',dest='mat')
help='labels for materialpoint',dest='mat')
parser.add_argument('--con', nargs='+',
help='labels for constituent/crystallite/constitutive',dest='con')
help='labels for constituent',dest='con')
options = parser.parse_args()
@@ -55,18 +57,17 @@ for filename in options.filenames:
rGrid.SetZCoordinates(coordArray[2])
for i,inc in enumerate(results.increments):
for i,inc in enumerate(results.iter_visible('increments')):
print('Output step {}/{}'.format(i+1,len(results.increments)))
vtk_data = []
results.active['increments'] = [inc]
results.set_visible('materialpoints',False)
results.set_visible('constituents', True)
for label in options.con:
for o in results.c_output_types:
results.active['c_output_types'] = [o]
if o != 'generic':
for c in results.constituents:
results.active['constituents'] = [c]
for p in results.iter_visible('con_physics'):
if p != 'generic':
for c in results.iter_visible('constituents'):
x = results.get_dataset_location(label)
if len(x) == 0:
continue
@@ -76,14 +77,37 @@ for filename in options.filenames:
vtk_data[-1].SetName('1_'+x[0].split('/',1)[1])
rGrid.GetCellData().AddArray(vtk_data[-1])
else:
results.active['constituents'] = results.constituents
x = results.get_dataset_location(label)
if len(x) == 0:
continue
array = results.read_dataset(x,0)
shape = [array.shape[0],np.product(array.shape[1:])]
vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape),deep=True,array_type= vtk.VTK_DOUBLE))
vtk_data[-1].SetName('1_'+x[0].split('/')[1]+'/generic/'+label)
vtk_data[-1].SetName('1_'+x[0].split('/',1)[1])
rGrid.GetCellData().AddArray(vtk_data[-1])
results.set_visible('constituents', False)
results.set_visible('materialpoints',True)
for label in options.mat:
for p in results.iter_visible('mat_physics'):
if p != 'generic':
for m in results.iter_visible('materialpoints'):
x = results.get_dataset_location(label)
if len(x) == 0:
continue
array = results.read_dataset(x,0)
shape = [array.shape[0],np.product(array.shape[1:])]
vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape),deep=True,array_type= vtk.VTK_DOUBLE))
vtk_data[-1].SetName('1_'+x[0].split('/',1)[1])
rGrid.GetCellData().AddArray(vtk_data[-1])
else:
x = results.get_dataset_location(label)
if len(x) == 0:
continue
array = results.read_dataset(x,0)
shape = [array.shape[0],np.product(array.shape[1:])]
vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape),deep=True,array_type= vtk.VTK_DOUBLE))
vtk_data[-1].SetName('1_'+x[0].split('/',1)[1])
rGrid.GetCellData().AddArray(vtk_data[-1])
if results.structured:
@@ -95,7 +119,7 @@ for filename in options.filenames:
os.mkdir(dirname)
except FileExistsError:
pass
file_out = '{}_inc{:04d}.{}'.format(filename.split('.')[0],inc['inc'],writer.GetDefaultFileExtension())
file_out = '{}_{}.{}'.format(filename.split('.')[0],inc,writer.GetDefaultFileExtension())
writer.SetCompressorTypeToZLib()
writer.SetDataModeToBinary()
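
For reference, the numpy-to-VTK conversion used throughout both branches follows the vtk.util.numpy_support API; a self-contained sketch with arbitrary data (array contents and dataset name hypothetical):

import numpy as np
import vtk
from vtk.util import numpy_support

array = np.random.rand(8,3,3)                          # arbitrary per-cell tensor data
shape = [array.shape[0],np.product(array.shape[1:])]   # flatten to points x components
vtk_data = numpy_support.numpy_to_vtk(num_array=array.reshape(shape),
                                      deep=True,array_type=vtk.VTK_DOUBLE)
vtk_data.SetName('1_example')                          # hypothetical dataset name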

View File

@@ -146,7 +146,7 @@ for name in filenames:
neg = np.where(D < 0.0) # find negative eigenvalues ...
D[neg] *= -1. # ... flip value ...
V[:,neg] *= -1. # ... and vector
for theStrain in strains:
for theStrain in strains:
d = operator(theStretch,theStrain,D) # operate on eigenvalues of U or V
eps = np.dot(V,np.dot(np.diag(d),V.T)).reshape(9) # build tensor back from eigenvalue/vector basis

View File

@@ -1,23 +1,31 @@
# -*- coding: UTF-8 no BOM -*-
import h5py
from queue import Queue
import re
import glob
import h5py
import numpy as np
from . import util
# ------------------------------------------------------------------
class DADF5():
"""Read and write to DADF5 files"""
"""
Read and write to DADF5 files.
DADF5 files contain DAMASK results.
"""
# ------------------------------------------------------------------
def __init__(self,
filename,
mode = 'r',
):
def __init__(self,filename):
"""
Opens an existing DADF5 file.
if mode not in ['a','r']:
print('Invalid file access mode')
with h5py.File(filename,mode):
pass
Parameters
----------
filename : str
name of the DADF5 file to be opened.
"""
with h5py.File(filename,'r') as f:
if f.attrs['DADF5-major'] != 0 or f.attrs['DADF5-minor'] != 2:
@@ -30,92 +38,198 @@ class DADF5():
self.size = f['geometry'].attrs['size']
r=re.compile('inc[0-9]+')
self.increments = [{'inc': int(u[3:]),
'time': round(f[u].attrs['time/s'],12),
} for u in f.keys() if r.match(u)]
self.constituents = np.unique(f['mapping/cellResults/constituent']['Name']).tolist() # ToDo: I am not too happy with the name
self.constituents = [c.decode() for c in self.constituents]
self.materialpoints = np.unique(f['mapping/cellResults/materialpoint']['Name']).tolist() # ToDo: I am not too happy with the name
self.materialpoints = [m.decode() for m in self.materialpoints]
self.Nconstituents = [i for i in range(np.shape(f['mapping/cellResults/constituent'])[1])]
self.Nmaterialpoints = np.shape(f['mapping/cellResults/constituent'])[0]
self.c_output_types = []
for c in self.constituents:
for o in f['inc{:05}/constituent/{}'.format(self.increments[0]['inc'],c)].keys():
self.c_output_types.append(o)
self.c_output_types = list(set(self.c_output_types)) # make unique
self.increments = [i for i in f.keys() if r.match(i)]
self.times = [round(f[i].attrs['time/s'],12) for i in self.increments]
self.m_output_types = []
self.Nmaterialpoints, self.Nconstituents = np.shape(f['mapping/cellResults/constituent'])
self.materialpoints = [m.decode() for m in np.unique(f['mapping/cellResults/materialpoint']['Name'])]
self.constituents = [c.decode() for c in np.unique(f['mapping/cellResults/constituent']['Name'])]
self.con_physics = []
for c in self.constituents:
self.con_physics += f['inc00000/constituent/{}'.format(c)].keys()
self.con_physics = list(set(self.con_physics)) # make unique
self.mat_physics = []
for m in self.materialpoints:
for o in f['inc{:05}/materialpoint/{}'.format(self.increments[0]['inc'],m)].keys():
self.m_output_types.append(o)
self.m_output_types = list(set(self.m_output_types)) # make unique
self.active= {'increments': self.increments,
self.mat_physics += f['inc00000/materialpoint/{}'.format(m)].keys()
self.mat_physics = list(set(self.mat_physics)) # make unique
self.visible= {'increments': self.increments, # ToDo:simplify, activity only positions that translate into (no complex types)
'constituents': self.constituents,
'materialpoints': self.materialpoints,
'constituent': self.Nconstituents,
'c_output_types': self.c_output_types,
'm_output_types': self.m_output_types}
'constituent': range(self.Nconstituents), # ToDo: stupid naming
'con_physics': self.con_physics,
'mat_physics': self.mat_physics}
self.filename = filename
self.mode = mode
def __manage_visible(self,datasets,what,action):
"""Manages the visibility of the groups."""
# allow True/False and string arguments
if datasets is True:
datasets = ['*']
elif datasets is False:
datasets = []
choice = [datasets] if isinstance(datasets,str) else datasets
valid = [e for e_ in [glob.fnmatch.filter(getattr(self,what),s) for s in choice] for e in e_]
existing = set(self.visible[what])
if action == 'set':
self.visible[what] = valid
elif action == 'add':
self.visible[what] = list(existing.union(valid))
elif action == 'del':
self.visible[what] = list(existing.difference(valid))
def __time_to_inc(self,start,end):
selected = []
for i,time in enumerate(self.times):
if start <= time < end:
selected.append(self.increments[i])
return selected
def set_by_time(self,start,end):
self.__manage_visible(self.__time_to_inc(start,end),'increments','set')
def add_by_time(self,start,end):
self.__manage_visible(self.__time_to_inc(start,end),'increments','add')
def del_by_time(self,start,end):
self.__manage_visible(self.__time_to_inc(start,end),'increments','del')
def iter_visible(self,what):
"""Iterates over visible items by setting each one visible."""
datasets = self.visible[what]
last_datasets = datasets.copy()
for dataset in datasets:
if last_datasets != self.visible[what]:
self.__manage_visible(datasets,what,'set')
raise Exception
self.__manage_visible(dataset,what,'set')
last_datasets = self.visible[what]
yield dataset
self.__manage_visible(datasets,what,'set')
def set_visible(self,what,datasets):
self.__manage_visible(datasets,what,'set')
def add_visible(self,what,datasets):
self.__manage_visible(datasets,what,'add')
def del_visible(self,what,datasets):
self.__manage_visible(datasets,what,'del')
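
Together these methods form a small selection API; a minimal usage sketch, assuming a hypothetical file 'job.hdf5' and hypothetical load-case times in seconds:

import damask

results = damask.DADF5('job.hdf5')           # hypothetical file name
results.set_by_time( 0.0,10.0)               # increments with 0.0 <= time < 10.0
results.add_by_time(20.0,25.0)               # extend the selection
results.set_visible('materialpoints',False)  # hide all materialpoint groups
results.set_visible('constituents', True)    # show all constituent groups
for inc in results.iter_visible('increments'):
    print(inc)                               # e.g. 'inc00000'
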
def groups_with_datasets(self,datasets):
"""
Get groups that contain all requested datasets.
Only groups within inc?????/constituent/*_*/* and inc?????/materialpoint/*_*/*
are considered as they contain the data.
A single string is treated as a list with one entry.
Wildcard matching is allowed, but the number of patterns needs to match.
Parameters
----------
datasets : iterable or str or boolean
Examples
--------
datasets = False matches no group
datasets = True matches all groups
datasets = ['F','P'] matches a group with ['F','P','sigma']
datasets = ['*','P'] matches a group with ['F','P']
datasets = ['*'] does not match a group with ['F','P','sigma']
datasets = ['*','*'] does not match a group with ['F','P','sigma']
datasets = ['*','*','*'] matches a group with ['F','P','sigma']
"""
if datasets is False: return []
sets = [datasets] if isinstance(datasets,str) else datasets
groups = []
with h5py.File(self.filename,'r') as f:
for i in self.iter_visible('increments'): #ToDo: Merge path only once at the end '/'.join(listE)
for c in self.iter_visible('constituents'):
for p in self.iter_visible('con_physics'):
group = '/'.join([i,'constituent',c,p])
if sets is True:
groups.append(group)
else:
match = [e for e_ in [glob.fnmatch.filter(f[group].keys(),s) for s in sets] for e in e_]
if len(set(match)) == len(sets) : groups.append(group)
for m in self.iter_visible('materialpoints'):
for p in self.iter_visible('mat_physics'):
group = '/'.join([i,'materialpoint',m,p])
if sets is True:
groups.append(group)
else:
match = [e for e_ in [glob.fnmatch.filter(f[group].keys(),s) for s in sets] for e in e_]
if len(set(match)) == len(sets) : groups.append(group)
return groups
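
The pattern matching relies on glob.fnmatch.filter plus a flattening list comprehension; a small sketch mirroring the docstring example:

import glob

names = ['F','P']                      # datasets present in a group
sets  = ['*','P']                      # requested patterns
match = [e for e_ in [glob.fnmatch.filter(names,s) for s in sets] for e in e_]
# match == ['F','P','P']; len(set(match)) == len(sets), so the group is selected
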
def list_data(self):
"""Shows information on all datasets in the file"""
"""Shows information on all active datasets in the file."""
with h5py.File(self.filename,'r') as f:
group_inc = 'inc{:05}'.format(self.active['increments'][0]['inc'])
for c in self.active['constituents']:
print('\n'+c)
group_constituent = group_inc+'/constituent/'+c
for t in self.active['c_output_types']:
print(' {}'.format(t))
group_output_types = group_constituent+'/'+t
i = 'inc{:05}'.format(0)
for c in self.iter_visible('constituents'):
print('{}'.format(c))
for p in self.iter_visible('con_physics'):
print(' {}'.format(p))
try:
for x in f[group_output_types].keys():
print(' {} ({})'.format(x,f[group_output_types+'/'+x].attrs['Description'].decode()))
except:
k = '/'.join([i,'constituent',c,p])
for d in f[k].keys():
print(' {} ({})'.format(d,f[k+'/'+d].attrs['Description'].decode()))
except KeyError:
pass
for m in self.active['materialpoints']:
group_materialpoint = group_inc+'/materialpoint/'+m
for t in self.active['m_output_types']:
print(' {}'.format(t))
group_output_types = group_materialpoint+'/'+t
for m in self.iter_visible('materialpoints'):
print('{}'.format(m))
for p in self.iter_visible('mat_physics'):
print(' {}'.format(p))
try:
for x in f[group_output_types].keys():
print(' {} ({})'.format(x,f[group_output_types+'/'+x].attrs['Description'].decode()))
except:
k = '/'.join([i,'materialpoint',m,p])
for d in f[k].keys():
print(' {} ({})'.format(d,f[k+'/'+d].attrs['Description'].decode()))
except KeyError:
pass
def get_dataset_location(self,label):
"""Returns the location of all active datasets with given label"""
"""Returns the location of all active datasets with given label."""
path = []
with h5py.File(self.filename,'r') as f:
for i in self.active['increments']:
group_inc = 'inc{:05}'.format(i['inc'])
for c in self.active['constituents']:
group_constituent = group_inc+'/constituent/'+c
for t in self.active['c_output_types']:
for i in self.iter_visible('increments'):
for c in self.iter_visible('constituents'):
for p in self.iter_visible('con_physics'):
try:
f[group_constituent+'/'+t+'/'+label]
path.append(group_constituent+'/'+t+'/'+label)
except Exception as e:
k = '/'.join([i,'constituent',c,p,label])
f[k]
path.append(k)
except KeyError as e:
print('unable to locate constituents dataset: '+ str(e))
for m in self.active['materialpoints']:
group_materialpoint = group_inc+'/materialpoint/'+m
for t in self.active['m_output_types']:
for m in self.iter_visible('materialpoints'):
for p in self.iter_visible('mat_physics'):
try:
f[group_materialpoint+'/'+t+'/'+label]
path.append(group_materialpoint+'/'+t+'/'+label)
except Exception as e:
k = '/'.join([i,'materialpoint',m,p,label])
f[k]
path.append(k)
except KeyError as e:
print('unable to locate materialpoints dataset: '+ str(e))
return path
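
The returned entries are complete HDF5 paths; a minimal sketch (file and constituent names hypothetical):

import damask

results = damask.DADF5('job.hdf5')                 # hypothetical file name
for path in results.get_dataset_location('F'):
    print(path)                                    # e.g. 'inc00000/constituent/1_Aluminum/generic/F'
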
@@ -123,7 +237,7 @@ class DADF5():
def read_dataset(self,path,c):
"""
Dataset for all points/cells
Dataset for all points/cells.
If more than one path is given, the dataset is composed of the individual contributions.
"""
@@ -133,25 +247,330 @@ class DADF5():
dataset = np.full(shape,np.nan)
for pa in path:
label = pa.split('/')[2]
try:
p = np.where(f['mapping/cellResults/constituent'][:,c]['Name'] == str.encode(label))[0]
p = np.where(f['mapping/cellResults/constituent'][:,c]['Name'] == str.encode(label))[0]
if len(p)>0:
u = (f['mapping/cellResults/constituent'][p,c]['Position'])
a = np.array(f[pa])
if len(a.shape) == 1:
a=a.reshape([a.shape[0],1])
dataset[p,:] = a[u,:]
except Exception as e:
print('unable to read constituent: '+ str(e))
try:
p = np.where(f['mapping/cellResults/materialpoint']['Name'] == str.encode(label))[0]
p = np.where(f['mapping/cellResults/materialpoint']['Name'] == str.encode(label))[0]
if len(p)>0:
u = (f['mapping/cellResults/materialpoint'][p.tolist()]['Position'])
a = np.array(f[pa])
if len(a.shape) == 1:
a=a.reshape([a.shape[0],1])
dataset[p,:] = a[u,:]
except Exception as e:
print('unable to read materialpoint: '+ str(e))
return dataset
def add_Cauchy(self,P='P',F='F'):
"""
Adds Cauchy stress calculated from 1st Piola-Kirchhoff stress and deformation gradient.
Resulting tensor is symmetrized as the Cauchy stress should be symmetric.
"""
def Cauchy(F,P):
sigma = np.einsum('i,ijk,ilk->ijl',1.0/np.linalg.det(F['data']),P['data'],F['data'])
sigma = (sigma + np.einsum('ikj',sigma))*0.5 # enforce symmetry
return {
'data' : sigma,
'label' : 'sigma',
'meta' : {
'Unit' : P['meta']['Unit'],
'Description' : 'Cauchy stress calculated from {} ({}) '.format(P['label'],P['meta']['Description'])+\
'and deformation gradient {} ({})'.format(F['label'],F['meta']['Description']),
'Creator' : 'dadf5.py:add_Cauchy vXXXXX'
}
}
requested = [{'label':F,'arg':'F'},
{'label':P,'arg':'P'} ]
self.__add_generic_pointwise(Cauchy,requested)
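
In formulas, with J = det F, this is the standard push-forward of the first Piola-Kirchhoff stress followed by symmetrization (matching the einsum above):

\sigma = \frac{1}{J}\,P\,F^{\mathsf{T}}, \qquad J = \det F, \qquad
\sigma \leftarrow \tfrac{1}{2}\left(\sigma + \sigma^{\mathsf{T}}\right)
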
def add_Mises(self,x):
"""Adds the equivalent Mises stress or strain of a tensor."""
def Mises(x):
if x['meta']['Unit'] == b'Pa': #ToDo: Should we use this? Then add_Cauchy and add_strain_tensor should also perform sanity checks
factor = 3.0/2.0
t = 'stress'
elif x['meta']['Unit'] == b'1':
factor = 2.0/3.0
t = 'strain'
else:
print(x['meta']['Unit'])
raise ValueError
d = x['data']
dev = d - np.einsum('ijk,i->ijk',np.broadcast_to(np.eye(3),[d.shape[0],3,3]),np.trace(d,axis1=1,axis2=2)/3.0)
#dev_sym = (dev + np.einsum('ikj',dev))*0.5 # ToDo: this is not needed (only if the input is not symmetric, but then the whole concept breaks down)
return {
'data' : np.sqrt(np.einsum('ijk->i',dev**2)*factor),
'label' : '{}_vM'.format(x['label']),
'meta' : {
'Unit' : x['meta']['Unit'],
'Description' : 'Mises equivalent {} of {} ({})'.format(t,x['label'],x['meta']['Description']),
'Creator' : 'dadf5.py:add_Mises vXXXXX'
}
}
requested = [{'label':x,'arg':'x'}]
self.__add_generic_pointwise(Mises,requested)
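
The factor 3/2 (stress) versus 2/3 (strain) matches the usual von Mises definitions on the deviator:

s = x - \tfrac{1}{3}\operatorname{tr}(x)\,I, \qquad
\sigma_\mathrm{vM} = \sqrt{\tfrac{3}{2}\,s\!:\!s}, \qquad
\varepsilon_\mathrm{vM} = \sqrt{\tfrac{2}{3}\,s\!:\!s}
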
def add_norm(self,x,ord=None):
"""
Adds norm of vector or tensor.
See numpy.linalg.norm manual for details.
"""
def norm(x,ord):
o = ord
if len(x['data'].shape) == 2:
axis = 1
t = 'vector'
if o is None: o = 2
elif len(x['data'].shape) == 3:
axis = (1,2)
t = 'tensor'
if o is None: o = 'fro'
else:
raise ValueError
return {
'data' : np.linalg.norm(x['data'],ord=o,axis=axis,keepdims=True),
'label' : '|{}|_{}'.format(x['label'],o),
'meta' : {
'Unit' : x['meta']['Unit'],
'Description' : '{}-Norm of {} {} ({})'.format(ord,t,x['label'],x['meta']['Description']),
'Creator' : 'dadf5.py:add_norm vXXXXX'
}
}
requested = [{'label':x,'arg':'x'}]
self.__add_generic_pointwise(norm,requested,{'ord':ord})
def add_absolute(self,x):
"""Adds absolute value."""
def absolute(x):
return {
'data' : np.abs(x['data']),
'label' : '|{}|'.format(x['label']),
'meta' : {
'Unit' : x['meta']['Unit'],
'Description' : 'Absolute value of {} ({})'.format(x['label'],x['meta']['Description']),
'Creator' : 'dadf5.py:add_absolute vXXXXX'
}
}
requested = [{'label':x,'arg':'x'}]
self.__add_generic_pointwise(absolute,requested)
def add_determinant(self,x):
"""Adds the determinant component of a tensor."""
def determinant(x):
return {
'data' : np.linalg.det(x['data']),
'label' : 'det({})'.format(x['label']),
'meta' : {
'Unit' : x['meta']['Unit'],
'Description' : 'Determinant of tensor {} ({})'.format(x['label'],x['meta']['Description']),
'Creator' : 'dadf5.py:add_determinant vXXXXX'
}
}
requested = [{'label':x,'arg':'x'}]
self.__add_generic_pointwise(determinant,requested)
def add_spherical(self,x):
"""Adds the spherical component of a tensor."""
def spherical(x):
if not np.all(np.array(x['data'].shape[1:]) == np.array([3,3])):
raise ValueError
return {
'data' : np.trace(x['data'],axis1=1,axis2=2)/3.0,
'label' : 'sph({})'.format(x['label']),
'meta' : {
'Unit' : x['meta']['Unit'],
'Description' : 'Spherical component of tensor {} ({})'.format(x['label'],x['meta']['Description']),
'Creator' : 'dadf5.py:add_spherical vXXXXX'
}
}
requested = [{'label':x,'arg':'x'}]
self.__add_generic_pointwise(spherical,requested)
def add_deviator(self,x):
"""Adds the deviator of a tensor."""
def deviator(x):
d = x['data']
if not np.all(np.array(d.shape[1:]) == np.array([3,3])):
raise ValueError
return {
'data' : d - np.einsum('ijk,i->ijk',np.broadcast_to(np.eye(3),[d.shape[0],3,3]),np.trace(d,axis1=1,axis2=2)/3.0),
'label' : 'dev({})'.format(x['label']),
'meta' : {
'Unit' : x['meta']['Unit'],
'Description' : 'Deviator of tensor {} ({})'.format(x['label'],x['meta']['Description']),
'Creator' : 'dadf5.py:add_deviator vXXXXX'
}
}
requested = [{'label':x,'arg':'x'}]
self.__add_generic_pointwise(deviator,requested)
def add_calculation(self,formula,label,unit='n/a',description=None,vectorized=True):
"""
General formula.
Currently only works for vectorized expressions.
"""
if vectorized is not True:
raise NotImplementedError
def calculation(**kwargs):
formula = kwargs['formula']
for d in re.findall(r'#(.*?)#',formula):
formula = re.sub('#{}#'.format(d),"kwargs['{}']['data']".format(d),formula)
return {
'data' : eval(formula),
'label' : kwargs['label'],
'meta' : {
'Unit' : kwargs['unit'],
'Description' : '{}'.format(kwargs['description']),
'Creator' : 'dadf5.py:add_calculation vXXXXX'
}
}
requested = [{'label':d,'arg':d} for d in re.findall(r'#(.*?)#',formula)] # datasets used in the formula
pass_through = {'formula':formula,'label':label,'unit':unit,'description':description}
self.__add_generic_pointwise(calculation,requested,pass_through)
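
Dataset labels are referenced inside #...# markers and substituted into the formula before eval; a minimal usage sketch (file name and labels hypothetical):

import damask

results = damask.DADF5('job.hdf5')                         # hypothetical file name
results.add_calculation(formula='#P#/1e6',                 # references existing dataset 'P'
                        label='P_MPa',
                        unit='MPa',
                        description='1st Piola-Kirchhoff stress in MPa')
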
def add_strain_tensor(self,t,ord,defgrad='F'): #ToDo: Use t and ord
"""
Adds a strain tensor.
Albrecht Bertram: Elasticity and Plasticity of Large Deformations: An Introduction (3rd Edition, 2012), p. 102.
"""
def strain_tensor(defgrad,t,ord):
operator = {
'V#ln': lambda V: np.log(V),
'U#ln': lambda U: np.log(U),
'V#Biot': lambda V: np.broadcast_to(np.ones(3),[V.shape[0],3]) - 1.0/V,
'U#Biot': lambda U: U - np.broadcast_to(np.ones(3),[U.shape[0],3]),
'V#Green':lambda V: np.broadcast_to(np.ones(3),[V.shape[0],3]) - 1.0/V**2,
'U#Green':lambda U: U**2 - np.broadcast_to(np.ones(3),[U.shape[0],3]),
}
(U,S,Vh) = np.linalg.svd(defgrad['data']) # singular value decomposition
R_inv = np.einsum('ikj',np.matmul(U,Vh)) # inverse rotation of polar decomposition
U = np.matmul(R_inv,defgrad['data']) # F = RU
(D,V) = np.linalg.eigh((U+np.einsum('ikj',U))*.5) # eigen decomposition (of symmetric(ed) matrix)
neg = np.where(D < 0.0) # find negative eigenvalues ...
D[neg[0],neg[1]] = D[neg[0],neg[1]]* -1 # ... flip value ...
V[neg[0],:,neg[1]] = V[neg[0],:,neg[1]]* -1 # ... and vector
d = operator['V#ln'](D)
a = np.matmul(V,np.einsum('ij,ikj->ijk',d,V))
return {
'data' : a,
'label' : 'ln(V)({})'.format(defgrad['label']),
'meta' : {
'Unit' : defgrad['meta']['Unit'],
'Description' : 'Strain tensor ln(V){} ({})'.format(defgrad['label'],defgrad['meta']['Description']),
'Creator' : 'dadf5.py:add_strain_tensor vXXXXX'
}
}
requested = [{'label':defgrad,'arg':'defgrad'}]
self.__add_generic_pointwise(strain_tensor,requested,{'t':t,'ord':ord})
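
The kinematics behind the implementation: the SVD F = U_s S V_h yields the rotation of the polar decomposition F = R U, from which the symmetrized right stretch and its eigendecomposition give the logarithmic strain:

R = U_s V_h, \qquad U = R^{\mathsf{T}} F, \qquad
\ln U = \sum_{i=1}^{3} \ln(\lambda_i)\; n_i \otimes n_i

with \lambda_i, n_i the eigenvalues and eigenvectors of U (negative eigenvalues are flipped together with their vectors).
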
def __add_generic_pointwise(self,func,datasets_requested,extra_args={}):
"""
General function to add pointwise data.
Parameters
----------
func : function
Function that calculates a new dataset from one or more datasets per HDF5 group.
datasets_requested : list of dictionaries
Details of the datasets to be used: label (in HDF5 file) and arg (argument to which the data is parsed in func).
extra_args : dictionary, optional
Any extra arguments parsed to func.
"""
def job(args):
"""Call function with input data + extra arguments, returns results + group."""
args['results'].put({**args['func'](**args['in']),'group':args['group']})
N_threads = 1 # ToDo: should be a parameter
results = Queue(N_threads)
pool = util.ThreadPool(N_threads)
N_added = N_threads + 1
todo = []
# ToDo: It would be more memory efficient to read only from file when required, i.e. do it in pool.add_task
for group in self.groups_with_datasets([d['label'] for d in datasets_requested]):
with h5py.File(self.filename,'r') as f:
datasets_in = {}
for d in datasets_requested:
loc = f[group+'/'+d['label']]
data = loc[()]
meta = {k:loc.attrs[k] for k in loc.attrs.keys()}
datasets_in[d['arg']] = {'data': data, 'meta' : meta, 'label' : d['label']}
todo.append({'in':{**datasets_in,**extra_args},'func':func,'group':group,'results':results})
pool.map(job, todo[:N_added]) # initialize
N_not_calculated = len(todo)
while N_not_calculated > 0:
result = results.get()
with h5py.File(self.filename,'a') as f: # write to file
dataset_out = f[result['group']].create_dataset(result['label'],data=result['data'])
for k in result['meta'].keys():
dataset_out.attrs[k] = result['meta'][k]
N_not_calculated-=1
if N_added < len(todo): # add more jobs
pool.add_task(job,todo[N_added])
N_added +=1
pool.wait_completion()

View File

@@ -4,6 +4,9 @@ import os
import subprocess
import shlex
from optparse import Option
from queue import Queue
from threading import Thread
import numpy as np
@@ -464,3 +467,46 @@ def curve_fit_bound(f, xdata, ydata, p0=None, sigma=None, bounds=None, **kw):
pcov = np.inf
return (popt, pcov, infodict, errmsg, ier) if return_full else (popt, pcov)
class Worker(Thread):
"""Thread executing tasks from a given tasks queue"""
def __init__(self, tasks):
Thread.__init__(self)
self.tasks = tasks
self.daemon = True
self.start()
def run(self):
while True:
func, args, kargs = self.tasks.get()
try:
func(*args, **kargs)
except Exception as e:
# An exception happened in this thread
print(e)
finally:
# Mark this task as done, whether an exception happened or not
self.tasks.task_done()
class ThreadPool:
"""Pool of threads consuming tasks from a queue"""
def __init__(self, num_threads):
self.tasks = Queue(num_threads)
for _ in range(num_threads):
Worker(self.tasks)
def add_task(self, func, *args, **kargs):
"""Add a task to the queue"""
self.tasks.put((func, args, kargs))
def map(self, func, args_list):
"""Add a list of tasks to the queue"""
for args in args_list:
self.add_task(func, args)
def wait_completion(self):
"""Wait for completion of all the tasks in the queue"""
self.tasks.join()
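
A minimal usage sketch of this pool, assuming the Worker/ThreadPool classes defined above (task and arguments arbitrary):

def greet(name):
    print('hello',name)

pool = ThreadPool(2)                  # two worker threads
pool.map(greet, ['a','b','c'])        # one positional argument per task
pool.wait_completion()                # block until the queue is drained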

View File

@@ -97,11 +97,16 @@ subroutine config_init
enddo
if (size(config_homogenization) < 1) call IO_error(160,ext_msg='<homogenization>')
if (size(config_microstructure) < 1) call IO_error(160,ext_msg='<microstructure>')
if (size(config_crystallite) < 1) call IO_error(160,ext_msg='<crystallite>')
if (size(config_phase) < 1) call IO_error(160,ext_msg='<phase>')
if (size(config_texture) < 1) call IO_error(160,ext_msg='<texture>')
if (.not. allocated(config_homogenization) .or. size(config_homogenization) < 1) &
call IO_error(160,ext_msg='<homogenization>')
if (.not. allocated(config_microstructure) .or. size(config_microstructure) < 1) &
call IO_error(160,ext_msg='<microstructure>')
if (.not. allocated(config_crystallite) .or. size(config_crystallite) < 1) &
call IO_error(160,ext_msg='<crystallite>')
if (.not. allocated(config_phase) .or. size(config_phase) < 1) &
call IO_error(160,ext_msg='<phase>')
if (.not. allocated(config_texture) .or. size(config_texture) < 1) &
call IO_error(160,ext_msg='<texture>')
inquire(file='numerics.config', exist=fileExists)

View File

@@ -800,6 +800,8 @@ subroutine homogenization_results
integer :: p
character(len=256) :: group
real(pReal), dimension(:,:,:), allocatable :: temp
do p=1,size(config_name_homogenization)
group = trim('current/materialpoint')//'/'//trim(config_name_homogenization(p))
@@ -812,7 +814,17 @@ subroutine homogenization_results
case(HOMOGENIZATION_rgc_ID)
call mech_RGC_results(homogenization_typeInstance(p),group)
end select
group = trim('current/materialpoint')//'/'//trim(config_name_homogenization(p))//'/generic'
call HDF5_closeGroup(results_addGroup(group))
!temp = reshape(materialpoint_F,[3,3,discretization_nIP*discretization_nElem])
!call results_writeDataset(group,temp,'F',&
! 'deformation gradient','1')
!temp = reshape(materialpoint_P,[3,3,discretization_nIP*discretization_nElem])
!call results_writeDataset(group,temp,'P',&
! '1st Piola-Kirchhoff stress','Pa')
enddo
#endif
end subroutine homogenization_results

View File

@@ -747,13 +747,13 @@ pure function lattice_symmetrizeC66(struct,C66)
case (LATTICE_iso_ID)
do k=1,3
forall(j=1:3) lattice_symmetrizeC66(k,j) = C66(1,2)
lattice_symmetrizeC66(k,k) = C66(1,1)
lattice_symmetrizeC66(k,k) = C66(1,1)
lattice_symmetrizeC66(k+3,k+3) = 0.5_pReal*(C66(1,1)-C66(1,2))
enddo
case (LATTICE_fcc_ID,LATTICE_bcc_ID)
do k=1,3
forall(j=1:3) lattice_symmetrizeC66(k,j) = C66(1,2)
lattice_symmetrizeC66(k,k) = C66(1,1)
forall(j=1:3) lattice_symmetrizeC66(k,j) = C66(1,2)
lattice_symmetrizeC66(k,k) = C66(1,1)
lattice_symmetrizeC66(k+3,k+3) = C66(4,4)
enddo
case (LATTICE_hex_ID)
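
For the isotropic case above, only two constants are independent; the shear entry follows as:

C_{44} = \tfrac{1}{2}\left(C_{11} - C_{12}\right)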