Merge branch 'misc-improvements' into 'development'

Misc improvements

See merge request damask/DAMASK!154
Vitesh 2020-04-12 17:09:55 +02:00
commit c74ffae88c
11 changed files with 281 additions and 214 deletions


@@ -6,7 +6,7 @@ name = 'damask'
 with open(_os.path.join(_os.path.dirname(__file__),'VERSION')) as _f:
     version = _re.sub(r'^v','',_f.readline().strip())
 
-# classes
+# make classes directly accessible as damask.Class
 from ._environment import Environment # noqa
 from ._table import Table # noqa
 from ._vtk import VTK # noqa
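The renamed helper imports in this and the following files (_os, _re, _np, _spatial) serve the same goal as the comment above: only the public classes and functions should show up when tab-completing on the package. A toy sketch of the underscore convention (my own illustration, not DAMASK code):

import numpy as _np                     # helper import, hidden by the leading underscore

def norm(x):
    return _np.linalg.norm(x)           # public functions can still use the hidden alias

names = dir()                           # module-level names defined so far
print([n for n in names if not n.startswith('_')])   # ['norm'] -- '_np' stays out of the public namespace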


@@ -291,7 +291,7 @@ class Geom:
         comments = []
         for i,line in enumerate(content[:header_length]):
-            items = line.lower().strip().split()
+            items = line.split('#')[0].lower().strip().split()
             key = items[0] if items else ''
             if key == 'grid':
                 grid = np.array([ int(dict(zip(items[1::2],items[2::2]))[i]) for i in ['a','b','c']])
@@ -307,7 +307,7 @@ class Geom:
         microstructure = np.empty(grid.prod())            # initialize as flat array
         i = 0
         for line in content[header_length:]:
-            items = line.split()
+            items = line.split('#')[0].split()
             if len(items) == 3:
                 if items[1].lower() == 'of':
                     items = np.ones(int(items[0]))*float(items[2])
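The two hunks above make the geom reader tolerant of inline '#' comments in both the header and the microstructure data, while keeping the compressed 'N of value' notation working. A standalone sketch of that parsing rule (parse_geom_line is a made-up helper for illustration, not DAMASK API):

import numpy as np

def parse_geom_line(line):
    items = line.split('#')[0].split()                  # discard everything after an inline comment
    if len(items) == 3 and items[1].lower() == 'of':
        return np.ones(int(items[0]))*float(items[2])   # '4 of 2' expands to [2. 2. 2. 2.]
    return np.array(items,dtype=float)

print(parse_geom_line('4 of 2    # grain 2, compressed notation'))
print(parse_geom_line('1 1 2 3   # explicit microstructure indices'))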


@@ -1,5 +1,5 @@
-from scipy import spatial
-import numpy as np
+from scipy import spatial as _spatial
+import numpy as _np
 
 def _ks(size,grid,first_order=False):
     """
@@ -11,16 +11,16 @@ def _ks(size,grid,first_order=False):
        physical size of the periodic field.
 
    """
-    k_sk = np.where(np.arange(grid[0])>grid[0]//2,np.arange(grid[0])-grid[0],np.arange(grid[0]))/size[0]
+    k_sk = _np.where(_np.arange(grid[0])>grid[0]//2,_np.arange(grid[0])-grid[0],_np.arange(grid[0]))/size[0]
     if grid[0]%2 == 0 and first_order: k_sk[grid[0]//2] = 0     # Nyquist freq=0 for even grid (Johnson, MIT, 2011)
-    k_sj = np.where(np.arange(grid[1])>grid[1]//2,np.arange(grid[1])-grid[1],np.arange(grid[1]))/size[1]
+    k_sj = _np.where(_np.arange(grid[1])>grid[1]//2,_np.arange(grid[1])-grid[1],_np.arange(grid[1]))/size[1]
     if grid[1]%2 == 0 and first_order: k_sj[grid[1]//2] = 0     # Nyquist freq=0 for even grid (Johnson, MIT, 2011)
-    k_si = np.arange(grid[2]//2+1)/size[2]
+    k_si = _np.arange(grid[2]//2+1)/size[2]
 
-    kk, kj, ki = np.meshgrid(k_sk,k_sj,k_si,indexing = 'ij')
-    return np.concatenate((ki[:,:,:,None],kj[:,:,:,None],kk[:,:,:,None]),axis = 3)
+    kk, kj, ki = _np.meshgrid(k_sk,k_sj,k_si,indexing = 'ij')
+    return _np.concatenate((ki[:,:,:,None],kj[:,:,:,None],kk[:,:,:,None]),axis = 3)
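The wave numbers assembled in _ks follow the usual FFT frequency layout; for odd grid dimensions they coincide with numpy's fftfreq, and for even dimensions they differ only in the sign convention at the Nyquist frequency, which the first_order branch sets to zero anyway. A small standalone check (my own illustration):

import numpy as np

n, length = 5, 2.0
k_manual = np.where(np.arange(n) > n//2, np.arange(n)-n, np.arange(n))/length
k_numpy  = np.fft.fftfreq(n, d=length/n)
print(np.allclose(k_manual, k_numpy))   # True for odd n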
 def curl(size,field):
@@ -33,18 +33,18 @@ def curl(size,field):
        physical size of the periodic field.
 
    """
-    n = np.prod(field.shape[3:])
+    n = _np.prod(field.shape[3:])
     k_s = _ks(size,field.shape[:3],True)
 
-    e = np.zeros((3, 3, 3))
+    e = _np.zeros((3, 3, 3))
     e[0, 1, 2] = e[1, 2, 0] = e[2, 0, 1] = +1.0     # Levi-Civita symbol
     e[0, 2, 1] = e[2, 1, 0] = e[1, 0, 2] = -1.0
 
-    field_fourier = np.fft.rfftn(field,axes=(0,1,2))
-    curl_ = (np.einsum('slm,ijkl,ijkm ->ijks', e,k_s,field_fourier)*2.0j*np.pi if n == 3 else    # vector, 3 -> 3
-             np.einsum('slm,ijkl,ijknm->ijksn',e,k_s,field_fourier)*2.0j*np.pi)                  # tensor, 3x3 -> 3x3
+    field_fourier = _np.fft.rfftn(field,axes=(0,1,2))
+    curl_ = (_np.einsum('slm,ijkl,ijkm ->ijks', e,k_s,field_fourier)*2.0j*_np.pi if n == 3 else  # vector, 3 -> 3
+             _np.einsum('slm,ijkl,ijknm->ijksn',e,k_s,field_fourier)*2.0j*_np.pi)                # tensor, 3x3 -> 3x3
 
-    return np.fft.irfftn(curl_,axes=(0,1,2),s=field.shape[:3])
+    return _np.fft.irfftn(curl_,axes=(0,1,2),s=field.shape[:3])
 
 
 def divergence(size,field):
@@ -57,14 +57,14 @@ def divergence(size,field):
        physical size of the periodic field.
 
    """
-    n = np.prod(field.shape[3:])
+    n = _np.prod(field.shape[3:])
     k_s = _ks(size,field.shape[:3],True)
 
-    field_fourier = np.fft.rfftn(field,axes=(0,1,2))
-    div_ = (np.einsum('ijkl,ijkl ->ijk', k_s,field_fourier)*2.0j*np.pi if n == 3 else            # vector, 3 -> 1
-            np.einsum('ijkm,ijklm->ijkl',k_s,field_fourier)*2.0j*np.pi)                          # tensor, 3x3 -> 3
+    field_fourier = _np.fft.rfftn(field,axes=(0,1,2))
+    div_ = (_np.einsum('ijkl,ijkl ->ijk', k_s,field_fourier)*2.0j*_np.pi if n == 3 else          # vector, 3 -> 1
+            _np.einsum('ijkm,ijklm->ijkl',k_s,field_fourier)*2.0j*_np.pi)                        # tensor, 3x3 -> 3
 
-    return np.fft.irfftn(div_,axes=(0,1,2),s=field.shape[:3])
+    return _np.fft.irfftn(div_,axes=(0,1,2),s=field.shape[:3])
 
 
 def gradient(size,field):
@@ -77,17 +77,17 @@ def gradient(size,field):
        physical size of the periodic field.
 
    """
-    n = np.prod(field.shape[3:])
+    n = _np.prod(field.shape[3:])
     k_s = _ks(size,field.shape[:3],True)
 
-    field_fourier = np.fft.rfftn(field,axes=(0,1,2))
-    grad_ = (np.einsum('ijkl,ijkm->ijkm', field_fourier,k_s)*2.0j*np.pi if n == 1 else           # scalar, 1 -> 3
-             np.einsum('ijkl,ijkm->ijklm',field_fourier,k_s)*2.0j*np.pi)                         # vector, 3 -> 3x3
+    field_fourier = _np.fft.rfftn(field,axes=(0,1,2))
+    grad_ = (_np.einsum('ijkl,ijkm->ijkm', field_fourier,k_s)*2.0j*_np.pi if n == 1 else         # scalar, 1 -> 3
+             _np.einsum('ijkl,ijkm->ijklm',field_fourier,k_s)*2.0j*_np.pi)                       # vector, 3 -> 3x3
 
-    return np.fft.irfftn(grad_,axes=(0,1,2),s=field.shape[:3])
+    return _np.fft.irfftn(grad_,axes=(0,1,2),s=field.shape[:3])
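Since curl, divergence, and gradient all act in Fourier space with the same wave-vector field, identities such as div(curl(v)) = 0 hold to machine precision on periodic data. A quick sanity sketch, assuming the package is importable as damask:

import numpy as np
from damask import grid_filters

size = np.ones(3)
grid = (16,18,20)
v = np.random.random(grid+(3,))
div_of_curl = grid_filters.divergence(size, grid_filters.curl(size,v))
print(np.allclose(div_of_curl, 0.0))    # div(curl(v)) vanishes for a periodic field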
-def cell_coord0(grid,size,origin=np.zeros(3)):
+def cell_coord0(grid,size,origin=_np.zeros(3)):
     """
     Cell center positions (undeformed).
@@ -103,7 +103,7 @@ def cell_coord0(grid,size,origin=np.zeros(3)):
    """
     start = origin + size/grid*.5
     end   = origin + size - size/grid*.5
-    return np.mgrid[start[0]:end[0]:grid[0]*1j,start[1]:end[1]:grid[1]*1j,start[2]:end[2]:grid[2]*1j].T
+    return _np.mgrid[start[0]:end[0]:grid[0]*1j,start[1]:end[1]:grid[1]*1j,start[2]:end[2]:grid[2]*1j].T
 
 
 def cell_displacement_fluct(size,F):
@@ -118,19 +118,19 @@ def cell_displacement_fluct(size,F):
        deformation gradient field.
 
    """
-    integrator = 0.5j*size/np.pi
+    integrator = 0.5j*size/_np.pi
 
     k_s = _ks(size,F.shape[:3],False)
-    k_s_squared = np.einsum('...l,...l',k_s,k_s)
+    k_s_squared = _np.einsum('...l,...l',k_s,k_s)
     k_s_squared[0,0,0] = 1.0
 
-    displacement = -np.einsum('ijkml,ijkl,l->ijkm',
-                              np.fft.rfftn(F,axes=(0,1,2)),
+    displacement = -_np.einsum('ijkml,ijkl,l->ijkm',
+                              _np.fft.rfftn(F,axes=(0,1,2)),
                               k_s,
                               integrator,
-                             ) / k_s_squared[...,np.newaxis]
+                             ) / k_s_squared[...,_np.newaxis]
 
-    return np.fft.irfftn(displacement,axes=(0,1,2),s=F.shape[:3])
+    return _np.fft.irfftn(displacement,axes=(0,1,2),s=F.shape[:3])
 
 
 def cell_displacement_avg(size,F):
@@ -145,8 +145,8 @@ def cell_displacement_avg(size,F):
        deformation gradient field.
 
    """
-    F_avg = np.average(F,axis=(0,1,2))
-    return np.einsum('ml,ijkl->ijkm',F_avg-np.eye(3),cell_coord0(F.shape[:3][::-1],size))
+    F_avg = _np.average(F,axis=(0,1,2))
+    return _np.einsum('ml,ijkl->ijkm',F_avg-_np.eye(3),cell_coord0(F.shape[:3][::-1],size))
 
 
 def cell_displacement(size,F):
@@ -164,7 +164,7 @@ def cell_displacement(size,F):
    """
     return cell_displacement_avg(size,F) + cell_displacement_fluct(size,F)
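cell_displacement adds the homogeneous (average) and fluctuating contributions computed above. A usage sketch, assuming the package is importable as damask; for a spatially constant F the fluctuation part is numerically zero:

import numpy as np
from damask import grid_filters

size = np.ones(3)
grid = (8,8,8)
F = np.broadcast_to(np.eye(3),grid+(3,3)).copy()
F[...,0,1] = 0.1                                    # homogeneous simple shear
u = grid_filters.cell_displacement(size,F)
print(u.shape)                                      # (8, 8, 8, 3)
print(np.allclose(u, grid_filters.cell_displacement_avg(size,F)))   # fluctuations vanish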
-def cell_coord(size,F,origin=np.zeros(3)):
+def cell_coord(size,F,origin=_np.zeros(3)):
     """
     Cell center positions.
@@ -193,17 +193,17 @@ def cell_coord0_gridSizeOrigin(coord0,ordered=True):
        expect coord0 data to be ordered (x fast, z slow).
 
    """
-    coords    = [np.unique(coord0[:,i]) for i in range(3)]
-    mincorner = np.array(list(map(min,coords)))
-    maxcorner = np.array(list(map(max,coords)))
-    grid      = np.array(list(map(len,coords)),'i')
-    size      = grid/np.maximum(grid-1,1) * (maxcorner-mincorner)
+    coords    = [_np.unique(coord0[:,i]) for i in range(3)]
+    mincorner = _np.array(list(map(min,coords)))
+    maxcorner = _np.array(list(map(max,coords)))
+    grid      = _np.array(list(map(len,coords)),'i')
+    size      = grid/_np.maximum(grid-1,1) * (maxcorner-mincorner)
     delta     = size/grid
     origin    = mincorner - delta*.5
 
     # 1D/2D: size/origin combination undefined, set origin to 0.0
-    size  [np.where(grid==1)] = origin[np.where(grid==1)]*2.
-    origin[np.where(grid==1)] = 0.0
+    size  [_np.where(grid==1)] = origin[_np.where(grid==1)]*2.
+    origin[_np.where(grid==1)] = 0.0
 
     if grid.prod() != len(coord0):
         raise ValueError('Data count {} does not match grid {}.'.format(len(coord0),grid))
@@ -211,13 +211,13 @@ def cell_coord0_gridSizeOrigin(coord0,ordered=True):
     start = origin + delta*.5
     end   = origin - delta*.5 + size
 
-    if not np.allclose(coords[0],np.linspace(start[0],end[0],grid[0])) and \
-           np.allclose(coords[1],np.linspace(start[1],end[1],grid[1])) and \
-           np.allclose(coords[2],np.linspace(start[2],end[2],grid[2])):
+    if not _np.allclose(coords[0],_np.linspace(start[0],end[0],grid[0])) and \
+           _np.allclose(coords[1],_np.linspace(start[1],end[1],grid[1])) and \
+           _np.allclose(coords[2],_np.linspace(start[2],end[2],grid[2])):
         raise ValueError('Regular grid spacing violated.')
 
-    if ordered and not np.allclose(coord0.reshape(tuple(grid[::-1])+(3,)),cell_coord0(grid,size,origin)):
-        raise ValueError('Input data is not a regular grid.')
+    if ordered and not _np.allclose(coord0.reshape(tuple(grid[::-1])+(3,)),cell_coord0(grid,size,origin)):
+        raise ValueError('Input data is not a regular grid.')
 
     return (grid,size,origin)
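cell_coord0_gridSizeOrigin recovers grid, size, and origin from a flat list of cell centers, i.e. it inverts cell_coord0 (the grid-DNA test in the test changes below exercises exactly this). A round-trip sketch, assuming the package is importable as damask:

import numpy as np
from damask import grid_filters

grid   = np.array([8,10,12])
size   = np.array([1.0,2.0,3.0])
origin = np.array([0.5,0.0,-0.25])
coord0 = grid_filters.cell_coord0(grid,size,origin).reshape(-1,3)
g,s,o  = grid_filters.cell_coord0_gridSizeOrigin(coord0,ordered=True)
print(np.allclose(g,grid), np.allclose(s,size), np.allclose(o,origin))   # True True True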
@@ -235,7 +235,7 @@ def coord0_check(coord0):
     cell_coord0_gridSizeOrigin(coord0,ordered=True)
 
 
-def node_coord0(grid,size,origin=np.zeros(3)):
+def node_coord0(grid,size,origin=_np.zeros(3)):
     """
     Nodal positions (undeformed).
@@ -249,7 +249,7 @@ def node_coord0(grid,size,origin=np.zeros(3)):
        physical origin of the periodic field. Defaults to [0.0,0.0,0.0].
 
    """
-    return np.mgrid[origin[0]:size[0]+origin[0]:(grid[0]+1)*1j,
+    return _np.mgrid[origin[0]:size[0]+origin[0]:(grid[0]+1)*1j,
                     origin[1]:size[1]+origin[1]:(grid[1]+1)*1j,
                     origin[2]:size[2]+origin[2]:(grid[2]+1)*1j].T
@@ -281,8 +281,8 @@ def node_displacement_avg(size,F):
        deformation gradient field.
 
    """
-    F_avg = np.average(F,axis=(0,1,2))
-    return np.einsum('ml,ijkl->ijkm',F_avg-np.eye(3),node_coord0(F.shape[:3][::-1],size))
+    F_avg = _np.average(F,axis=(0,1,2))
+    return _np.einsum('ml,ijkl->ijkm',F_avg-_np.eye(3),node_coord0(F.shape[:3][::-1],size))
 
 
 def node_displacement(size,F):
@@ -300,7 +300,7 @@ def node_displacement(size,F):
    """
     return node_displacement_avg(size,F) + node_displacement_fluct(size,F)
 
 
-def node_coord(size,F,origin=np.zeros(3)):
+def node_coord(size,F,origin=_np.zeros(3)):
     """
     Nodal positions.
@@ -319,18 +319,18 @@ def node_coord(size,F,origin=np.zeros(3)):
 def cell_2_node(cell_data):
     """Interpolate periodic cell data to nodal data."""
-    n = ( cell_data + np.roll(cell_data,1,(0,1,2))
-        + np.roll(cell_data,1,(0,)) + np.roll(cell_data,1,(1,)) + np.roll(cell_data,1,(2,))
-        + np.roll(cell_data,1,(0,1)) + np.roll(cell_data,1,(1,2)) + np.roll(cell_data,1,(2,0)))*0.125
+    n = ( cell_data + _np.roll(cell_data,1,(0,1,2))
+        + _np.roll(cell_data,1,(0,)) + _np.roll(cell_data,1,(1,)) + _np.roll(cell_data,1,(2,))
+        + _np.roll(cell_data,1,(0,1)) + _np.roll(cell_data,1,(1,2)) + _np.roll(cell_data,1,(2,0)))*0.125
 
-    return np.pad(n,((0,1),(0,1),(0,1))+((0,0),)*len(cell_data.shape[3:]),mode='wrap')
+    return _np.pad(n,((0,1),(0,1),(0,1))+((0,0),)*len(cell_data.shape[3:]),mode='wrap')
 
 
 def node_2_cell(node_data):
     """Interpolate periodic nodal data to cell data."""
-    c = ( node_data + np.roll(node_data,1,(0,1,2))
-        + np.roll(node_data,1,(0,)) + np.roll(node_data,1,(1,)) + np.roll(node_data,1,(2,))
-        + np.roll(node_data,1,(0,1)) + np.roll(node_data,1,(1,2)) + np.roll(node_data,1,(2,0)))*0.125
+    c = ( node_data + _np.roll(node_data,1,(0,1,2))
+        + _np.roll(node_data,1,(0,)) + _np.roll(node_data,1,(1,)) + _np.roll(node_data,1,(2,))
+        + _np.roll(node_data,1,(0,1)) + _np.roll(node_data,1,(1,2)) + _np.roll(node_data,1,(2,0)))*0.125
 
     return c[:-1,:-1,:-1]
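cell_2_node and node_2_cell average over the eight periodic neighbours, so nodal fields gain one wrapped layer per direction and constant fields are preserved. A short sketch, assuming the package is importable as damask:

import numpy as np
from damask import grid_filters

cell_data = np.ones((4,4,4))
node_data = grid_filters.cell_2_node(cell_data)
print(node_data.shape)                                  # (5, 5, 5): one extra wrapped node layer
print(np.allclose(grid_filters.node_2_cell(node_data), 1.0))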
@@ -347,23 +347,23 @@ def node_coord0_gridSizeOrigin(coord0,ordered=False):
        expect coord0 data to be ordered (x fast, z slow).
 
    """
-    coords    = [np.unique(coord0[:,i]) for i in range(3)]
-    mincorner = np.array(list(map(min,coords)))
-    maxcorner = np.array(list(map(max,coords)))
-    grid      = np.array(list(map(len,coords)),'i') - 1
+    coords    = [_np.unique(coord0[:,i]) for i in range(3)]
+    mincorner = _np.array(list(map(min,coords)))
+    maxcorner = _np.array(list(map(max,coords)))
+    grid      = _np.array(list(map(len,coords)),'i') - 1
     size      = maxcorner-mincorner
     origin    = mincorner
 
     if (grid+1).prod() != len(coord0):
         raise ValueError('Data count {} does not match grid {}.'.format(len(coord0),grid))
 
-    if not np.allclose(coords[0],np.linspace(mincorner[0],maxcorner[0],grid[0]+1)) and \
-           np.allclose(coords[1],np.linspace(mincorner[1],maxcorner[1],grid[1]+1)) and \
-           np.allclose(coords[2],np.linspace(mincorner[2],maxcorner[2],grid[2]+1)):
+    if not _np.allclose(coords[0],_np.linspace(mincorner[0],maxcorner[0],grid[0]+1)) and \
+           _np.allclose(coords[1],_np.linspace(mincorner[1],maxcorner[1],grid[1]+1)) and \
+           _np.allclose(coords[2],_np.linspace(mincorner[2],maxcorner[2],grid[2]+1)):
         raise ValueError('Regular grid spacing violated.')
 
-    if ordered and not np.allclose(coord0.reshape(tuple((grid+1)[::-1])+(3,)),node_coord0(grid,size,origin)):
-        raise ValueError('Input data is not a regular grid.')
+    if ordered and not _np.allclose(coord0.reshape(tuple((grid+1)[::-1])+(3,)),node_coord0(grid,size,origin)):
+        raise ValueError('Input data is not a regular grid.')
 
     return (grid,size,origin)
@@ -386,10 +386,10 @@ def regrid(size,F,new_grid):
       + cell_displacement_avg(size,F) \
       + cell_displacement_fluct(size,F)
 
-    outer = np.dot(np.average(F,axis=(0,1,2)),size)
+    outer = _np.dot(_np.average(F,axis=(0,1,2)),size)
 
     for d in range(3):
-        c[np.where(c[:,:,:,d]<0)]        += outer[d]
-        c[np.where(c[:,:,:,d]>outer[d])] -= outer[d]
+        c[_np.where(c[:,:,:,d]<0)]        += outer[d]
+        c[_np.where(c[:,:,:,d]>outer[d])] -= outer[d]
 
-    tree = spatial.cKDTree(c.reshape(-1,3),boxsize=outer)
+    tree = _spatial.cKDTree(c.reshape(-1,3),boxsize=outer)
     return tree.query(cell_coord0(new_grid,outer))[1].flatten()
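regrid maps the deformed cell centers back onto a regular grid via a periodic nearest-neighbour search. For an undeformed configuration (F equal to the identity everywhere) and an unchanged target grid it should simply return the identity mapping; a hedged sketch, assuming the package is importable as damask:

import numpy as np
from damask import grid_filters

size = np.ones(3)
grid = (6,6,6)
F = np.broadcast_to(np.eye(3),grid+(3,3))
idx = grid_filters.regrid(size,F,np.array(grid))
print(np.array_equal(idx, np.arange(6**3)))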


@@ -1,4 +1,4 @@
-import numpy as np
+import numpy as _np
 
 def Cauchy(P,F):
     """
@@ -14,10 +14,10 @@ def Cauchy(P,F):
        First Piola-Kirchhoff stress.
 
    """
-    if np.shape(F) == np.shape(P) == (3,3):
-        sigma = 1.0/np.linalg.det(F) * np.dot(P,F.T)
+    if _np.shape(F) == _np.shape(P) == (3,3):
+        sigma = 1.0/_np.linalg.det(F) * _np.dot(P,F.T)
     else:
-        sigma = np.einsum('i,ijk,ilk->ijl',1.0/np.linalg.det(F),P,F)
+        sigma = _np.einsum('i,ijk,ilk->ijl',1.0/_np.linalg.det(F),P,F)
     return symmetric(sigma)
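Cauchy converts the first Piola-Kirchhoff stress to the (symmetrized) Cauchy stress via sigma = det(F)^-1 P F^T. A quick check, assuming the package is importable as damask: for F equal to the identity and a symmetric P, the result is P itself:

import numpy as np
from damask import mechanics

P = np.random.random((3,3))
P = 0.5*(P+P.T)                          # make it symmetric
print(np.allclose(mechanics.Cauchy(P,np.eye(3)), P))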
@@ -31,8 +31,8 @@ def deviatoric_part(T):
        Tensor of which the deviatoric part is computed.
 
    """
-    return T - np.eye(3)*spherical_part(T) if np.shape(T) == (3,3) else \
-           T - np.einsum('ijk,i->ijk',np.broadcast_to(np.eye(3),[T.shape[0],3,3]),spherical_part(T))
+    return T - _np.eye(3)*spherical_part(T) if _np.shape(T) == (3,3) else \
+           T - _np.einsum('ijk,i->ijk',_np.broadcast_to(_np.eye(3),[T.shape[0],3,3]),spherical_part(T))
 
 
 def eigenvalues(T_sym):
@@ -48,7 +48,7 @@ def eigenvalues(T_sym):
        Symmetric tensor of which the eigenvalues are computed.
 
    """
-    return np.linalg.eigvalsh(symmetric(T_sym))
+    return _np.linalg.eigvalsh(symmetric(T_sym))
 
 
 def eigenvectors(T_sym,RHS=False):
@@ -65,13 +65,13 @@ def eigenvectors(T_sym,RHS=False):
        Enforce right-handed coordinate system. Default is False.
 
    """
-    (u,v) = np.linalg.eigh(symmetric(T_sym))
+    (u,v) = _np.linalg.eigh(symmetric(T_sym))
 
     if RHS:
-        if np.shape(T_sym) == (3,3):
-            if np.linalg.det(v) < 0.0: v[:,2] *= -1.0
+        if _np.shape(T_sym) == (3,3):
+            if _np.linalg.det(v) < 0.0: v[:,2] *= -1.0
         else:
-            v[np.linalg.det(v) < 0.0,:,2] *= -1.0
+            v[_np.linalg.det(v) < 0.0,:,2] *= -1.0
     return v
 
 
@@ -99,7 +99,7 @@ def maximum_shear(T_sym):
    """
     w = eigenvalues(T_sym)
-    return (w[0] - w[2])*0.5 if np.shape(T_sym) == (3,3) else \
+    return (w[0] - w[2])*0.5 if _np.shape(T_sym) == (3,3) else \
            (w[:,0] - w[:,2])*0.5
 
 
@@ -141,10 +141,10 @@ def PK2(P,F):
        Deformation gradient.
 
    """
-    if np.shape(F) == np.shape(P) == (3,3):
-        S = np.dot(np.linalg.inv(F),P)
+    if _np.shape(F) == _np.shape(P) == (3,3):
+        S = _np.dot(_np.linalg.inv(F),P)
     else:
-        S = np.einsum('ijk,ikl->ijl',np.linalg.inv(F),P)
+        S = _np.einsum('ijk,ikl->ijl',_np.linalg.inv(F),P)
     return symmetric(S)
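deviatoric_part and spherical_part split a tensor into trace-free and volumetric contributions, so adding them back reproduces the input. A consistency sketch, assuming the package is importable as damask:

import numpy as np
from damask import mechanics

T = np.random.random((3,3))
print(np.allclose(mechanics.deviatoric_part(T) + mechanics.spherical_part(T,tensor=True), T))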
@@ -187,14 +187,14 @@ def spherical_part(T,tensor=False):
 
    """
     if T.shape == (3,3):
-        sph = np.trace(T)/3.0
-        return sph if not tensor else np.eye(3)*sph
+        sph = _np.trace(T)/3.0
+        return sph if not tensor else _np.eye(3)*sph
     else:
-        sph = np.trace(T,axis1=1,axis2=2)/3.0
+        sph = _np.trace(T,axis1=1,axis2=2)/3.0
         if not tensor:
             return sph
         else:
-            return np.einsum('ijk,i->ijk',np.broadcast_to(np.eye(3),(T.shape[0],3,3)),sph)
+            return _np.einsum('ijk,i->ijk',_np.broadcast_to(_np.eye(3),(T.shape[0],3,3)),sph)
 
 
 def strain_tensor(F,t,m):
@@ -216,22 +216,22 @@ def strain_tensor(F,t,m):
    """
     F_ = F.reshape(1,3,3) if F.shape == (3,3) else F
     if   t == 'V':
-        B   = np.matmul(F_,transpose(F_))
-        w,n = np.linalg.eigh(B)
+        B   = _np.matmul(F_,transpose(F_))
+        w,n = _np.linalg.eigh(B)
     elif t == 'U':
-        C   = np.matmul(transpose(F_),F_)
-        w,n = np.linalg.eigh(C)
+        C   = _np.matmul(transpose(F_),F_)
+        w,n = _np.linalg.eigh(C)
 
     if   m > 0.0:
-        eps = 1.0/(2.0*abs(m)) * (+ np.matmul(n,np.einsum('ij,ikj->ijk',w**m,n))
-                                  - np.broadcast_to(np.eye(3),[F_.shape[0],3,3]))
+        eps = 1.0/(2.0*abs(m)) * (+ _np.matmul(n,_np.einsum('ij,ikj->ijk',w**m,n))
+                                  - _np.broadcast_to(_np.eye(3),[F_.shape[0],3,3]))
     elif m < 0.0:
-        eps = 1.0/(2.0*abs(m)) * (- np.matmul(n,np.einsum('ij,ikj->ijk',w**m,n))
-                                  + np.broadcast_to(np.eye(3),[F_.shape[0],3,3]))
+        eps = 1.0/(2.0*abs(m)) * (- _np.matmul(n,_np.einsum('ij,ikj->ijk',w**m,n))
+                                  + _np.broadcast_to(_np.eye(3),[F_.shape[0],3,3]))
     else:
-        eps = np.matmul(n,np.einsum('ij,ikj->ijk',0.5*np.log(w),n))
+        eps = _np.matmul(n,_np.einsum('ij,ikj->ijk',0.5*_np.log(w),n))
 
-    return eps.reshape(3,3) if np.shape(F) == (3,3) else \
+    return eps.reshape(3,3) if _np.shape(F) == (3,3) else \
            eps
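strain_tensor builds the Seth-Hill strain family from the left (V) or right (U) stretch; m=0 gives the logarithmic strain. A sketch, assuming the package is importable as damask; for a pure stretch F = diag(lambda) both choices coincide and the strain is diag(ln lambda):

import numpy as np
from damask import mechanics

F = np.diag([1.1,1.0,0.9])
eps_V = mechanics.strain_tensor(F,'V',0.0)
eps_U = mechanics.strain_tensor(F,'U',0.0)
print(np.allclose(eps_V, np.diag(np.log([1.1,1.0,0.9]))), np.allclose(eps_V, eps_U))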
@@ -258,8 +258,8 @@ def transpose(T):
        Tensor of which the transpose is computed.
 
    """
-    return T.T if np.shape(T) == (3,3) else \
-           np.transpose(T,(0,2,1))
+    return T.T if _np.shape(T) == (3,3) else \
+           _np.transpose(T,(0,2,1))
 
 
 def _polar_decomposition(T,requested):
@@ -275,17 +275,17 @@ def _polar_decomposition(T,requested):
        V for left stretch tensor and U for right stretch tensor.
 
    """
-    u, s, vh = np.linalg.svd(T)
-    R = np.dot(u,vh) if np.shape(T) == (3,3) else \
-        np.einsum('ijk,ikl->ijl',u,vh)
+    u, s, vh = _np.linalg.svd(T)
+    R = _np.dot(u,vh) if _np.shape(T) == (3,3) else \
+        _np.einsum('ijk,ikl->ijl',u,vh)
 
     output = []
     if 'R' in requested:
         output.append(R)
     if 'V' in requested:
-        output.append(np.dot(T,R.T) if np.shape(T) == (3,3) else np.einsum('ijk,ilk->ijl',T,R))
+        output.append(_np.dot(T,R.T) if _np.shape(T) == (3,3) else _np.einsum('ijk,ilk->ijl',T,R))
     if 'U' in requested:
-        output.append(np.dot(R.T,T) if np.shape(T) == (3,3) else np.einsum('ikj,ikl->ijl',R,T))
+        output.append(_np.dot(R.T,T) if _np.shape(T) == (3,3) else _np.einsum('ikj,ikl->ijl',R,T))
 
     return tuple(output)
 
 
@@ -303,5 +303,5 @@ def _Mises(T_sym,s):
    """
     d = deviatoric_part(T_sym)
-    return np.sqrt(s*(np.sum(d**2.0))) if np.shape(T_sym) == (3,3) else \
-           np.sqrt(s*np.einsum('ijk->i',d**2.0))
+    return _np.sqrt(s*(_np.sum(d**2.0))) if _np.shape(T_sym) == (3,3) else \
+           _np.sqrt(s*_np.einsum('ijk->i',d**2.0))
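_polar_decomposition obtains the rotation from an SVD and derives the stretch tensors from it. A numpy-only sketch of the same construction (plain numpy, no damask import needed):

import numpy as np

F = np.eye(3) + 0.1*np.random.random((3,3))
u, s, vh = np.linalg.svd(F)
R = u @ vh                                # rotational part
U = R.T @ F                               # right stretch
V = F @ R.T                               # left stretch
print(np.allclose(F, R @ U), np.allclose(F, V @ R))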


@@ -9,37 +9,22 @@ from optparse import Option
 import numpy as np
 
-class bcolors:
-    """
-    ASCII Colors.
-
-    https://svn.blender.org/svnroot/bf-blender/trunk/blender/build_files/scons/tools/bcolors.py
-    https://stackoverflow.com/questions/287871
-    """
-
-    HEADER = '\033[95m'
-    OKBLUE = '\033[94m'
-    OKGREEN = '\033[92m'
-    WARNING = '\033[93m'
-    FAIL = '\033[91m'
-    ENDC = '\033[0m'
-    BOLD = '\033[1m'
-    DIM = '\033[2m'
-    UNDERLINE = '\033[4m'
-    CROSSOUT = '\033[9m'
-
-    def disable(self):
-        self.HEADER = ''
-        self.OKBLUE = ''
-        self.OKGREEN = ''
-        self.WARNING = ''
-        self.FAIL = ''
-        self.ENDC = ''
-        self.BOLD = ''
-        self.UNDERLINE = ''
-        self.CROSSOUT = ''
-
-####################################################################################################
-# Functions
-####################################################################################################
+# limit visibility
+__all__=[
+         'srepr',
+         'croak',
+         'report',
+         'emph','deemph','delete','strikeout',
+         'execute',
+         'show_progress',
+         'scale_to_coprime',
+         'return_message',
+         'extendableOption',
+        ]
 
 def srepr(arg,glue = '\n'):
     r"""
     Join arguments as individual lines.
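The new __all__ list makes the public surface of the module explicit: star imports and most completion tools only pick up the listed names. A minimal check, assuming the package is importable as damask:

from damask import util

print('scale_to_coprime' in util.__all__, 'bcolors' in util.__all__)   # True False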
@@ -144,6 +129,52 @@ def execute(cmd,
     return out,error
 
 
+def show_progress(iterable,N_iter=None,prefix='',bar_length=50):
+    """
+    Decorate a loop with a status bar.
+
+    Usage is similar to enumerate.
+
+    Parameters
+    ----------
+    iterable : iterable/function with yield statement
+        Iterable (or function with yield statement) to be decorated.
+    N_iter : int
+        Total # of iterations. Needed if number of iterations can not be obtained as len(iterable).
+    prefix : str, optional
+        Prefix string.
+    bar_length : int, optional
+        Character length of bar. Defaults to 50.
+
+    """
+    status = _ProgressBar(N_iter if N_iter else len(iterable),prefix,bar_length)
+
+    for i,item in enumerate(iterable):
+        yield item
+        status.update(i)
+
+
+def scale_to_coprime(v):
+    """Scale vector to co-prime (relatively prime) integers."""
+    MAX_DENOMINATOR = 1000
+
+    def get_square_denominator(x):
+        """Denominator of the square of a number."""
+        return fractions.Fraction(x ** 2).limit_denominator(MAX_DENOMINATOR).denominator
+
+    def lcm(a, b):
+        """Least common multiple."""
+        return a * b // np.gcd(a, b)
+
+    denominators = [int(get_square_denominator(i)) for i in v]
+    s = reduce(lcm, denominators) ** 0.5
+    m = (np.array(v)*s).astype(np.int)
+    return m//reduce(np.gcd,m)
+
+
+####################################################################################################
+# Classes
+####################################################################################################
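A usage sketch for the two helpers added above, assuming the package is importable as damask: show_progress wraps any iterable of known length (the bar is written to stderr), and scale_to_coprime reduces a direction such as [0.5, 0.5, 1] to the smallest integer triplet:

import numpy as np
from damask import util

for _ in util.show_progress(range(100)):
    pass

print(util.scale_to_coprime(np.array([0.5,0.5,1.0])))    # [1 1 2]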
 class extendableOption(Option):
     """
     Used for definition of new option parser action 'extend', which enables to take multiple option arguments.
@@ -215,47 +246,36 @@ class _ProgressBar:
         sys.stderr.write('\n')
         sys.stderr.flush()
 
-def show_progress(iterable,N_iter=None,prefix='',bar_length=50):
-    """
-    Decorate a loop with a status bar.
-
-    Use similar like enumerate.
-
-    Parameters
-    ----------
-    iterable : iterable/function with yield statement
-        Iterable (or function with yield statement) to be decorated.
-    N_iter : int
-        Total # of iterations. Needed if number of iterations can not be obtained as len(iterable).
-    prefix : str, optional.
-        Prefix string.
-    bar_length : int, optional
-        Character length of bar. Defaults to 50.
-
-    """
-    status = _ProgressBar(N_iter if N_iter else len(iterable),prefix,bar_length)
-
-    for i,item in enumerate(iterable):
-        yield item
-        status.update(i)
-
-
-def scale_to_coprime(v):
-    """Scale vector to co-prime (relatively prime) integers."""
-    MAX_DENOMINATOR = 1000
-
-    def get_square_denominator(x):
-        """Denominator of the square of a number."""
-        return fractions.Fraction(x ** 2).limit_denominator(MAX_DENOMINATOR).denominator
-
-    def lcm(a, b):
-        """Least common multiple."""
-        return a * b // np.gcd(a, b)
-
-    denominators = [int(get_square_denominator(i)) for i in v]
-    s = reduce(lcm, denominators) ** 0.5
-    m = (np.array(v)*s).astype(np.int)
-    return m//reduce(np.gcd,m)
+class bcolors:
+    """
+    ASCII Colors.
+
+    https://svn.blender.org/svnroot/bf-blender/trunk/blender/build_files/scons/tools/bcolors.py
+    https://stackoverflow.com/questions/287871
+    """
+
+    HEADER = '\033[95m'
+    OKBLUE = '\033[94m'
+    OKGREEN = '\033[92m'
+    WARNING = '\033[93m'
+    FAIL = '\033[91m'
+    ENDC = '\033[0m'
+    BOLD = '\033[1m'
+    DIM = '\033[2m'
+    UNDERLINE = '\033[4m'
+    CROSSOUT = '\033[9m'
+
+    def disable(self):
+        self.HEADER = ''
+        self.OKBLUE = ''
+        self.OKGREEN = ''
+        self.WARNING = ''
+        self.FAIL = ''
+        self.ENDC = ''
+        self.BOLD = ''
+        self.UNDERLINE = ''
+        self.CROSSOUT = ''
 
 
 class return_message:
@@ -276,4 +296,3 @@ class return_message:
     def __repr__(self):
         """Return message suitable for interactive shells."""
         return srepr(self.message)


@@ -24,7 +24,7 @@ class TestGridFilters:
         n = grid_filters.node_coord0(grid,size) + size/grid*.5
         assert np.allclose(c,n)
 
-    @pytest.mark.parametrize('mode',[('cell'),('node')])
+    @pytest.mark.parametrize('mode',['cell','node'])
     def test_grid_DNA(self,mode):
         """Ensure that xx_coord0_gridSizeOrigin is the inverse of xx_coord0."""
         grid = np.random.randint(8,32,(3))
@@ -49,7 +49,7 @@ class TestGridFilters:
         assert np.allclose(grid_filters.node_coord(size,F) [1:-1,1:-1,1:-1],grid_filters.cell_2_node(
                            grid_filters.cell_coord(size,F))[1:-1,1:-1,1:-1])
 
-    @pytest.mark.parametrize('mode',[('cell'),('node')])
+    @pytest.mark.parametrize('mode',['cell','node'])
     def test_coord0_origin(self,mode):
         origin = np.random.random(3)
         size   = np.random.random(3)                                    # noqa
@@ -61,22 +61,24 @@ class TestGridFilters:
         elif mode == 'node':
             assert np.allclose(shifted,unshifted+np.broadcast_to(origin,tuple(grid[::-1]+1)+(3,)))
 
-    @pytest.mark.parametrize('mode',[('cell'),('node')])
-    def test_displacement_avg_vanishes(self,mode):
+    @pytest.mark.parametrize('function',[grid_filters.cell_displacement_avg,
+                                         grid_filters.node_displacement_avg])
+    def test_displacement_avg_vanishes(self,function):
         """Ensure that random fluctuations in F do not result in average displacement."""
-        size = np.random.random(3)                                      # noqa
+        size = np.random.random(3)
         grid = np.random.randint(8,32,(3))
         F    = np.random.random(tuple(grid)+(3,3))
         F   += np.eye(3) - np.average(F,axis=(0,1,2))
-        assert np.allclose(eval('grid_filters.{}_displacement_avg(size,F)'.format(mode)),0.0)
+        assert np.allclose(function(size,F),0.0)
 
-    @pytest.mark.parametrize('mode',[('cell'),('node')])
-    def test_displacement_fluct_vanishes(self,mode):
+    @pytest.mark.parametrize('function',[grid_filters.cell_displacement_fluct,
+                                         grid_filters.node_displacement_fluct])
+    def test_displacement_fluct_vanishes(self,function):
         """Ensure that constant F does not result in fluctuating displacement."""
-        size = np.random.random(3)                                      # noqa
+        size = np.random.random(3)
         grid = np.random.randint(8,32,(3))
-        F = np.broadcast_to(np.random.random((3,3)), tuple(grid)+(3,3)) # noqa
+        F = np.broadcast_to(np.random.random((3,3)), tuple(grid)+(3,3))
-        assert np.allclose(eval('grid_filters.{}_displacement_fluct(size,F)'.format(mode)),0.0)
+        assert np.allclose(function(size,F),0.0)
 
     def test_regrid(self):
         size = np.random.random(3)
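Parametrizing over the function objects themselves (instead of building a call string for eval) keeps the tests readable, lets pytest report meaningful test ids, and removes the need for the # noqa markers that silenced unused-variable warnings. A standalone sketch of the pattern (hypothetical toy functions, runnable under pytest):

import numpy as np
import pytest

def double(x): return 2.0*x
def square(x): return x*x

@pytest.mark.parametrize('function',[double,square])
def test_zero_is_fixed_point(function):
    assert np.isclose(function(0.0),0.0)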

src/LAPACK_interface.f90 (new file, 59 lines added)

@@ -0,0 +1,59 @@
+!--------------------------------------------------------------------------------------------------
+!> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH
+!> @brief Fortran interfaces for LAPACK routines
+!> @details https://www.netlib.org/lapack/
+!--------------------------------------------------------------------------------------------------
+module LAPACK_interface
+  interface
+
+    subroutine dgeev(jobvl,jobvr,n,a,lda,wr,wi,vl,ldvl,vr,ldvr,work,lwork,info)
+      use prec
+      character, intent(in) :: jobvl,jobvr
+      integer, intent(in) :: n,lda,ldvl,ldvr,lwork
+      real(pReal), intent(inout), dimension(lda,n) :: a
+      real(pReal), intent(out), dimension(n) :: wr,wi
+      real(pReal), intent(out), dimension(ldvl,n) :: vl
+      real(pReal), intent(out), dimension(ldvr,n) :: vr
+      real(pReal), intent(out), dimension(max(1,lwork)) :: work
+      integer, intent(out) :: info
+    end subroutine dgeev
+
+    subroutine dgesv(n,nrhs,a,lda,ipiv,b,ldb,info)
+      use prec
+      integer, intent(in) :: n,nrhs,lda,ldb
+      real(pReal), intent(inout), dimension(lda,n) :: a
+      integer, intent(out), dimension(n) :: ipiv
+      real(pReal), intent(out), dimension(ldb,nrhs) :: b
+      integer, intent(out) :: info
+    end subroutine dgesv
+
+    subroutine dgetrf(m,n,a,lda,ipiv,info)
+      use prec
+      integer, intent(in) :: m,n,lda
+      real(pReal), intent(inout), dimension(lda,n) :: a
+      integer, intent(out), dimension(min(m,n)) :: ipiv
+      integer, intent(out) :: info
+    end subroutine dgetrf
+
+    subroutine dgetri(n,a,lda,ipiv,work,lwork,info)
+      use prec
+      integer, intent(in) :: n,lda,lwork
+      real(pReal), intent(inout), dimension(lda,n) :: a
+      integer, intent(out), dimension(n) :: ipiv
+      real(pReal), intent(out), dimension(max(1,lwork)) :: work
+      integer, intent(out) :: info
+    end subroutine dgetri
+
+    subroutine dsyev(jobz,uplo,n,a,lda,w,work,lwork,info)
+      use prec
+      character, intent(in) :: jobz,uplo
+      integer, intent(in) :: n,lda,lwork
+      real(pReal), intent(inout), dimension(lda,n) :: a
+      real(pReal), intent(out), dimension(n) :: w
+      real(pReal), intent(out), dimension(max(1,lwork)) :: work
+      integer, intent(out) :: info
+    end subroutine dsyev
+
+  end interface
+end module LAPACK_interface


@@ -9,6 +9,7 @@
 #include "list.f90"
 #include "future.f90"
 #include "config.f90"
+#include "LAPACK_interface.f90"
 #include "math.f90"
 #include "quaternions.f90"
 #include "Lambert.f90"


@@ -835,8 +835,6 @@ logical function integrateStress(ipc,ip,el,timeFraction)
              jacoCounterLp, &
              jacoCounterLi                  ! counters to check for Jacobian update
   logical :: error
-  external :: &
-    dgesv
 
   integrateStress = .false.


@@ -9,6 +9,7 @@ module math
   use prec
   use IO
   use numerics
+  use LAPACK_interface
 
   implicit none
   public
@@ -487,16 +488,12 @@ function math_invSym3333(A)
 
   integer, dimension(6) :: ipiv6
   real(pReal), dimension(6,6) :: temp66
-  real(pReal), dimension(6*(64+2)) :: work
+  real(pReal), dimension(6*6) :: work
   integer :: ierr_i, ierr_f
-  external :: &
-    dgetrf, &
-    dgetri
 
   temp66 = math_sym3333to66(A)
   call dgetrf(6,6,temp66,6,ipiv6,ierr_i)
   call dgetri(6,temp66,6,ipiv6,work,size(work,1),ierr_f)
   if (ierr_i /= 0 .or. ierr_f /= 0) then
     call IO_error(400, ext_msg = 'math_invSym3333')
   else
@@ -516,11 +513,8 @@ subroutine math_invert(InvA, error, A)
   logical, intent(out) :: error
 
   integer, dimension(size(A,1)) :: ipiv
-  real(pReal), dimension(size(A,1)*(64+2)) :: work
+  real(pReal), dimension(size(A,1)**2) :: work
   integer :: ierr
-  external :: &
-    dgetrf, &
-    dgetri
 
   invA = A
   call dgetrf(size(A,1),size(A,1),invA,size(A,1),ipiv,ierr)
@@ -884,9 +878,7 @@ subroutine math_eigh(m,w,v,error)
   logical, intent(out) :: error
 
   integer :: ierr
-  real(pReal), dimension((64+2)*size(m,1)) :: work      ! block size of 64 taken from http://www.netlib.org/lapack/double/dsyev.f
+  real(pReal), dimension(size(m,1)**2) :: work
 
   v = m                                                 ! copy matrix to input (doubles as output) array
   call dsyev('V','U',size(m,1),v,size(m,1),w,work,size(work,1),ierr)
@@ -1041,9 +1033,7 @@ function math_eigvalsh(m)
 
   real(pReal), dimension(size(m,1),size(m,1)) :: m_
   integer :: ierr
-  real(pReal), dimension((64+2)*size(m,1)) :: work      ! block size of 64 taken from http://www.netlib.org/lapack/double/dsyev.f
+  real(pReal), dimension(size(m,1)**2) :: work
 
   m_= m                                                 ! copy matrix to input (will be destroyed)
   call dsyev('N','U',size(m,1),m_,size(m,1),math_eigvalsh,work,size(work,1),ierr)


@@ -596,8 +596,6 @@ function om2ax(om) result(ax)
   real(pReal), dimension(3,3) :: VR, devNull, om_
   integer :: ierr, i
 
-  external :: dgeev
-
   om_ = om
 
   ! first get the rotation angle