From 79533b075ee4084048aa355c0c5adbbe2eab719c Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Sat, 15 Feb 2020 13:56:15 +0100 Subject: [PATCH 01/51] eigenvalues is more specific name than principal components --- python/damask/mechanics.py | 106 +++++++++++++++++++-------------- python/tests/test_mechanics.py | 55 ++++++++++++++--- 2 files changed, 107 insertions(+), 54 deletions(-) diff --git a/python/damask/mechanics.py b/python/damask/mechanics.py index 307f1d83d..bf23edb40 100644 --- a/python/damask/mechanics.py +++ b/python/damask/mechanics.py @@ -3,9 +3,9 @@ import numpy as np def Cauchy(F,P): """ Return Cauchy stress calculated from 1. Piola-Kirchhoff stress and deformation gradient. - + Resulting tensor is symmetrized as the Cauchy stress needs to be symmetric. - + Parameters ---------- F : numpy.array of shape (:,3,3) or (3,3) @@ -24,7 +24,7 @@ def Cauchy(F,P): def PK2(F,P): """ Return 2. Piola-Kirchhoff stress calculated from 1. Piola-Kirchhoff stress and deformation gradient. - + Parameters ---------- F : numpy.array of shape (:,3,3) or (3,3) @@ -37,16 +37,16 @@ def PK2(F,P): S = np.dot(np.linalg.inv(F),P) else: S = np.einsum('ijk,ikl->ijl',np.linalg.inv(F),P) - return S + return symmetric(S) def strain_tensor(F,t,m): """ Return strain tensor calculated from deformation gradient. - + For details refer to https://en.wikipedia.org/wiki/Finite_strain_theory and https://de.wikipedia.org/wiki/Verzerrungstensor - + Parameters ---------- F : numpy.array of shape (:,3,3) or (3,3) @@ -64,16 +64,16 @@ def strain_tensor(F,t,m): elif t == 'U': C = np.matmul(transpose(F_),F_) w,n = np.linalg.eigh(C) - + if m > 0.0: - eps = 1.0/(2.0*abs(m)) * (+ np.matmul(n,np.einsum('ij,ikj->ijk',w**m,n)) + eps = 1.0/(2.0*abs(m)) * (+ np.matmul(n,np.einsum('ij,ikj->ijk',w**m,n)) - np.broadcast_to(np.eye(3),[F_.shape[0],3,3])) elif m < 0.0: eps = 1.0/(2.0*abs(m)) * (- np.matmul(n,np.einsum('ij,ikj->ijk',w**m,n)) + np.broadcast_to(np.eye(3),[F_.shape[0],3,3])) else: eps = np.matmul(n,np.einsum('ij,ikj->ijk',0.5*np.log(w),n)) - + return eps.reshape((3,3)) if np.shape(F) == (3,3) else \ eps @@ -81,7 +81,7 @@ def strain_tensor(F,t,m): def deviatoric_part(x): """ Return deviatoric part of a tensor. - + Parameters ---------- x : numpy.array of shape (:,3,3) or (3,3) @@ -89,13 +89,13 @@ def deviatoric_part(x): """ return x - np.eye(3)*spherical_part(x) if np.shape(x) == (3,3) else \ - x - np.einsum('ijk,i->ijk',np.broadcast_to(np.eye(3),[x.shape[0],3,3]),spherical_part(x)) + x - np.einsum('ijk,i->ijk',np.broadcast_to(np.eye(3),[x.shape[0],3,3]),spherical_part(x)) def spherical_part(x,tensor=False): """ Return spherical (hydrostatic) part of a tensor. - + Parameters ---------- x : numpy.array of shape (:,3,3) or (3,3) @@ -113,12 +113,12 @@ def spherical_part(x,tensor=False): return sph else: return np.einsum('ijk,i->ijk',np.broadcast_to(np.eye(3),(x.shape[0],3,3)),sph) - - + + def Mises_stress(sigma): """ Return the Mises equivalent of a stress tensor. - + Parameters ---------- sigma : numpy.array of shape (:,3,3) or (3,3) @@ -128,12 +128,12 @@ def Mises_stress(sigma): s = deviatoric_part(sigma) return np.sqrt(3.0/2.0*(np.sum(s**2.0))) if np.shape(sigma) == (3,3) else \ np.sqrt(3.0/2.0*np.einsum('ijk->i',s**2.0)) - - + + def Mises_strain(epsilon): """ Return the Mises equivalent of a strain tensor. - + Parameters ---------- epsilon : numpy.array of shape (:,3,3) or (3,3) @@ -148,7 +148,7 @@ def Mises_strain(epsilon): def symmetric(x): """ Return the symmetrized tensor. 
- + Parameters ---------- x : numpy.array of shape (:,3,3) or (3,3) @@ -161,40 +161,54 @@ def symmetric(x): def maximum_shear(x): """ Return the maximum shear component of a symmetric tensor. - + Parameters ---------- x : numpy.array of shape (:,3,3) or (3,3) Symmetric tensor of which the maximum shear is computed. """ - w = np.linalg.eigvalsh(symmetric(x)) # eigenvalues in ascending order - return (w[2] - w[0])*0.5 if np.shape(x) == (3,3) else \ - (w[:,2] - w[:,0])*0.5 - - -def principal_components(x): + w = eigenvalues(x) + return (w[0] - w[2])*0.5 if np.shape(x) == (3,3) else \ + (w[:,0] - w[:,2])*0.5 + + +def eigenvalues(x): """ - Return the principal components of a symmetric tensor. - - The principal components (eigenvalues) are sorted in descending order, each repeated according to + Return the eigenvalues, i.e. principal components, of a symmetric tensor. + + The eigenvalues are sorted in ascending order, each repeated according to its multiplicity. - + Parameters ---------- x : numpy.array of shape (:,3,3) or (3,3) - Symmetric tensor of which the principal compontents are computed. + Symmetric tensor of which the eigenvalues are computed. """ - w = np.linalg.eigvalsh(symmetric(x)) # eigenvalues in ascending order - return w[::-1] if np.shape(x) == (3,3) else \ - w[:,::-1] - - + return np.linalg.eigvalsh(symmetric(x)) + + +def eigenvectors(x): + """ + Return eigenvectors of a symmetric tensor. + + The eigenvalues are sorted in ascending order of their associated eigenvalues. + + Parameters + ---------- + x : numpy.array of shape (:,3,3) or (3,3) + Symmetric tensor of which the eigenvectors are computed. + + """ + (u,v) = np.linalg.eigh(symmetric(x)) + return v + + def transpose(x): """ Return the transpose of a tensor. - + Parameters ---------- x : numpy.array of shape (:,3,3) or (3,3) @@ -208,7 +222,7 @@ def transpose(x): def rotational_part(x): """ Return the rotational part of a tensor. - + Parameters ---------- x : numpy.array of shape (:,3,3) or (3,3) @@ -221,7 +235,7 @@ def rotational_part(x): def left_stretch(x): """ Return the left stretch of a tensor. - + Parameters ---------- x : numpy.array of shape (:,3,3) or (3,3) @@ -229,12 +243,12 @@ def left_stretch(x): """ return __polar_decomposition(x,'V')[0] - - + + def right_stretch(x): """ Return the right stretch of a tensor. - + Parameters ---------- x : numpy.array of shape (:,3,3) or (3,3) @@ -247,20 +261,20 @@ def right_stretch(x): def __polar_decomposition(x,requested): """ Singular value decomposition. - + Parameters ---------- x : numpy.array of shape (:,3,3) or (3,3) Tensor of which the singular values are computed. requested : iterable of str - Requested outputs: ‘R’ for the rotation tensor, + Requested outputs: ‘R’ for the rotation tensor, ‘V’ for left stretch tensor and ‘U’ for right stretch tensor. 
""" u, s, vh = np.linalg.svd(x) R = np.dot(u,vh) if np.shape(x) == (3,3) else \ np.einsum('ijk,ikl->ijl',u,vh) - + output = [] if 'R' in requested: output.append(R) @@ -268,5 +282,5 @@ def __polar_decomposition(x,requested): output.append(np.dot(x,R.T) if np.shape(x) == (3,3) else np.einsum('ijk,ilk->ijl',x,R)) if 'U' in requested: output.append(np.dot(R.T,x) if np.shape(x) == (3,3) else np.einsum('ikj,ikl->ijl',R,x)) - + return tuple(output) diff --git a/python/tests/test_mechanics.py b/python/tests/test_mechanics.py index 9e1d9bc0c..483452a12 100644 --- a/python/tests/test_mechanics.py +++ b/python/tests/test_mechanics.py @@ -2,10 +2,10 @@ import numpy as np from damask import mechanics class TestMechanics: - + n = 1000 c = np.random.randint(n) - + def test_vectorize_Cauchy(self): P = np.random.random((self.n,3,3)) @@ -58,10 +58,23 @@ class TestMechanics: mechanics.maximum_shear(x[self.c])) - def test_vectorize_principal_components(self): + def test_vectorize_eigenvalues(self): x = np.random.random((self.n,3,3)) - assert np.allclose(mechanics.principal_components(x)[self.c], - mechanics.principal_components(x[self.c])) + assert np.allclose(mechanics.eigenvalues(x)[self.c], + mechanics.eigenvalues(x[self.c])) + + + def test_vectorize_eigenvectors(self): + x = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.eigenvectors(x)[self.c], + mechanics.eigenvectors(x[self.c])) + + + def test_vectorize_PK2(self): + F = np.random.random((self.n,3,3)) + P = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.PK2(F,P)[self.c], + mechanics.PK2(F[self.c],P[self.c])) def test_vectorize_transpose(self): @@ -102,7 +115,14 @@ class TestMechanics: U = mechanics.right_stretch(F) assert np.allclose(np.matmul(R,U), np.matmul(V,R)) - + + + def test_PK2(self): + """Ensure 2. Piola-Kirchhoff stress is symmetrized 1. Piola-Kirchhoff stress for no deformation.""" + P = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.PK2(np.broadcast_to(np.eye(3),(self.n,3,3)),P), + mechanics.symmetric(P)) + def test_strain_tensor_no_rotation(self): """Ensure that left and right stretch give same results for no rotation.""" @@ -110,7 +130,7 @@ class TestMechanics: m = np.random.random()*20.0-10.0 assert np.allclose(mechanics.strain_tensor(F,'U',m), mechanics.strain_tensor(F,'V',m)) - + def test_strain_tensor_rotation_equivalence(self): """Ensure that left and right strain differ only by a rotation.""" F = np.broadcast_to(np.eye(3),[self.n,3,3]) + (np.random.random((self.n,3,3))*0.5 - 0.25) @@ -125,7 +145,7 @@ class TestMechanics: m = np.random.random()*2.0 - 1.0 assert np.allclose(mechanics.strain_tensor(F,t,m), 0.0) - + def test_rotation_determinant(self): """ Ensure that the determinant of the rotational part is +- 1. 
@@ -186,3 +206,22 @@ class TestMechanics: x = np.random.random((self.n,3,3)) assert np.allclose(mechanics.Mises_stress(x)/mechanics.Mises_strain(x), 1.5) + + + def test_eigenvalues(self): + """Ensure that the characteristic polynomial can be solved.""" + A = mechanics.symmetric(np.random.random((self.n,3,3))) + lambd = mechanics.eigenvalues(A) + s = np.random.randint(self.n) + for i in range(3): + assert np.allclose(np.linalg.det(A[s]-lambd[s,i]*np.eye(3)),.0) + + + def test_eigenvalues_and_vectors(self): + """Ensure that eigenvalues and -vectors are the solution to the characteristic polynomial.""" + A = mechanics.symmetric(np.random.random((self.n,3,3))) + lambd = mechanics.eigenvalues(A) + x = mechanics.eigenvectors(A) + s = np.random.randint(self.n) + for i in range(3): + assert np.allclose(np.dot(A[s]-lambd[s,i]*np.eye(3),x[s,:,i]),.0) From e46395be41d09fed5a48628f8dbfdb64fd262e26 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Sat, 15 Feb 2020 14:10:16 +0100 Subject: [PATCH 02/51] sorted alphabetically --- python/damask/mechanics.py | 317 +++++++++++++++++---------------- python/tests/test_mechanics.py | 126 ++++++------- 2 files changed, 220 insertions(+), 223 deletions(-) diff --git a/python/damask/mechanics.py b/python/damask/mechanics.py index bf23edb40..8d89a5e81 100644 --- a/python/damask/mechanics.py +++ b/python/damask/mechanics.py @@ -21,6 +21,106 @@ def Cauchy(F,P): return symmetric(sigma) +def deviatoric_part(x): + """ + Return deviatoric part of a tensor. + + Parameters + ---------- + x : numpy.array of shape (:,3,3) or (3,3) + Tensor of which the deviatoric part is computed. + + """ + return x - np.eye(3)*spherical_part(x) if np.shape(x) == (3,3) else \ + x - np.einsum('ijk,i->ijk',np.broadcast_to(np.eye(3),[x.shape[0],3,3]),spherical_part(x)) + + +def eigenvalues(x): + """ + Return the eigenvalues, i.e. principal components, of a symmetric tensor. + + The eigenvalues are sorted in ascending order, each repeated according to + its multiplicity. + + Parameters + ---------- + x : numpy.array of shape (:,3,3) or (3,3) + Symmetric tensor of which the eigenvalues are computed. + + """ + return np.linalg.eigvalsh(symmetric(x)) + + +def eigenvectors(x): + """ + Return eigenvectors of a symmetric tensor. + + The eigenvalues are sorted in ascending order of their associated eigenvalues. + + Parameters + ---------- + x : numpy.array of shape (:,3,3) or (3,3) + Symmetric tensor of which the eigenvectors are computed. + + """ + (u,v) = np.linalg.eigh(symmetric(x)) + return v + + +def left_stretch(x): + """ + Return the left stretch of a tensor. + + Parameters + ---------- + x : numpy.array of shape (:,3,3) or (3,3) + Tensor of which the left stretch is computed. + + """ + return __polar_decomposition(x,'V')[0] + + +def maximum_shear(x): + """ + Return the maximum shear component of a symmetric tensor. + + Parameters + ---------- + x : numpy.array of shape (:,3,3) or (3,3) + Symmetric tensor of which the maximum shear is computed. + + """ + w = eigenvalues(x) + return (w[0] - w[2])*0.5 if np.shape(x) == (3,3) else \ + (w[:,0] - w[:,2])*0.5 + + +def Mises_strain(epsilon): + """ + Return the Mises equivalent of a strain tensor. + + Parameters + ---------- + epsilon : numpy.array of shape (:,3,3) or (3,3) + Symmetric strain tensor of which the von Mises equivalent is computed. + + """ + return __Mises(epsilon,2.0/3.0) + + +def Mises_stress(sigma): + """ + Return the Mises equivalent of a stress tensor. 
+ + Parameters + ---------- + sigma : numpy.array of shape (:,3,3) or (3,3) + Symmetric stress tensor of which the von Mises equivalent is computed. + + """ + return __Mises(sigma,3.0/2.0) + + def PK2(F,P): """ Return 2. Piola-Kirchhoff stress calculated from 1. Piola-Kirchhoff stress and deformation gradient. @@ -39,6 +139,54 @@ def PK2(F,P): S = np.einsum('ijk,ikl->ijl',np.linalg.inv(F),P) return symmetric(S) +def right_stretch(x): + """ + Return the right stretch of a tensor. + + Parameters + ---------- + x : numpy.array of shape (:,3,3) or (3,3) + Tensor of which the right stretch is computed. + + """ + return __polar_decomposition(x,'U')[0] + + +def rotational_part(x): + """ + Return the rotational part of a tensor. + + Parameters + ---------- + x : numpy.array of shape (:,3,3) or (3,3) + Tensor of which the rotational part is computed. + + """ + return __polar_decomposition(x,'R')[0] + + +def spherical_part(x,tensor=False): + """ + Return spherical (hydrostatic) part of a tensor. + + Parameters + ---------- + x : numpy.array of shape (:,3,3) or (3,3) + Tensor of which the hydrostatic part is computed. + tensor : bool, optional + Map spherical part onto identity tensor. Default is false + + """ + if x.shape == (3,3): + sph = np.trace(x)/3.0 + return sph if not tensor else np.eye(3)*sph + else: + sph = np.trace(x,axis1=1,axis2=2)/3.0 + if not tensor: + return sph + else: + return np.einsum('ijk,i->ijk',np.broadcast_to(np.eye(3),(x.shape[0],3,3)),sph) + def strain_tensor(F,t,m): """ @@ -78,73 +226,6 @@ def strain_tensor(F,t,m): eps -def deviatoric_part(x): - """ - Return deviatoric part of a tensor. - - Parameters - ---------- - x : numpy.array of shape (:,3,3) or (3,3) - Tensor of which the deviatoric part is computed. - - """ - return x - np.eye(3)*spherical_part(x) if np.shape(x) == (3,3) else \ - x - np.einsum('ijk,i->ijk',np.broadcast_to(np.eye(3),[x.shape[0],3,3]),spherical_part(x)) - - -def spherical_part(x,tensor=False): - """ - Return spherical (hydrostatic) part of a tensor. - - Parameters - ---------- - x : numpy.array of shape (:,3,3) or (3,3) - Tensor of which the hydrostatic part is computed. - tensor : bool, optional - Map spherical part onto identity tensor. Default is false - - """ - if x.shape == (3,3): - sph = np.trace(x)/3.0 - return sph if not tensor else np.eye(3)*sph - else: - sph = np.trace(x,axis1=1,axis2=2)/3.0 - if not tensor: - return sph - else: - return np.einsum('ijk,i->ijk',np.broadcast_to(np.eye(3),(x.shape[0],3,3)),sph) - - -def Mises_stress(sigma): - """ - Return the Mises equivalent of a stress tensor. - - Parameters - ---------- - sigma : numpy.array of shape (:,3,3) or (3,3) - Symmetric stress tensor of which the von Mises equivalent is computed. - - """ - s = deviatoric_part(sigma) - return np.sqrt(3.0/2.0*(np.sum(s**2.0))) if np.shape(sigma) == (3,3) else \ - np.sqrt(3.0/2.0*np.einsum('ijk->i',s**2.0)) - - -def Mises_strain(epsilon): - """ - Return the Mises equivalent of a strain tensor. - - Parameters - ---------- - epsilon : numpy.array of shape (:,3,3) or (3,3) - Symmetric strain tensor of which the von Mises equivalent is computed. - - """ - s = deviatoric_part(epsilon) - return np.sqrt(2.0/3.0*(np.sum(s**2.0))) if np.shape(epsilon) == (3,3) else \ - np.sqrt(2.0/3.0*np.einsum('ijk->i',s**2.0)) - - def symmetric(x): """ Return the symmetrized tensor. @@ -158,53 +239,6 @@ def symmetric(x): return (x+transpose(x))*0.5 -def maximum_shear(x): - """ - Return the maximum shear component of a symmetric tensor. 
- - Parameters - ---------- - x : numpy.array of shape (:,3,3) or (3,3) - Symmetric tensor of which the maximum shear is computed. - - """ - w = eigenvalues(x) - return (w[0] - w[2])*0.5 if np.shape(x) == (3,3) else \ - (w[:,0] - w[:,2])*0.5 - - -def eigenvalues(x): - """ - Return the eigenvalues, i.e. principal components, of a symmetric tensor. - - The eigenvalues are sorted in ascending order, each repeated according to - its multiplicity. - - Parameters - ---------- - x : numpy.array of shape (:,3,3) or (3,3) - Symmetric tensor of which the eigenvalues are computed. - - """ - return np.linalg.eigvalsh(symmetric(x)) - - -def eigenvectors(x): - """ - Return eigenvectors of a symmetric tensor. - - The eigenvalues are sorted in ascending order of their associated eigenvalues. - - Parameters - ---------- - x : numpy.array of shape (:,3,3) or (3,3) - Symmetric tensor of which the eigenvectors are computed. - - """ - (u,v) = np.linalg.eigh(symmetric(x)) - return v - - def transpose(x): """ Return the transpose of a tensor. @@ -219,45 +253,6 @@ def transpose(x): np.transpose(x,(0,2,1)) -def rotational_part(x): - """ - Return the rotational part of a tensor. - - Parameters - ---------- - x : numpy.array of shape (:,3,3) or (3,3) - Tensor of which the rotational part is computed. - - """ - return __polar_decomposition(x,'R')[0] - - -def left_stretch(x): - """ - Return the left stretch of a tensor. - - Parameters - ---------- - x : numpy.array of shape (:,3,3) or (3,3) - Tensor of which the left stretch is computed. - - """ - return __polar_decomposition(x,'V')[0] - - -def right_stretch(x): - """ - Return the right stretch of a tensor. - - Parameters - ---------- - x : numpy.array of shape (:,3,3) or (3,3) - Tensor of which the right stretch is computed. - - """ - return __polar_decomposition(x,'U')[0] - - def __polar_decomposition(x,requested): """ Singular value decomposition. @@ -284,3 +279,19 @@ def __polar_decomposition(x,requested): output.append(np.dot(R.T,x) if np.shape(x) == (3,3) else np.einsum('ikj,ikl->ijl',R,x)) return tuple(output) + + +def __Mises(x,s): + """ + Base equation for Mises equivalent of a stres or strain tensor. + + Parameters + ---------- + x : numpy.array of shape (:,3,3) or (3,3) + Symmetric tensor of which the von Mises equivalent is computed. + s : float + Scaling factor (2/3 for strain, 3/2 for stress). 
+ """ + d = deviatoric_part(x) + return np.sqrt(s*(np.sum(d**2.0))) if np.shape(x) == (3,3) else \ + np.sqrt(s*np.einsum('ijk->i',d**2.0)) diff --git a/python/tests/test_mechanics.py b/python/tests/test_mechanics.py index 483452a12..a13310dce 100644 --- a/python/tests/test_mechanics.py +++ b/python/tests/test_mechanics.py @@ -13,6 +13,61 @@ class TestMechanics: assert np.allclose(mechanics.Cauchy(F,P)[self.c], mechanics.Cauchy(F[self.c],P[self.c])) + def test_vectorize_deviatoric_part(self): + x = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.deviatoric_part(x)[self.c], + mechanics.deviatoric_part(x[self.c])) + + def test_vectorize_eigenvalues(self): + x = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.eigenvalues(x)[self.c], + mechanics.eigenvalues(x[self.c])) + + def test_vectorize_eigenvectors(self): + x = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.eigenvectors(x)[self.c], + mechanics.eigenvectors(x[self.c])) + + def test_vectorize_left_stretch(self): + x = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.left_stretch(x)[self.c], + mechanics.left_stretch(x[self.c])) + + def test_vectorize_maximum_shear(self): + x = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.maximum_shear(x)[self.c], + mechanics.maximum_shear(x[self.c])) + + def test_vectorize_Mises_strain(self): + epsilon = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.Mises_strain(epsilon)[self.c], + mechanics.Mises_strain(epsilon[self.c])) + + def test_vectorize_Mises_stress(self): + sigma = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.Mises_stress(sigma)[self.c], + mechanics.Mises_stress(sigma[self.c])) + + def test_vectorize_PK2(self): + F = np.random.random((self.n,3,3)) + P = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.PK2(F,P)[self.c], + mechanics.PK2(F[self.c],P[self.c])) + + def test_vectorize_right_stretch(self): + x = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.right_stretch(x)[self.c], + mechanics.right_stretch(x[self.c])) + + def test_vectorize_rotational_part(self): + x = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.rotational_part(x)[self.c], + mechanics.rotational_part(x[self.c])) + + def test_vectorize_spherical_part(self): + x = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.spherical_part(x,True)[self.c], + mechanics.spherical_part(x[self.c],True)) def test_vectorize_strain_tensor(self): F = np.random.random((self.n,3,3)) @@ -21,92 +76,24 @@ class TestMechanics: assert np.allclose(mechanics.strain_tensor(F,t,m)[self.c], mechanics.strain_tensor(F[self.c],t,m)) - - def test_vectorize_deviatoric_part(self): - x = np.random.random((self.n,3,3)) - assert np.allclose(mechanics.deviatoric_part(x)[self.c], - mechanics.deviatoric_part(x[self.c])) - - - def test_vectorize_spherical_part(self): - x = np.random.random((self.n,3,3)) - assert np.allclose(mechanics.spherical_part(x,True)[self.c], - mechanics.spherical_part(x[self.c],True)) - - - def test_vectorize_Mises_stress(self): - sigma = np.random.random((self.n,3,3)) - assert np.allclose(mechanics.Mises_stress(sigma)[self.c], - mechanics.Mises_stress(sigma[self.c])) - - - def test_vectorize_Mises_strain(self): - epsilon = np.random.random((self.n,3,3)) - assert np.allclose(mechanics.Mises_strain(epsilon)[self.c], - mechanics.Mises_strain(epsilon[self.c])) - - def test_vectorize_symmetric(self): x = np.random.random((self.n,3,3)) assert np.allclose(mechanics.symmetric(x)[self.c], 
mechanics.symmetric(x[self.c])) - - def test_vectorize_maximum_shear(self): - x = np.random.random((self.n,3,3)) - assert np.allclose(mechanics.maximum_shear(x)[self.c], - mechanics.maximum_shear(x[self.c])) - - - def test_vectorize_eigenvalues(self): - x = np.random.random((self.n,3,3)) - assert np.allclose(mechanics.eigenvalues(x)[self.c], - mechanics.eigenvalues(x[self.c])) - - - def test_vectorize_eigenvectors(self): - x = np.random.random((self.n,3,3)) - assert np.allclose(mechanics.eigenvectors(x)[self.c], - mechanics.eigenvectors(x[self.c])) - - - def test_vectorize_PK2(self): - F = np.random.random((self.n,3,3)) - P = np.random.random((self.n,3,3)) - assert np.allclose(mechanics.PK2(F,P)[self.c], - mechanics.PK2(F[self.c],P[self.c])) - - def test_vectorize_transpose(self): x = np.random.random((self.n,3,3)) assert np.allclose(mechanics.transpose(x)[self.c], mechanics.transpose(x[self.c])) - def test_vectorize_rotational_part(self): - x = np.random.random((self.n,3,3)) - assert np.allclose(mechanics.rotational_part(x)[self.c], - mechanics.rotational_part(x[self.c])) - - - def test_vectorize_left_stretch(self): - x = np.random.random((self.n,3,3)) - assert np.allclose(mechanics.left_stretch(x)[self.c], - mechanics.left_stretch(x[self.c])) - - - def test_vectorize_right_stretch(self): - x = np.random.random((self.n,3,3)) - assert np.allclose(mechanics.right_stretch(x)[self.c], - mechanics.right_stretch(x[self.c])) - - def test_Cauchy(self): """Ensure Cauchy stress is symmetrized 1. Piola-Kirchhoff stress for no deformation.""" P = np.random.random((self.n,3,3)) assert np.allclose(mechanics.Cauchy(np.broadcast_to(np.eye(3),(self.n,3,3)),P), mechanics.symmetric(P)) + def test_polar_decomposition(self): """F = RU = VR.""" F = np.broadcast_to(np.eye(3),[self.n,3,3])*np.random.random((self.n,3,3)) @@ -216,7 +203,6 @@ class TestMechanics: for i in range(3): assert np.allclose(np.linalg.det(A[s]-lambd[s,i]*np.eye(3)),.0) - def test_eigenvalues_and_vectors(self): """Ensure that eigenvalues and -vectors are the solution to the characteristic polynomial.""" A = mechanics.symmetric(np.random.random((self.n,3,3))) From 5822ad8b05641b6e4e483339405fb5b1c645aa93 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Sat, 15 Feb 2020 15:13:56 +0100 Subject: [PATCH 03/51] new functions (takeover from old branch) --- python/damask/dadf5.py | 374 ++++++++++++++++++++++++++++++----------- 1 file changed, 272 insertions(+), 102 deletions(-) diff --git a/python/damask/dadf5.py b/python/damask/dadf5.py index 819b5603e..88ccd05fe 100644 --- a/python/damask/dadf5.py +++ b/python/damask/dadf5.py @@ -16,23 +16,23 @@ from . import mechanics class DADF5(): """ Read and write to DADF5 files. - + DADF5 files contain DAMASK results. """ - + # ------------------------------------------------------------------ def __init__(self,fname): """ Opens an existing DADF5 file. - + Parameters ---------- fname : str name of the DADF5 file to be openend. 
- + """ with h5py.File(fname,'r') as f: - + try: self.version_major = f.attrs['DADF5_version_major'] self.version_minor = f.attrs['DADF5_version_minor'] @@ -43,16 +43,16 @@ class DADF5(): if self.version_major != 0 or not 2 <= self.version_minor <= 6: raise TypeError('Unsupported DADF5 version {}.{} '.format(f.attrs['DADF5_version_major'], f.attrs['DADF5_version_minor'])) - + self.structured = 'grid' in f['geometry'].attrs.keys() - + if self.structured: self.grid = f['geometry'].attrs['grid'] self.size = f['geometry'].attrs['size'] if self.version_major == 0 and self.version_minor >= 5: self.origin = f['geometry'].attrs['origin'] - + r=re.compile('inc[0-9]+') increments_unsorted = {int(i[3:]):i for i in f.keys() if r.match(i)} self.increments = [increments_unsorted[i] for i in sorted(increments_unsorted)] @@ -78,23 +78,23 @@ class DADF5(): 'constituent': range(self.Nconstituents), # ToDo: stupid naming 'con_physics': self.con_physics, 'mat_physics': self.mat_physics} - + self.fname = fname def __manage_visible(self,datasets,what,action): """ Manages the visibility of the groups. - + Parameters ---------- datasets : list of str or Boolean name of datasets as list, supports ? and * wildcards. True is equivalent to [*], False is equivalent to [] what : str - attribute to change (must be in self.visible) + attribute to change (must be in self.visible) action : str - select from 'set', 'add', and 'del' + select from 'set', 'add', and 'del' """ # allow True/False and string arguments @@ -103,7 +103,7 @@ class DADF5(): elif datasets is False: datasets = [] choice = [datasets] if isinstance(datasets,str) else datasets - + valid = [e for e_ in [glob.fnmatch.filter(getattr(self,what),s) for s in choice] for e in e_] existing = set(self.visible[what]) @@ -113,8 +113,8 @@ class DADF5(): self.visible[what] = list(existing.union(valid)) elif action == 'del': self.visible[what] = list(existing.difference_update(valid)) - - + + def __time_to_inc(self,start,end): selected = [] for i,time in enumerate(self.times): @@ -166,8 +166,8 @@ class DADF5(): """ self.__manage_visible(self.__time_to_inc(start,end),'increments','del') - - + + def set_by_increment(self,start,end): """ Set active time increments based on start and end increment. @@ -229,7 +229,7 @@ class DADF5(): Parameters ---------- what : str - attribute to change (must be in self.visible) + attribute to change (must be in self.visible) """ datasets = self.visible[what] @@ -242,19 +242,19 @@ class DADF5(): last_datasets = self.visible[what] yield dataset self.__manage_visible(datasets,what,'set') - - + + def set_visible(self,what,datasets): """ Set active groups. - + Parameters ---------- datasets : list of str or Boolean name of datasets as list, supports ? and * wildcards. True is equivalent to [*], False is equivalent to [] what : str - attribute to change (must be in self.visible) + attribute to change (must be in self.visible) """ self.__manage_visible(datasets,what,'set') @@ -263,14 +263,14 @@ class DADF5(): def add_visible(self,what,datasets): """ Add to active groups. - + Parameters ---------- datasets : list of str or Boolean name of datasets as list, supports ? and * wildcards. True is equivalent to [*], False is equivalent to [] what : str - attribute to change (must be in self.visible) + attribute to change (must be in self.visible) """ self.__manage_visible(datasets,what,'add') @@ -279,14 +279,14 @@ class DADF5(): def del_visible(self,what,datasets): """ Delete from active groupse. 
- + Parameters ---------- datasets : list of str or Boolean name of datasets as list, supports ? and * wildcards. True is equivalent to [*], False is equivalent to [] what : str - attribute to change (must be in self.visible) + attribute to change (must be in self.visible) """ self.__manage_visible(datasets,what,'del') @@ -295,17 +295,17 @@ class DADF5(): def groups_with_datasets(self,datasets): """ Get groups that contain all requested datasets. - - Only groups within inc?????/constituent/*_*/* inc?????/materialpoint/*_*/* + + Only groups within inc?????/constituent/*_*/* inc?????/materialpoint/*_*/* are considered as they contain the data. Single strings will be treated as list with one entry. - + Wild card matching is allowed, but the number of arguments need to fit. - + Parameters ---------- datasets : iterable or str or boolean - + Examples -------- datasets = False matches no group @@ -315,13 +315,13 @@ class DADF5(): datasets = ['*'] does not match a group with ['F','P','sigma'] datasets = ['*','*'] does not match a group with ['F','P','sigma'] datasets = ['*','*','*'] matches a group with ['F','P','sigma'] - + """ if datasets is False: return [] sets = [datasets] if isinstance(datasets,str) else datasets groups = [] - + with h5py.File(self.fname,'r') as f: for i in self.iter_visible('increments'): for o,p in zip(['constituents','materialpoints'],['con_physics','mat_physics']): @@ -378,8 +378,8 @@ class DADF5(): except KeyError as e: pass return path - - + + def get_constituent_ID(self,c=0): """Pointwise constituent ID.""" with h5py.File(self.fname,'r') as f: @@ -396,7 +396,7 @@ class DADF5(): def read_dataset(self,path,c=0,plain=False): """ Dataset for all points/cells. - + If more than one path is given, the dataset is composed of the individual contributions. """ with h5py.File(self.fname,'r') as f: @@ -405,11 +405,11 @@ class DADF5(): dataset = np.full(shape,np.nan,dtype=np.dtype(f[path[0]])) for pa in path: label = pa.split('/')[2] - + if (pa.split('/')[1] == 'geometry'): dataset = np.array(f[pa]) continue - + p = np.where(f['mapping/cellResults/constituent'][:,c]['Name'] == str.encode(label))[0] if len(p)>0: u = (f['mapping/cellResults/constituent']['Position'][p,c]) @@ -425,7 +425,7 @@ class DADF5(): if len(a.shape) == 1: a=a.reshape([a.shape[0],1]) dataset[p,:] = a[u,:] - + if plain and dataset.dtype.names is not None: return dataset.view(('float64',len(dataset.dtype.names))) else: @@ -444,8 +444,8 @@ class DADF5(): else: with h5py.File(self.fname,'r') as f: return f['geometry/x_c'][()] - - + + def add_absolute(self,x): """ Add absolute value. @@ -469,14 +469,14 @@ class DADF5(): } requested = [{'label':x,'arg':'x'}] - + self.__add_generic_pointwise(__add_absolute,requested) - + def add_calculation(self,formula,label,unit='n/a',description=None,vectorized=True): """ Add result of a general formula. 
- + Parameters ---------- formula : str @@ -493,15 +493,15 @@ class DADF5(): """ if vectorized is False: raise NotImplementedError - + def __add_calculation(**kwargs): - + formula = kwargs['formula'] for d in re.findall(r'#(.*?)#',formula): formula = formula.replace('#{}#'.format(d),"kwargs['{}']['data']".format(d)) - + return { - 'data': eval(formula), + 'data': eval(formula), 'label': kwargs['label'], 'meta': { 'Unit': kwargs['unit'], @@ -509,17 +509,17 @@ class DADF5(): 'Creator': 'dadf5.py:add_calculation v{}'.format(version) } } - + requested = [{'label':d,'arg':d} for d in set(re.findall(r'#(.*?)#',formula))] # datasets used in the formula - pass_through = {'formula':formula,'label':label,'unit':unit,'description':description} - + pass_through = {'formula':formula,'label':label,'unit':unit,'description':description} + self.__add_generic_pointwise(__add_calculation,requested,pass_through) def add_Cauchy(self,P='P',F='F'): """ Add Cauchy stress calculated from 1. Piola-Kirchhoff stress and deformation gradient. - + Parameters ---------- P : str, optional @@ -535,15 +535,17 @@ class DADF5(): 'label': 'sigma', 'meta': { 'Unit': P['meta']['Unit'], - 'Description': 'Cauchy stress calculated from {} ({}) '.format(P['label'],P['meta']['Description'])+\ - 'and deformation gradient {} ({})'.format(F['label'],F['meta']['Description']), + 'Description': 'Cauchy stress calculated from {} ({}) '.format(P['label'], + P['meta']['Description'])+\ + 'and deformation gradient {} ({})'.format(F['label'], + F['meta']['Description']), 'Creator': 'dadf5.py:add_Cauchy v{}'.format(version) } } - + requested = [{'label':F,'arg':'F'}, {'label':P,'arg':'P'} ] - + self.__add_generic_pointwise(__add_Cauchy,requested) @@ -558,7 +560,7 @@ class DADF5(): """ def __add_determinant(x): - + return { 'data': np.linalg.det(x['data']), 'label': 'det({})'.format(x['label']), @@ -568,9 +570,9 @@ class DADF5(): 'Creator': 'dadf5.py:add_determinant v{}'.format(version) } } - + requested = [{'label':x,'arg':'x'}] - + self.__add_generic_pointwise(__add_determinant,requested) @@ -585,10 +587,10 @@ class DADF5(): """ def __add_deviator(x): - + if not np.all(np.array(x['data'].shape[1:]) == np.array([3,3])): raise ValueError - + return { 'data': mechanics.deviatoric_part(x['data']), 'label': 's_{}'.format(x['label']), @@ -598,16 +600,106 @@ class DADF5(): 'Creator': 'dadf5.py:add_deviator v{}'.format(version) } } - + requested = [{'label':x,'arg':'x'}] - + self.__add_generic_pointwise(__add_deviator,requested) - - + + + def add_eigenvalues(self,x): + """ + Add eigenvalues of symmetric tensor. + + Parameters + ---------- + x : str + Label of the dataset containing a symmetric tensor. + + """ + def __add_eigenvalue(x): + + return { + 'data': mechanics.eigenvalues(x['data']), + 'label': 'lambda({})'.format(x['label']), + 'meta' : { + 'Unit': x['meta']['Unit'], + 'Description': 'Eigenvalues of {} ({})'.format(x['label'],x['meta']['Description']), + 'Creator': 'dadf5.py:add_eigenvalues v{}'.format(version) + } + } + requested = [{'label':x,'arg':'x'}] + + self.__add_generic_pointwise(__add_eigenvalue,requested) + + + def add_eigenvectors(self,x): + """ + Add eigenvectors of symmetric tensor. + + Parameters + ---------- + x : str + Label of the dataset containing a symmetric tensor. 
+ + """ + def __add_eigenvector(x): + + return { + 'data': mechanics.eigenvectors(x['data']), + 'label': 'v({})'.format(x['label']), + 'meta' : { + 'Unit': '1', + 'Description': 'Eigenvectors of {} ({})'.format(x['label'],x['meta']['Description']), + 'Creator': 'dadf5.py:add_eigenvectors v{}'.format(version) + } + } + requested = [{'label':x,'arg':'x'}] + + self.__add_generic_pointwise(__add_eigenvector,requested) + + + def add_IPFcolor(self,q,p): + """ + Add RGB color tuple of inverse pole figure (IPF) color. + + Parameters + ---------- + orientation : str + Label of the dataset containing the orientation data as quaternions. + pole : list of int + Pole direction as Miller indices. Default value is [0, 0, 1]. + + """ + def __add_IPFcolor(orientation,pole): + + lattice = orientation['meta']['Lattice'] + unit_pole = pole/np.linalg.norm(pole) + colors = np.empty((len(orientation['data']),3),np.uint8) + + for i,q in enumerate(orientation['data']): + rot = Rotation(np.array([q['w'],q['x'],q['y'],q['z']])) + orientation = Orientation(rot,lattice = lattice).reduced() + colors[i] = np.uint8(orientation.IPFcolor(unit_pole)*255) + return { + 'data': colors, + 'label': 'IPFcolor_[{} {} {}]'.format(*pole), + 'meta' : { + 'Unit': 'RGB (8bit)', + 'Lattice': lattice, + 'Description': 'Inverse Pole Figure colors', + 'Creator': 'dadf5.py:addIPFcolor v{}'.format(version) + } + } + + requested = [{'label':'orientation','arg':'orientation'}] + + self.__add_generic_pointwise(__add_IPFcolor,requested,{'pole':pole}) + + def add_maximum_shear(self,x): """ Add maximum shear components of symmetric tensor. - + Parameters ---------- x : str @@ -634,7 +726,7 @@ class DADF5(): def add_Mises(self,x): """ Add the equivalent Mises stress or strain of a symmetric tensor. - + Parameters ---------- x : str @@ -654,16 +746,94 @@ class DADF5(): 'Creator': 'dadf5.py:add_Mises v{}'.format(version) } } - + requested = [{'label':x,'arg':'x'}] - + self.__add_generic_pointwise(__add_Mises,requested) + def add_PK2(self,F='F',P='P'): + """ + Add 2. Piola-Kirchhoff calculated from 1. Piola-Kirchhoff stress and deformation gradient. + + Parameters + ---------- + P : str, optional + Label of the dataset containing the 1. Piola-Kirchhoff stress. Default value is ‘P’. + F : str, optional + Label of the dataset containing the deformation gradient. Default value is ‘F’. + + """ + def __add_PK2(F,P): + + return { + 'data': mechanics.PK2(F['data'],P['data']), + 'label': 'S', + 'meta': { + 'Unit': P['meta']['Unit'], + 'Description': '2. Kirchhoff stress calculated from {} ({}) '.format(P['label'], + P['meta']['Description'])+\ + 'and deformation gradient {} ({})'.format(F['label'], + F['meta']['Description']), + 'Creator': 'dadf5.py:add_PK2 v{}'.format(version) + } + } + + requested = [{'label':F,'arg':'F'}, + {'label':P,'arg':'P'},] + + self.__add_generic_pointwise(__add_PK2,requested) + + + def addPole(self,q,p,polar=False): + """ + Add coordinates of stereographic projection of given direction (pole) in crystal frame. + + Parameters + ---------- + q : str + Label of the dataset containing the crystallographic orientation as a quaternion. + p : numpy.array of shape (3) + Pole (direction) in crystal frame. + polar : bool, optional + Give pole in polar coordinates. Default is false. 
+ + """ + def __addPole(orientation,pole): + + pole = np.array(pole) + unit_pole /= np.linalg.norm(pole) + coords = np.empty((len(orientation['data']),2)) + + for i,q in enumerate(orientation['data']): + o = Rotation(np.array([q['w'],q['x'],q['y'],q['z']])) + rotatedPole = o*pole # rotate pole according to crystal orientation + (x,y) = rotatedPole[0:2]/(1.+abs(pole[2])) # stereographic projection + + if polar is True: + coords[i] = [np.sqrt(x*x+y*y),np.arctan2(y,x)] + else: + coords[i] = [x,y] + + return { + 'data': coords, + 'label': 'Pole', + 'meta' : { + 'Unit': '1', + 'Description': 'Coordinates of stereographic projection of given direction (pole) in crystal frame', + 'Creator' : 'dadf5.py:addPole v{}'.format(version) + } + } + + requested = [{'label':'orientation','arg':'orientation'}] + + self.__add_generic_pointwise(__addPole,requested,{'pole':pole}) + + def add_norm(self,x,ord=None): """ Add the norm of vector or tensor. - + Parameters ---------- x : str @@ -699,14 +869,14 @@ class DADF5(): requested = [{'label':x,'arg':'x'}] self.__add_generic_pointwise(__add_norm,requested,{'ord':ord}) - - + + def add_principal_components(self,x): """ Add principal components of symmetric tensor. - + The principal components are sorted in descending order, each repeated according to its multiplicity. - + Parameters ---------- x : str @@ -741,7 +911,7 @@ class DADF5(): """ def __add_spherical(x): - + if not np.all(np.array(x['data'].shape[1:]) == np.array([3,3])): raise ValueError @@ -756,16 +926,16 @@ class DADF5(): } requested = [{'label':x,'arg':'x'}] - + self.__add_generic_pointwise(__add_spherical,requested) def add_strain_tensor(self,F='F',t='U',m=0): """ Add strain tensor calculated from a deformation gradient. - + For details refer to damask.mechanics.strain_tensor - + Parameters ---------- F : str, optional @@ -778,9 +948,9 @@ class DADF5(): """ def __add_strain_tensor(F,t,m): - + return { - 'data': mechanics.strain_tensor(F['data'],t,m), + 'data': mechanics.strain_tensor(F['data'],t,m), 'label': 'epsilon_{}^{}({})'.format(t,m,F['label']), 'meta': { 'Unit': F['meta']['Unit'], @@ -790,14 +960,14 @@ class DADF5(): } requested = [{'label':F,'arg':'F'}] - + self.__add_generic_pointwise(__add_strain_tensor,requested,{'t':t,'m':m}) def __add_generic_pointwise(self,func,datasets_requested,extra_args={}): """ General function to add pointwise data. - + Parameters ---------- func : function @@ -811,14 +981,14 @@ class DADF5(): def job(args): """Call function with input data + extra arguments, returns results + group.""" args['results'].put({**args['func'](**args['in']),'group':args['group']}) - + N_threads = 1 # ToDo: should be a parameter results = Queue(N_threads) pool = util.ThreadPool(N_threads) N_added = N_threads + 1 - + todo = [] # ToDo: It would be more memory efficient to read only from file when required, i.e. 
do to it in pool.add_task for group in self.groups_with_datasets([d['label'] for d in datasets_requested]): @@ -831,18 +1001,18 @@ class DADF5(): datasets_in[d['arg']] = {'data': data, 'meta' : meta, 'label' : d['label']} todo.append({'in':{**datasets_in,**extra_args},'func':func,'group':group,'results':results}) - + pool.map(job, todo[:N_added]) # initialize N_not_calculated = len(todo) - while N_not_calculated > 0: + while N_not_calculated > 0: result = results.get() with h5py.File(self.fname,'a') as f: # write to file dataset_out = f[result['group']].create_dataset(result['label'],data=result['data']) for k in result['meta'].keys(): dataset_out.attrs[k] = result['meta'][k].encode() N_not_calculated-=1 - + if N_added < len(todo): # add more jobs pool.add_task(job,todo[N_added]) N_added +=1 @@ -853,7 +1023,7 @@ class DADF5(): def to_vtk(self,labels,mode='Cell'): """ Export to vtk cell/point data. - + Parameters ---------- labels : str or list of @@ -866,24 +1036,24 @@ class DADF5(): if mode=='Cell': if self.structured: - + coordArray = [vtk.vtkDoubleArray(),vtk.vtkDoubleArray(),vtk.vtkDoubleArray()] for dim in [0,1,2]: for c in np.linspace(0,self.size[dim],1+self.grid[dim]): coordArray[dim].InsertNextValue(c) - + vtk_geom = vtk.vtkRectilinearGrid() vtk_geom.SetDimensions(*(self.grid+1)) vtk_geom.SetXCoordinates(coordArray[0]) vtk_geom.SetYCoordinates(coordArray[1]) vtk_geom.SetZCoordinates(coordArray[2]) - + else: - + nodes = vtk.vtkPoints() with h5py.File(self.fname,'r') as f: nodes.SetData(numpy_support.numpy_to_vtk(f['/geometry/x_n'][()],deep=True)) - + vtk_geom = vtk.vtkUnstructuredGrid() vtk_geom.SetPoints(nodes) vtk_geom.Allocate(f['/geometry/T_c'].shape[0]) @@ -910,22 +1080,22 @@ class DADF5(): elif mode == 'Point': Points = vtk.vtkPoints() - Vertices = vtk.vtkCellArray() + Vertices = vtk.vtkCellArray() for c in self.cell_coordinates(): pointID = Points.InsertNextPoint(c) Vertices.InsertNextCell(1) Vertices.InsertCellPoint(pointID) - + vtk_geom = vtk.vtkPolyData() vtk_geom.SetPoints(Points) vtk_geom.SetVerts(Vertices) vtk_geom.Modified() - + N_digits = int(np.floor(np.log10(int(self.increments[-1][3:]))))+1 - + for i,inc in enumerate(self.iter_visible('increments')): vtk_data = [] - + materialpoints_backup = self.visible['materialpoints'].copy() self.set_visible('materialpoints',False) for label in (labels if isinstance(labels,list) else [labels]): @@ -983,8 +1153,8 @@ class DADF5(): vtk_data[-1].SetName('1_'+x[0].split('/',1)[1]) vtk_geom.GetCellData().AddArray(vtk_data[-1]) self.set_visible('constituents',constituents_backup) - - if mode=='Cell': + + if mode=='Cell': writer = vtk.vtkXMLRectilinearGridWriter() if self.structured else \ vtk.vtkXMLUnstructuredGridWriter() x = self.get_dataset_location('u_n') @@ -995,11 +1165,11 @@ class DADF5(): elif mode == 'Point': writer = vtk.vtkXMLPolyDataWriter() - + file_out = '{}_inc{}.{}'.format(os.path.splitext(os.path.basename(self.fname))[0], inc[3:].zfill(N_digits), writer.GetDefaultFileExtension()) - + writer.SetCompressorTypeToZLib() writer.SetDataModeToBinary() writer.SetFileName(file_out) From e3753e94447aca998e950a0bf44ba1da3bf233cd Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Sat, 15 Feb 2020 16:11:57 +0100 Subject: [PATCH 04/51] use central functionality --- processing/post/addSpectralDecomposition.py | 73 +++++---------------- 1 file changed, 16 insertions(+), 57 deletions(-) diff --git a/processing/post/addSpectralDecomposition.py b/processing/post/addSpectralDecomposition.py index c711c6a45..5909ca147 100755 --- 
a/processing/post/addSpectralDecomposition.py +++ b/processing/post/addSpectralDecomposition.py @@ -2,6 +2,7 @@ import os import sys +from io import StringIO from optparse import OptionParser import numpy as np @@ -33,69 +34,27 @@ parser.add_option('--no-check', parser.set_defaults(rh = True, ) + (options,filenames) = parser.parse_args() - -if options.tensor is None: - parser.error('no data column specified.') - -# --- loop over input files ------------------------------------------------------------------------- - if filenames == []: filenames = [None] for name in filenames: - try: - table = damask.ASCIItable(name = name, - buffered = False) - except: continue - damask.util.report(scriptName,name) + damask.util.report(scriptName,name) -# ------------------------------------------ read header ------------------------------------------ + table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) - table.head_read() + for tensor in options.tensor: + t = table.get(tensor).reshape(-1,3,3) + (u,v) = np.linalg.eigh(damask.mechanics.symmetric(t)) + if options.rh: v[np.linalg.det(v) < 0.0,:,2] *= -1.0 -# ------------------------------------------ assemble header 1 ------------------------------------ + for i,o in enumerate(['Min','Mid','Max']): + table.add('eigval{}({})'.format(o,tensor),u[:,i], + scriptID+' '+' '.join(sys.argv[1:])) - items = { - 'tensor': {'dim': 9, 'shape': [3,3], 'labels':options.tensor, 'column': []}, - } - errors = [] - remarks = [] - - for type, data in items.items(): - for what in data['labels']: - dim = table.label_dimension(what) - if dim != data['dim']: remarks.append('column {} is not a {}...'.format(what,type)) - else: - items[type]['column'].append(table.label_index(what)) - for order in ['Min','Mid','Max']: - table.labels_append(['eigval{}({})'.format(order,what)]) # extend ASCII header with new labels - for order in ['Min','Mid','Max']: - table.labels_append(['{}_eigvec{}({})'.format(i+1,order,what) for i in range(3)]) # extend ASCII header with new labels - - if remarks != []: damask.util.croak(remarks) - if errors != []: - damask.util.croak(errors) - table.close(dismiss = True) - continue - -# ------------------------------------------ assemble header 2 ------------------------------------ - - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) - table.head_write() - -# ------------------------------------------ process data ----------------------------------------- - - outputAlive = True - while outputAlive and table.data_read(): # read next data line of ASCII table - for type, data in items.items(): - for column in data['column']: - (u,v) = np.linalg.eigh(np.array(list(map(float,table.data[column:column+data['dim']]))).reshape(data['shape'])) - if options.rh and np.dot(np.cross(v[:,0], v[:,1]), v[:,2]) < 0.0 : v[:, 2] *= -1.0 # ensure right-handed eigenvector basis - table.data_append(list(u)) # vector of max,mid,min eigval - table.data_append(list(v.transpose().reshape(data['dim']))) # 3x3=9 combo vector of max,mid,min eigvec coordinates - outputAlive = table.data_write() # output processed line in accordance with column labeling - -# ------------------------------------------ output finalization ----------------------------------- - - table.close() # close input ASCII table (works for stdin) + for i,o in enumerate(['Min','Mid','Max']): + table.add('eigvec{}({})'.format(o,tensor),v[:,:,i], + scriptID+' '+' '.join(sys.argv[1:])) + + table.to_ASCII(sys.stdout if name is None else name) From 
ad062ada6be482f3c2f7362b4335d83600c1877b Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Sat, 15 Feb 2020 16:26:56 +0100 Subject: [PATCH 05/51] option (as in addSpectralDecomposition) --- python/damask/dadf5.py | 2 +- python/damask/mechanics.py | 10 +++++++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/python/damask/dadf5.py b/python/damask/dadf5.py index 88ccd05fe..d1a66ab10 100644 --- a/python/damask/dadf5.py +++ b/python/damask/dadf5.py @@ -648,7 +648,7 @@ class DADF5(): 'data': mechanics.eigenvectors(x['data']), 'label': 'v({})'.format(x['label']), 'meta' : { - 'Unit': '1', + 'Unit': '1', 'Description': 'Eigenvectors of {} ({})'.format(x['label'],x['meta']['Description']), 'Creator': 'dadf5.py:add_eigenvectors v{}'.format(version) } diff --git a/python/damask/mechanics.py b/python/damask/mechanics.py index 8d89a5e81..79245e887 100644 --- a/python/damask/mechanics.py +++ b/python/damask/mechanics.py @@ -51,7 +51,7 @@ def eigenvalues(x): return np.linalg.eigvalsh(symmetric(x)) -def eigenvectors(x): +def eigenvectors(x,RHS=False): """ Return eigenvectors of a symmetric tensor. @@ -61,9 +61,17 @@ def eigenvectors(x): ---------- x : numpy.array of shape (:,3,3) or (3,3) Symmetric tensor of which the eigenvectors are computed. + RHS: bool, optional + Enforce right-handed coordinate system. Default is False. """ (u,v) = np.linalg.eigh(symmetric(x)) + + if RHS: + if np.shape(x) == (3,3): + if np.linalg.det(v) < 0.0: v[:,2] *= -1.0 + else: + v[np.linalg.det(v) < 0.0,:,2] *= -1.0 return v From 118c03c48589fdf130f875aa6fac0b54762b5679 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Sat, 15 Feb 2020 16:55:12 +0100 Subject: [PATCH 06/51] tests for new functionality --- python/damask/dadf5.py | 82 +++++++++++++++++----------------- python/tests/test_DADF5.py | 27 +++++++++++ python/tests/test_mechanics.py | 13 ++++++ 3 files changed, 81 insertions(+), 41 deletions(-) diff --git a/python/damask/dadf5.py b/python/damask/dadf5.py index d1a66ab10..9f4d39b2f 100644 --- a/python/damask/dadf5.py +++ b/python/damask/dadf5.py @@ -752,6 +752,47 @@ class DADF5(): self.__add_generic_pointwise(__add_Mises,requested) + def add_norm(self,x,ord=None): + """ + Add the norm of vector or tensor. + + Parameters + ---------- + x : str + Label of the dataset containing a vector or tensor. + ord : {non-zero int, inf, -inf, ‘fro’, ‘nuc’}, optional + Order of the norm. inf means numpy’s inf object. For details refer to numpy.linalg.norm. + + """ + def __add_norm(x,ord): + + o = ord + if len(x['data'].shape) == 2: + axis = 1 + t = 'vector' + if o is None: o = 2 + elif len(x['data'].shape) == 3: + axis = (1,2) + t = 'tensor' + if o is None: o = 'fro' + else: + raise ValueError + + return { + 'data': np.linalg.norm(x['data'],ord=o,axis=axis,keepdims=True), + 'label': '|{}|_{}'.format(x['label'],o), + 'meta': { + 'Unit': x['meta']['Unit'], + 'Description': '{}-Norm of {} {} ({})'.format(ord,t,x['label'],x['meta']['Description']), + 'Creator': 'dadf5.py:add_norm v{}'.format(version) + } + } + + requested = [{'label':x,'arg':'x'}] + + self.__add_generic_pointwise(__add_norm,requested,{'ord':ord}) + + def add_PK2(self,F='F',P='P'): """ Add 2. Piola-Kirchhoff calculated from 1. Piola-Kirchhoff stress and deformation gradient. @@ -830,47 +871,6 @@ class DADF5(): self.__add_generic_pointwise(__addPole,requested,{'pole':pole}) - def add_norm(self,x,ord=None): - """ - Add the norm of vector or tensor. - - Parameters - ---------- - x : str - Label of the dataset containing a vector or tensor. 
- ord : {non-zero int, inf, -inf, ‘fro’, ‘nuc’}, optional - Order of the norm. inf means numpy’s inf object. For details refer to numpy.linalg.norm. - - """ - def __add_norm(x,ord): - - o = ord - if len(x['data'].shape) == 2: - axis = 1 - t = 'vector' - if o is None: o = 2 - elif len(x['data'].shape) == 3: - axis = (1,2) - t = 'tensor' - if o is None: o = 'fro' - else: - raise ValueError - - return { - 'data': np.linalg.norm(x['data'],ord=o,axis=axis,keepdims=True), - 'label': '|{}|_{}'.format(x['label'],o), - 'meta': { - 'Unit': x['meta']['Unit'], - 'Description': '{}-Norm of {} {} ({})'.format(ord,t,x['label'],x['meta']['Description']), - 'Creator': 'dadf5.py:add_norm v{}'.format(version) - } - } - - requested = [{'label':x,'arg':'x'}] - - self.__add_generic_pointwise(__add_norm,requested,{'ord':ord}) - - def add_principal_components(self,x): """ Add principal components of symmetric tensor. diff --git a/python/tests/test_DADF5.py b/python/tests/test_DADF5.py index 669ce8446..7169c15a6 100644 --- a/python/tests/test_DADF5.py +++ b/python/tests/test_DADF5.py @@ -73,6 +73,33 @@ class TestDADF5: in_file = default.read_dataset(loc['s_P'],0) assert np.allclose(in_memory,in_file) + def test_add_eigenvalues(self,default): + default.add_Cauchy('P','F') + default.add_eigenvalues('sigma') + loc = {'sigma' :default.get_dataset_location('sigma'), + 'lambda(sigma)':default.get_dataset_location('lambda(sigma)')} + in_memory = mechanics.eigenvalues(default.read_dataset(loc['sigma'],0)) + in_file = default.read_dataset(loc['lambda(sigma)'],0) + assert np.allclose(in_memory,in_file) + + def test_add_eigenvectors(self,default): + default.add_Cauchy('P','F') + default.add_eigenvectors('sigma') + loc = {'sigma' :default.get_dataset_location('sigma'), + 'v(sigma)':default.get_dataset_location('v(sigma)')} + in_memory = mechanics.eigenvectors(default.read_dataset(loc['sigma'],0)) + in_file = default.read_dataset(loc['v(sigma)'],0) + assert np.allclose(in_memory,in_file) + + def test_add_maximum_shear(self,default): + default.add_Cauchy('P','F') + default.add_maximum_shear('sigma') + loc = {'sigma' :default.get_dataset_location('sigma'), + 'max_shear(sigma)':default.get_dataset_location('max_shear(sigma)')} + in_memory = mechanics.maximum_shear(default.read_dataset(loc['sigma'],0)).reshape(-1,1) + in_file = default.read_dataset(loc['max_shear(sigma)'],0) + assert np.allclose(in_memory,in_file) + def test_add_norm(self,default): default.add_norm('F',1) loc = {'F': default.get_dataset_location('F'), diff --git a/python/tests/test_mechanics.py b/python/tests/test_mechanics.py index a13310dce..d0ec7219d 100644 --- a/python/tests/test_mechanics.py +++ b/python/tests/test_mechanics.py @@ -211,3 +211,16 @@ class TestMechanics: s = np.random.randint(self.n) for i in range(3): assert np.allclose(np.dot(A[s]-lambd[s,i]*np.eye(3),x[s,:,i]),.0) + + def test_eigenvectors_RHS(self): + """Ensure that RHS coordinate system does only change sign of determinant.""" + A = mechanics.symmetric(np.random.random((self.n,3,3))) + LRHS = np.linalg.det(mechanics.eigenvectors(A,RHS=False)) + RHS = np.linalg.det(mechanics.eigenvectors(A,RHS=True)) + s = np.random.randint(self.n) + assert np.allclose(np.abs(LRHS),RHS) + + def test_spherical_no_shear(self): + """Ensure that sherical stress has max shear of 0.0.""" + A = mechanics.spherical_part(mechanics.symmetric(np.random.random((self.n,3,3))),True) + assert np.allclose(mechanics.maximum_shear(A),0.0) From 5235c27ad0fc074c9ff94302b0145aa658ac4ac1 Mon Sep 17 00:00:00 2001 From: Martin 
Diehl Date: Sat, 15 Feb 2020 17:56:20 +0100 Subject: [PATCH 07/51] making new mechanics functions available for DADF5 + testing them --- python/damask/dadf5.py | 69 ++++++++++++++++++++++++++------------ python/tests/test_DADF5.py | 42 ++++++++++++++++++++--- 2 files changed, 86 insertions(+), 25 deletions(-) diff --git a/python/damask/dadf5.py b/python/damask/dadf5.py index 9f4d39b2f..b6e09813b 100644 --- a/python/damask/dadf5.py +++ b/python/damask/dadf5.py @@ -516,7 +516,7 @@ class DADF5(): self.__add_generic_pointwise(__add_calculation,requested,pass_through) - def add_Cauchy(self,P='P',F='F'): + def add_Cauchy(self,F='F',P='P'): """ Add Cauchy stress calculated from 1. Piola-Kirchhoff stress and deformation gradient. @@ -537,8 +537,7 @@ class DADF5(): 'Unit': P['meta']['Unit'], 'Description': 'Cauchy stress calculated from {} ({}) '.format(P['label'], P['meta']['Description'])+\ - 'and deformation gradient {} ({})'.format(F['label'], - F['meta']['Description']), + 'and {} ({})'.format(F['label'],F['meta']['Description']), 'Creator': 'dadf5.py:add_Cauchy v{}'.format(version) } } @@ -814,8 +813,7 @@ class DADF5(): 'Unit': P['meta']['Unit'], 'Description': '2. Kirchhoff stress calculated from {} ({}) '.format(P['label'], P['meta']['Description'])+\ - 'and deformation gradient {} ({})'.format(F['label'], - F['meta']['Description']), + 'and {} ({})'.format(F['label'],F['meta']['Description']), 'Creator': 'dadf5.py:add_PK2 v{}'.format(version) } } @@ -871,33 +869,31 @@ class DADF5(): self.__add_generic_pointwise(__addPole,requested,{'pole':pole}) - def add_principal_components(self,x): + def add_rotational_part(self,F='F'): """ - Add principal components of symmetric tensor. - - The principal components are sorted in descending order, each repeated according to its multiplicity. + Add rotational part of a deformation gradient. Parameters ---------- - x : str - Label of the dataset containing a symmetric tensor. + F : str + Label of the dataset containing a deformation gradient. Default value is ‘F’. """ - def __add_principal_components(x): + def __add_rotational_part(F): return { - 'data': mechanics.principal_components(x['data']), - 'label': 'lambda_{}'.format(x['label']), + 'data': mechanics.rotational_part(F['data']), + 'label': 'R({})'.format(F['label']), 'meta': { - 'Unit': x['meta']['Unit'], - 'Description': 'Pricipal components of {} ({})'.format(x['label'],x['meta']['Description']), - 'Creator': 'dadf5.py:add_principal_components v{}'.format(version) + 'Unit': F['meta']['Unit'], + 'Description': 'Rotational part of {} ({})'.format(F['label'],F['meta']['Description']), + 'Creator': 'dadf5.py:add_rotational_part v{}'.format(version) } } - requested = [{'label':x,'arg':'x'}] + requested = [{'label':F,'arg':'F'}] - self.__add_generic_pointwise(__add_principal_components,requested) + self.__add_generic_pointwise(__add_rotational_part,requested) def add_spherical(self,x): @@ -941,8 +937,8 @@ class DADF5(): F : str, optional Label of the dataset containing the deformation gradient. Default value is ‘F’. t : {‘V’, ‘U’}, optional - Type of the polar decomposition, ‘V’ for right stretch tensor and ‘U’ for left stretch tensor. - Defaults value is ‘U’. + Type of the polar decomposition, ‘U’ for right stretch tensor and ‘V’ for left stretch tensor. + Default value is ‘U’. m : float, optional Order of the strain calculation. Default value is ‘0.0’. 
@@ -964,6 +960,37 @@ class DADF5(): self.__add_generic_pointwise(__add_strain_tensor,requested,{'t':t,'m':m}) + def add_stretch_tensor(self,F='F',t='U'): + """ + Add stretch tensor calculated from a deformation gradient. + + Parameters + ---------- + F : str, optional + Label of the dataset containing the deformation gradient. Default value is ‘F’. + t : {‘V’, ‘U’}, optional + Type of the polar decomposition, ‘U’ for right stretch tensor and ‘V’ for left stretch tensor. + Default value is ‘U’. + + """ + def __add_stretch_tensor(F,t): + + return { + 'data': mechanics.left_stretch(F['data']) if t == 'V' else mechanics.right_stretch(F['data']), + 'label': '{}({})'.format(t,F['label']), + 'meta': { + 'Unit': F['meta']['Unit'], + 'Description': '{} stretch tensor of {} ({})'.format('Left' if t == 'V' else 'Right', + F['label'],F['meta']['Description']), + 'Creator': 'dadf5.py:add_stretch_tensor v{}'.format(version) + } + } + + requested = [{'label':F,'arg':'F'}] + + self.__add_generic_pointwise(__add_stretch_tensor,requested,{'t':t}) + + def __add_generic_pointwise(self,func,datasets_requested,extra_args={}): """ General function to add pointwise data. diff --git a/python/tests/test_DADF5.py b/python/tests/test_DADF5.py index 7169c15a6..adc23a97a 100644 --- a/python/tests/test_DADF5.py +++ b/python/tests/test_DADF5.py @@ -48,7 +48,7 @@ class TestDADF5: assert np.allclose(in_memory,in_file) def test_add_Cauchy(self,default): - default.add_Cauchy('P','F') + default.add_Cauchy('F','P') loc = {'F': default.get_dataset_location('F'), 'P': default.get_dataset_location('P'), 'sigma':default.get_dataset_location('sigma')} @@ -74,7 +74,7 @@ class TestDADF5: assert np.allclose(in_memory,in_file) def test_add_eigenvalues(self,default): - default.add_Cauchy('P','F') + default.add_Cauchy('F','P') default.add_eigenvalues('sigma') loc = {'sigma' :default.get_dataset_location('sigma'), 'lambda(sigma)':default.get_dataset_location('lambda(sigma)')} @@ -83,7 +83,7 @@ class TestDADF5: assert np.allclose(in_memory,in_file) def test_add_eigenvectors(self,default): - default.add_Cauchy('P','F') + default.add_Cauchy('F','P') default.add_eigenvectors('sigma') loc = {'sigma' :default.get_dataset_location('sigma'), 'v(sigma)':default.get_dataset_location('v(sigma)')} @@ -92,7 +92,7 @@ class TestDADF5: assert np.allclose(in_memory,in_file) def test_add_maximum_shear(self,default): - default.add_Cauchy('P','F') + default.add_Cauchy('F','P') default.add_maximum_shear('sigma') loc = {'sigma' :default.get_dataset_location('sigma'), 'max_shear(sigma)':default.get_dataset_location('max_shear(sigma)')} @@ -108,6 +108,24 @@ class TestDADF5: in_file = default.read_dataset(loc['|F|_1'],0) assert np.allclose(in_memory,in_file) + def test_add_PK2(self,default): + default.add_PK2('F','P') + loc = {'F':default.get_dataset_location('F'), + 'P':default.get_dataset_location('P'), + 'S':default.get_dataset_location('S')} + in_memory = mechanics.PK2(default.read_dataset(loc['F'],0), + default.read_dataset(loc['P'],0)) + in_file = default.read_dataset(loc['S'],0) + assert np.allclose(in_memory,in_file) + + def test_add_rotational_part(self,default): + default.add_rotational_part('F') + loc = {'F': default.get_dataset_location('F'), + 'R(F)': default.get_dataset_location('R(F)')} + in_memory = mechanics.rotational_part(default.read_dataset(loc['F'],0)) + in_file = default.read_dataset(loc['R(F)'],0) + assert np.allclose(in_memory,in_file) + def test_add_spherical(self,default): default.add_spherical('P') loc = {'P': 
default.get_dataset_location('P'), @@ -115,3 +133,19 @@ class TestDADF5: in_memory = mechanics.spherical_part(default.read_dataset(loc['P'],0)).reshape(-1,1) in_file = default.read_dataset(loc['p_P'],0) assert np.allclose(in_memory,in_file) + + def test_add_stretch_right(self,default): + default.add_stretch_tensor('F','U') + loc = {'F': default.get_dataset_location('F'), + 'U(F)': default.get_dataset_location('U(F)')} + in_memory = mechanics.right_stretch(default.read_dataset(loc['F'],0)) + in_file = default.read_dataset(loc['U(F)'],0) + assert np.allclose(in_memory,in_file) + + def test_add_stretch_left(self,default): + default.add_stretch_tensor('F','V') + loc = {'F': default.get_dataset_location('F'), + 'V(F)': default.get_dataset_location('V(F)')} + in_memory = mechanics.left_stretch(default.read_dataset(loc['F'],0)) + in_file = default.read_dataset(loc['V(F)'],0) + assert np.allclose(in_memory,in_file) From a70721df530ad5b9c3c750d89adf5240687bdf77 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Sat, 15 Feb 2020 20:09:24 +0100 Subject: [PATCH 08/51] write out proper Miller indices --- python/damask/dadf5.py | 41 +++++++++++++++++++++++++++++++---------- 1 file changed, 31 insertions(+), 10 deletions(-) diff --git a/python/damask/dadf5.py b/python/damask/dadf5.py index b6e09813b..67ca1d739 100644 --- a/python/damask/dadf5.py +++ b/python/damask/dadf5.py @@ -1,4 +1,6 @@ -from queue import Queue +from fractions import Fraction +from functools import reduce +from queue import Queue import re import glob import os @@ -11,6 +13,7 @@ import numpy as np from . import util from . import version from . import mechanics +from . import Orientation # ------------------------------------------------------------------ class DADF5(): @@ -657,31 +660,49 @@ class DADF5(): self.__add_generic_pointwise(__add_eigenvector,requested) - def add_IPFcolor(self,q,p): + def add_IPFcolor(self,q,p=[0,0,1]): """ Add RGB color tuple of inverse pole figure (IPF) color. Parameters ---------- - orientation : str + q : str Label of the dataset containing the orientation data as quaternions. - pole : list of int + p : list of int Pole direction as Miller indices. Default value is [0, 0, 1]. 
""" def __add_IPFcolor(orientation,pole): + MAX_DENOMINATOR = 1000 + def lcm(a, b): + """Least common multiple.""" + return a * b // np.gcd(a, b) + + def get_square_denominator(x): + """returns the denominator of the square of a number.""" + return Fraction(x ** 2).limit_denominator(MAX_DENOMINATOR).denominator + + def scale_to_Miller(v): + """Factor to scale vector to integers.""" + denominators = [int(get_square_denominator(i)) for i in v] + s = reduce(lcm, denominators) ** 0.5 + m = (np.array(v)*s).astype(np.int) + return m//reduce(np.gcd,m) + + m = scale_to_Miller(pole) + lattice = orientation['meta']['Lattice'] unit_pole = pole/np.linalg.norm(pole) colors = np.empty((len(orientation['data']),3),np.uint8) for i,q in enumerate(orientation['data']): - rot = Rotation(np.array([q['w'],q['x'],q['y'],q['z']])) - orientation = Orientation(rot,lattice = lattice).reduced() - colors[i] = np.uint8(orientation.IPFcolor(unit_pole)*255) + o = Orientation(np.array([q['w'],q['x'],q['y'],q['z']]),lattice).reduced() + colors[i] = np.uint8(o.IPFcolor(unit_pole)*255) + return { 'data': colors, - 'label': 'IPFcolor_[{} {} {}]'.format(*pole), + 'label': 'IPFcolor_[{} {} {}]'.format(*m), 'meta' : { 'Unit': 'RGB (8bit)', 'Lattice': lattice, @@ -690,9 +711,9 @@ class DADF5(): } } - requested = [{'label':'orientation','arg':'orientation'}] + requested = [{'label':q,'arg':'orientation'}] - self.__add_generic_pointwise(__add_IPFcolor,requested,{'pole':pole}) + self.__add_generic_pointwise(__add_IPFcolor,requested,{'pole':p}) def add_maximum_shear(self,x): From 36c1744a59eea39fee349da5b676dab1f2a73da5 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Sun, 16 Feb 2020 09:15:12 +0100 Subject: [PATCH 09/51] proper indentation --- python/tests/test_mechanics.py | 286 ++++++++++++++++----------------- 1 file changed, 143 insertions(+), 143 deletions(-) diff --git a/python/tests/test_mechanics.py b/python/tests/test_mechanics.py index d0ec7219d..91e3d2d9e 100644 --- a/python/tests/test_mechanics.py +++ b/python/tests/test_mechanics.py @@ -8,219 +8,219 @@ class TestMechanics: def test_vectorize_Cauchy(self): - P = np.random.random((self.n,3,3)) - F = np.random.random((self.n,3,3)) - assert np.allclose(mechanics.Cauchy(F,P)[self.c], - mechanics.Cauchy(F[self.c],P[self.c])) + P = np.random.random((self.n,3,3)) + F = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.Cauchy(F,P)[self.c], + mechanics.Cauchy(F[self.c],P[self.c])) def test_vectorize_deviatoric_part(self): - x = np.random.random((self.n,3,3)) - assert np.allclose(mechanics.deviatoric_part(x)[self.c], - mechanics.deviatoric_part(x[self.c])) + x = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.deviatoric_part(x)[self.c], + mechanics.deviatoric_part(x[self.c])) def test_vectorize_eigenvalues(self): - x = np.random.random((self.n,3,3)) - assert np.allclose(mechanics.eigenvalues(x)[self.c], - mechanics.eigenvalues(x[self.c])) + x = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.eigenvalues(x)[self.c], + mechanics.eigenvalues(x[self.c])) def test_vectorize_eigenvectors(self): - x = np.random.random((self.n,3,3)) - assert np.allclose(mechanics.eigenvectors(x)[self.c], - mechanics.eigenvectors(x[self.c])) + x = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.eigenvectors(x)[self.c], + mechanics.eigenvectors(x[self.c])) def test_vectorize_left_stretch(self): - x = np.random.random((self.n,3,3)) - assert np.allclose(mechanics.left_stretch(x)[self.c], - mechanics.left_stretch(x[self.c])) + x = 
np.random.random((self.n,3,3)) + assert np.allclose(mechanics.left_stretch(x)[self.c], + mechanics.left_stretch(x[self.c])) def test_vectorize_maximum_shear(self): - x = np.random.random((self.n,3,3)) - assert np.allclose(mechanics.maximum_shear(x)[self.c], - mechanics.maximum_shear(x[self.c])) + x = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.maximum_shear(x)[self.c], + mechanics.maximum_shear(x[self.c])) def test_vectorize_Mises_strain(self): - epsilon = np.random.random((self.n,3,3)) - assert np.allclose(mechanics.Mises_strain(epsilon)[self.c], - mechanics.Mises_strain(epsilon[self.c])) + epsilon = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.Mises_strain(epsilon)[self.c], + mechanics.Mises_strain(epsilon[self.c])) def test_vectorize_Mises_stress(self): - sigma = np.random.random((self.n,3,3)) - assert np.allclose(mechanics.Mises_stress(sigma)[self.c], - mechanics.Mises_stress(sigma[self.c])) + sigma = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.Mises_stress(sigma)[self.c], + mechanics.Mises_stress(sigma[self.c])) def test_vectorize_PK2(self): - F = np.random.random((self.n,3,3)) - P = np.random.random((self.n,3,3)) - assert np.allclose(mechanics.PK2(F,P)[self.c], - mechanics.PK2(F[self.c],P[self.c])) + F = np.random.random((self.n,3,3)) + P = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.PK2(F,P)[self.c], + mechanics.PK2(F[self.c],P[self.c])) def test_vectorize_right_stretch(self): - x = np.random.random((self.n,3,3)) - assert np.allclose(mechanics.right_stretch(x)[self.c], - mechanics.right_stretch(x[self.c])) + x = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.right_stretch(x)[self.c], + mechanics.right_stretch(x[self.c])) def test_vectorize_rotational_part(self): - x = np.random.random((self.n,3,3)) - assert np.allclose(mechanics.rotational_part(x)[self.c], - mechanics.rotational_part(x[self.c])) + x = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.rotational_part(x)[self.c], + mechanics.rotational_part(x[self.c])) def test_vectorize_spherical_part(self): - x = np.random.random((self.n,3,3)) - assert np.allclose(mechanics.spherical_part(x,True)[self.c], - mechanics.spherical_part(x[self.c],True)) + x = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.spherical_part(x,True)[self.c], + mechanics.spherical_part(x[self.c],True)) def test_vectorize_strain_tensor(self): - F = np.random.random((self.n,3,3)) - t = ['V','U'][np.random.randint(0,2)] - m = np.random.random()*10. -5.0 - assert np.allclose(mechanics.strain_tensor(F,t,m)[self.c], - mechanics.strain_tensor(F[self.c],t,m)) + F = np.random.random((self.n,3,3)) + t = ['V','U'][np.random.randint(0,2)] + m = np.random.random()*10. -5.0 + assert np.allclose(mechanics.strain_tensor(F,t,m)[self.c], + mechanics.strain_tensor(F[self.c],t,m)) def test_vectorize_symmetric(self): - x = np.random.random((self.n,3,3)) - assert np.allclose(mechanics.symmetric(x)[self.c], - mechanics.symmetric(x[self.c])) + x = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.symmetric(x)[self.c], + mechanics.symmetric(x[self.c])) def test_vectorize_transpose(self): - x = np.random.random((self.n,3,3)) - assert np.allclose(mechanics.transpose(x)[self.c], - mechanics.transpose(x[self.c])) + x = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.transpose(x)[self.c], + mechanics.transpose(x[self.c])) def test_Cauchy(self): - """Ensure Cauchy stress is symmetrized 1. 
Piola-Kirchhoff stress for no deformation.""" - P = np.random.random((self.n,3,3)) - assert np.allclose(mechanics.Cauchy(np.broadcast_to(np.eye(3),(self.n,3,3)),P), - mechanics.symmetric(P)) + """Ensure Cauchy stress is symmetrized 1. Piola-Kirchhoff stress for no deformation.""" + P = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.Cauchy(np.broadcast_to(np.eye(3),(self.n,3,3)),P), + mechanics.symmetric(P)) def test_polar_decomposition(self): - """F = RU = VR.""" - F = np.broadcast_to(np.eye(3),[self.n,3,3])*np.random.random((self.n,3,3)) - R = mechanics.rotational_part(F) - V = mechanics.left_stretch(F) - U = mechanics.right_stretch(F) - assert np.allclose(np.matmul(R,U), - np.matmul(V,R)) + """F = RU = VR.""" + F = np.broadcast_to(np.eye(3),[self.n,3,3])*np.random.random((self.n,3,3)) + R = mechanics.rotational_part(F) + V = mechanics.left_stretch(F) + U = mechanics.right_stretch(F) + assert np.allclose(np.matmul(R,U), + np.matmul(V,R)) def test_PK2(self): - """Ensure 2. Piola-Kirchhoff stress is symmetrized 1. Piola-Kirchhoff stress for no deformation.""" - P = np.random.random((self.n,3,3)) - assert np.allclose(mechanics.PK2(np.broadcast_to(np.eye(3),(self.n,3,3)),P), - mechanics.symmetric(P)) + """Ensure 2. Piola-Kirchhoff stress is symmetrized 1. Piola-Kirchhoff stress for no deformation.""" + P = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.PK2(np.broadcast_to(np.eye(3),(self.n,3,3)),P), + mechanics.symmetric(P)) def test_strain_tensor_no_rotation(self): - """Ensure that left and right stretch give same results for no rotation.""" - F = np.broadcast_to(np.eye(3),[self.n,3,3])*np.random.random((self.n,3,3)) - m = np.random.random()*20.0-10.0 - assert np.allclose(mechanics.strain_tensor(F,'U',m), - mechanics.strain_tensor(F,'V',m)) + """Ensure that left and right stretch give same results for no rotation.""" + F = np.broadcast_to(np.eye(3),[self.n,3,3])*np.random.random((self.n,3,3)) + m = np.random.random()*20.0-10.0 + assert np.allclose(mechanics.strain_tensor(F,'U',m), + mechanics.strain_tensor(F,'V',m)) def test_strain_tensor_rotation_equivalence(self): - """Ensure that left and right strain differ only by a rotation.""" - F = np.broadcast_to(np.eye(3),[self.n,3,3]) + (np.random.random((self.n,3,3))*0.5 - 0.25) - m = np.random.random()*5.0-2.5 - assert np.allclose(np.linalg.det(mechanics.strain_tensor(F,'U',m)), - np.linalg.det(mechanics.strain_tensor(F,'V',m))) + """Ensure that left and right strain differ only by a rotation.""" + F = np.broadcast_to(np.eye(3),[self.n,3,3]) + (np.random.random((self.n,3,3))*0.5 - 0.25) + m = np.random.random()*5.0-2.5 + assert np.allclose(np.linalg.det(mechanics.strain_tensor(F,'U',m)), + np.linalg.det(mechanics.strain_tensor(F,'V',m))) def test_strain_tensor_rotation(self): - """Ensure that pure rotation results in no strain.""" - F = mechanics.rotational_part(np.random.random((self.n,3,3))) - t = ['V','U'][np.random.randint(0,2)] - m = np.random.random()*2.0 - 1.0 - assert np.allclose(mechanics.strain_tensor(F,t,m), - 0.0) + """Ensure that pure rotation results in no strain.""" + F = mechanics.rotational_part(np.random.random((self.n,3,3))) + t = ['V','U'][np.random.randint(0,2)] + m = np.random.random()*2.0 - 1.0 + assert np.allclose(mechanics.strain_tensor(F,t,m), + 0.0) def test_rotation_determinant(self): - """ - Ensure that the determinant of the rotational part is +- 1. + """ + Ensure that the determinant of the rotational part is +- 1. - Should be +1, but random F might contain a reflection. 
- """ - x = np.random.random((self.n,3,3)) - assert np.allclose(np.abs(np.linalg.det(mechanics.rotational_part(x))), - 1.0) + Should be +1, but random F might contain a reflection. + """ + x = np.random.random((self.n,3,3)) + assert np.allclose(np.abs(np.linalg.det(mechanics.rotational_part(x))), + 1.0) def test_spherical_deviatoric_part(self): - """Ensure that full tensor is sum of spherical and deviatoric part.""" - x = np.random.random((self.n,3,3)) - sph = mechanics.spherical_part(x,True) - assert np.allclose(sph + mechanics.deviatoric_part(x), - x) + """Ensure that full tensor is sum of spherical and deviatoric part.""" + x = np.random.random((self.n,3,3)) + sph = mechanics.spherical_part(x,True) + assert np.allclose(sph + mechanics.deviatoric_part(x), + x) def test_deviatoric_Mises(self): - """Ensure that Mises equivalent stress depends only on deviatoric part.""" - x = np.random.random((self.n,3,3)) - full = mechanics.Mises_stress(x) - dev = mechanics.Mises_stress(mechanics.deviatoric_part(x)) - assert np.allclose(full, - dev) + """Ensure that Mises equivalent stress depends only on deviatoric part.""" + x = np.random.random((self.n,3,3)) + full = mechanics.Mises_stress(x) + dev = mechanics.Mises_stress(mechanics.deviatoric_part(x)) + assert np.allclose(full, + dev) def test_spherical_mapping(self): - """Ensure that mapping to tensor is correct.""" - x = np.random.random((self.n,3,3)) - tensor = mechanics.spherical_part(x,True) - scalar = mechanics.spherical_part(x) - assert np.allclose(np.linalg.det(tensor), - scalar**3.0) + """Ensure that mapping to tensor is correct.""" + x = np.random.random((self.n,3,3)) + tensor = mechanics.spherical_part(x,True) + scalar = mechanics.spherical_part(x) + assert np.allclose(np.linalg.det(tensor), + scalar**3.0) def test_spherical_Mises(self): - """Ensure that Mises equivalent strrain of spherical strain is 0.""" - x = np.random.random((self.n,3,3)) - sph = mechanics.spherical_part(x,True) - assert np.allclose(mechanics.Mises_strain(sph), - 0.0) + """Ensure that Mises equivalent strrain of spherical strain is 0.""" + x = np.random.random((self.n,3,3)) + sph = mechanics.spherical_part(x,True) + assert np.allclose(mechanics.Mises_strain(sph), + 0.0) def test_symmetric(self): - """Ensure that a symmetric tensor is half of the sum of a tensor and its transpose.""" - x = np.random.random((self.n,3,3)) - assert np.allclose(mechanics.symmetric(x)*2.0, - mechanics.transpose(x)+x) + """Ensure that a symmetric tensor is half of the sum of a tensor and its transpose.""" + x = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.symmetric(x)*2.0, + mechanics.transpose(x)+x) def test_transpose(self): - """Ensure that a symmetric tensor equals its transpose.""" - x = mechanics.symmetric(np.random.random((self.n,3,3))) - assert np.allclose(mechanics.transpose(x), - x) + """Ensure that a symmetric tensor equals its transpose.""" + x = mechanics.symmetric(np.random.random((self.n,3,3))) + assert np.allclose(mechanics.transpose(x), + x) def test_Mises(self): - """Ensure that equivalent stress is 3/2 of equivalent strain.""" - x = np.random.random((self.n,3,3)) - assert np.allclose(mechanics.Mises_stress(x)/mechanics.Mises_strain(x), - 1.5) + """Ensure that equivalent stress is 3/2 of equivalent strain.""" + x = np.random.random((self.n,3,3)) + assert np.allclose(mechanics.Mises_stress(x)/mechanics.Mises_strain(x), + 1.5) def test_eigenvalues(self): - """Ensure that the characteristic polynomial can be solved.""" - A = 
mechanics.symmetric(np.random.random((self.n,3,3))) - lambd = mechanics.eigenvalues(A) - s = np.random.randint(self.n) - for i in range(3): - assert np.allclose(np.linalg.det(A[s]-lambd[s,i]*np.eye(3)),.0) + """Ensure that the characteristic polynomial can be solved.""" + A = mechanics.symmetric(np.random.random((self.n,3,3))) + lambd = mechanics.eigenvalues(A) + s = np.random.randint(self.n) + for i in range(3): + assert np.allclose(np.linalg.det(A[s]-lambd[s,i]*np.eye(3)),.0) def test_eigenvalues_and_vectors(self): - """Ensure that eigenvalues and -vectors are the solution to the characteristic polynomial.""" - A = mechanics.symmetric(np.random.random((self.n,3,3))) - lambd = mechanics.eigenvalues(A) - x = mechanics.eigenvectors(A) - s = np.random.randint(self.n) - for i in range(3): - assert np.allclose(np.dot(A[s]-lambd[s,i]*np.eye(3),x[s,:,i]),.0) + """Ensure that eigenvalues and -vectors are the solution to the characteristic polynomial.""" + A = mechanics.symmetric(np.random.random((self.n,3,3))) + lambd = mechanics.eigenvalues(A) + x = mechanics.eigenvectors(A) + s = np.random.randint(self.n) + for i in range(3): + assert np.allclose(np.dot(A[s]-lambd[s,i]*np.eye(3),x[s,:,i]),.0) def test_eigenvectors_RHS(self): - """Ensure that RHS coordinate system does only change sign of determinant.""" - A = mechanics.symmetric(np.random.random((self.n,3,3))) - LRHS = np.linalg.det(mechanics.eigenvectors(A,RHS=False)) - RHS = np.linalg.det(mechanics.eigenvectors(A,RHS=True)) - s = np.random.randint(self.n) - assert np.allclose(np.abs(LRHS),RHS) + """Ensure that RHS coordinate system does only change sign of determinant.""" + A = mechanics.symmetric(np.random.random((self.n,3,3))) + LRHS = np.linalg.det(mechanics.eigenvectors(A,RHS=False)) + RHS = np.linalg.det(mechanics.eigenvectors(A,RHS=True)) + s = np.random.randint(self.n) + assert np.allclose(np.abs(LRHS),RHS) def test_spherical_no_shear(self): - """Ensure that sherical stress has max shear of 0.0.""" - A = mechanics.spherical_part(mechanics.symmetric(np.random.random((self.n,3,3))),True) - assert np.allclose(mechanics.maximum_shear(A),0.0) + """Ensure that sherical stress has max shear of 0.0.""" + A = mechanics.spherical_part(mechanics.symmetric(np.random.random((self.n,3,3))),True) + assert np.allclose(mechanics.maximum_shear(A),0.0) From 065fc9ffde7b27ab156fe2b32ade1091e2ea2c0c Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Sun, 16 Feb 2020 09:49:55 +0100 Subject: [PATCH 10/51] using DAMASK_NUM_THREADS controls # workers for add_XXX --- python/damask/dadf5.py | 5 +++-- python/damask/environment.py | 4 ++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/python/damask/dadf5.py b/python/damask/dadf5.py index 67ca1d739..b04e89d3e 100644 --- a/python/damask/dadf5.py +++ b/python/damask/dadf5.py @@ -14,6 +14,7 @@ from . import util from . import version from . import mechanics from . import Orientation +from . 
import Environment # ------------------------------------------------------------------ class DADF5(): @@ -1030,8 +1031,8 @@ class DADF5(): """Call function with input data + extra arguments, returns results + group.""" args['results'].put({**args['func'](**args['in']),'group':args['group']}) - - N_threads = 1 # ToDo: should be a parameter + env = Environment() + N_threads = int(env.options['DAMASK_NUM_THREADS']) results = Queue(N_threads) pool = util.ThreadPool(N_threads) diff --git a/python/damask/environment.py b/python/damask/environment.py index 2fde2c329..4c9ab762c 100644 --- a/python/damask/environment.py +++ b/python/damask/environment.py @@ -8,7 +8,7 @@ class Environment(): def __init__(self): """Read and provide values of DAMASK configuration.""" self.options = {} - self.get_options() + self.__get_options() def relPath(self,relative = '.'): return os.path.join(self.rootDir(),relative) @@ -16,7 +16,7 @@ class Environment(): def rootDir(self): return os.path.normpath(os.path.join(os.path.realpath(__file__),'../../../')) - def get_options(self): + def __get_options(self): for item in ['DAMASK_NUM_THREADS', 'MSC_ROOT', 'MARC_VERSION', From 95cfa3f1736ac277ca3ee2d5c7dcc8410ce0725f Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Sun, 16 Feb 2020 10:04:33 +0100 Subject: [PATCH 11/51] more tests... --- python/tests/test_DADF5.py | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/python/tests/test_DADF5.py b/python/tests/test_DADF5.py index adc23a97a..a2f6cc920 100644 --- a/python/tests/test_DADF5.py +++ b/python/tests/test_DADF5.py @@ -100,6 +100,27 @@ class TestDADF5: in_file = default.read_dataset(loc['max_shear(sigma)'],0) assert np.allclose(in_memory,in_file) + def test_add_Mises_strain(self,default): + t = ['V','U'][np.random.randint(0,2)] + m = np.random.random()*2.0 - 1.0 + default.add_strain_tensor('F',t,m) + label = 'epsilon_{}^{}(F)'.format(t,m) + default.add_Mises(label) + loc = {label :default.get_dataset_location(label), + label+'_vM':default.get_dataset_location(label+'_vM')} + in_memory = mechanics.Mises_strain(default.read_dataset(loc[label],0)).reshape(-1,1) + in_file = default.read_dataset(loc[label+'_vM'],0) + assert np.allclose(in_memory,in_file) + + def test_add_Mises_stress(self,default): + default.add_Cauchy('F','P') + default.add_Mises('sigma') + loc = {'sigma' :default.get_dataset_location('sigma'), + 'sigma_vM':default.get_dataset_location('sigma_vM')} + in_memory = mechanics.Mises_stress(default.read_dataset(loc['sigma'],0)).reshape(-1,1) + in_file = default.read_dataset(loc['sigma_vM'],0) + assert np.allclose(in_memory,in_file) + def test_add_norm(self,default): default.add_norm('F',1) loc = {'F': default.get_dataset_location('F'), @@ -134,6 +155,17 @@ class TestDADF5: in_file = default.read_dataset(loc['p_P'],0) assert np.allclose(in_memory,in_file) + def test_add_strain(self,default): + t = ['V','U'][np.random.randint(0,2)] + m = np.random.random()*2.0 - 1.0 + default.add_strain_tensor('F',t,m) + label = 'epsilon_{}^{}(F)'.format(t,m) + loc = {'F': default.get_dataset_location('F'), + label: default.get_dataset_location(label)} + in_memory = mechanics.strain_tensor(default.read_dataset(loc['F'],0),t,m) + in_file = default.read_dataset(loc[label],0) + assert np.allclose(in_memory,in_file) + def test_add_stretch_right(self,default): default.add_stretch_tensor('F','U') loc = {'F': default.get_dataset_location('F'), From 251d55fe098cd18fccec40cfacba9c2b64c0d485 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Sun, 16 Feb 2020 
11:00:09 +0100 Subject: [PATCH 12/51] current thread pool is useless for performance https://stackoverflow.com/questions/33969151 https://stackoverflow.com/questions/10789042 --- python/damask/dadf5.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/damask/dadf5.py b/python/damask/dadf5.py index b04e89d3e..33fd51566 100644 --- a/python/damask/dadf5.py +++ b/python/damask/dadf5.py @@ -1032,7 +1032,7 @@ class DADF5(): args['results'].put({**args['func'](**args['in']),'group':args['group']}) env = Environment() - N_threads = int(env.options['DAMASK_NUM_THREADS']) + N_threads = 1#int(env.options['DAMASK_NUM_THREADS']) results = Queue(N_threads) pool = util.ThreadPool(N_threads) From 9beca6488c32cabd0df3849a66ff94139e0b6bd1 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Fri, 21 Feb 2020 07:21:45 +0100 Subject: [PATCH 13/51] more verbose description --- python/damask/dadf5.py | 80 ++++++++++++++++-------------------------- python/damask/util.py | 33 ++++++++++++++--- 2 files changed, 59 insertions(+), 54 deletions(-) diff --git a/python/damask/dadf5.py b/python/damask/dadf5.py index 33fd51566..5194b3a4a 100644 --- a/python/damask/dadf5.py +++ b/python/damask/dadf5.py @@ -1,5 +1,3 @@ -from fractions import Fraction -from functools import reduce from queue import Queue import re import glob @@ -13,6 +11,7 @@ import numpy as np from . import util from . import version from . import mechanics +from . import Rotation from . import Orientation from . import Environment @@ -282,7 +281,7 @@ class DADF5(): def del_visible(self,what,datasets): """ - Delete from active groupse. + Delete from active groupe. Parameters ---------- @@ -661,7 +660,7 @@ class DADF5(): self.__add_generic_pointwise(__add_eigenvector,requested) - def add_IPFcolor(self,q,p=[0,0,1]): + def add_IPFcolor(self,q,p): """ Add RGB color tuple of inverse pole figure (IPF) color. @@ -670,51 +669,36 @@ class DADF5(): q : str Label of the dataset containing the orientation data as quaternions. p : list of int - Pole direction as Miller indices. Default value is [0, 0, 1]. + Pole direction as Miller indices. 
""" - def __add_IPFcolor(orientation,pole): + def __add_IPFcolor(q,p): - MAX_DENOMINATOR = 1000 - def lcm(a, b): - """Least common multiple.""" - return a * b // np.gcd(a, b) - - def get_square_denominator(x): - """returns the denominator of the square of a number.""" - return Fraction(x ** 2).limit_denominator(MAX_DENOMINATOR).denominator - - def scale_to_Miller(v): - """Factor to scale vector to integers.""" - denominators = [int(get_square_denominator(i)) for i in v] - s = reduce(lcm, denominators) ** 0.5 - m = (np.array(v)*s).astype(np.int) - return m//reduce(np.gcd,m) - - m = scale_to_Miller(pole) - - lattice = orientation['meta']['Lattice'] + pole = np.array(p) unit_pole = pole/np.linalg.norm(pole) - colors = np.empty((len(orientation['data']),3),np.uint8) + m = util.scale_to_coprime(pole) + colors = np.empty((len(q['data']),3),np.uint8) - for i,q in enumerate(orientation['data']): + lattice = q['meta']['Lattice'] + + for i,q in enumerate(q['data']): o = Orientation(np.array([q['w'],q['x'],q['y'],q['z']]),lattice).reduced() - colors[i] = np.uint8(o.IPFcolor(unit_pole)*255) + colors[i] = np.uint8(o.IPFcolor(unit_pole)*255) return { 'data': colors, - 'label': 'IPFcolor_[{} {} {}]'.format(*m), + 'label': 'IPFcolor_{{{} {} {}>'.format(*m), 'meta' : { 'Unit': 'RGB (8bit)', 'Lattice': lattice, - 'Description': 'Inverse Pole Figure colors', + 'Description': 'Inverse Pole Figure (IPF) colors for direction/plane [{} {} {})'.format(*m), 'Creator': 'dadf5.py:addIPFcolor v{}'.format(version) } } - requested = [{'label':q,'arg':'orientation'}] + requested = [{'label':q,'arg':'q'}] - self.__add_generic_pointwise(__add_IPFcolor,requested,{'pole':p}) + self.__add_generic_pointwise(__add_IPFcolor,requested,{'p':p}) def add_maximum_shear(self,x): @@ -846,49 +830,47 @@ class DADF5(): self.__add_generic_pointwise(__add_PK2,requested) - def addPole(self,q,p,polar=False): + def add_pole(self,q,p,polar=False): """ - Add coordinates of stereographic projection of given direction (pole) in crystal frame. + Add coordinates of stereographic projection of given pole in crystal frame. Parameters ---------- q : str Label of the dataset containing the crystallographic orientation as a quaternion. p : numpy.array of shape (3) - Pole (direction) in crystal frame. + Pole in crystal frame. polar : bool, optional Give pole in polar coordinates. Default is false. 
""" - def __addPole(orientation,pole): + def __add_pole(q,p,polar): - pole = np.array(pole) - unit_pole /= np.linalg.norm(pole) - coords = np.empty((len(orientation['data']),2)) + pole = np.array(p) + unit_pole = pole/np.linalg.norm(pole) + m = util.scale_to_coprime(pole) + coords = np.empty((len(q['data']),2)) - for i,q in enumerate(orientation['data']): + for i,q in enumerate(q['data']): o = Rotation(np.array([q['w'],q['x'],q['y'],q['z']])) rotatedPole = o*pole # rotate pole according to crystal orientation (x,y) = rotatedPole[0:2]/(1.+abs(pole[2])) # stereographic projection - - if polar is True: - coords[i] = [np.sqrt(x*x+y*y),np.arctan2(y,x)] - else: - coords[i] = [x,y] + coords[i] = [np.sqrt(x*x+y*y),np.arctan2(y,x)] if polar else [x,y] return { 'data': coords, - 'label': 'Pole', + 'label': 'p^{}_[{} {} {})'.format(u'rφ' if polar else 'xy',*m), 'meta' : { 'Unit': '1', - 'Description': 'Coordinates of stereographic projection of given direction (pole) in crystal frame', + 'Description': '{} coordinates of stereographic projection of pole (direction/plane) in crystal frame'\ + .format('Polar' if polar else 'Cartesian'), 'Creator' : 'dadf5.py:addPole v{}'.format(version) } } - requested = [{'label':'orientation','arg':'orientation'}] + requested = [{'label':q,'arg':'q'}] - self.__add_generic_pointwise(__addPole,requested,{'pole':pole}) + self.__add_generic_pointwise(__add_pole,requested,{'p':p,'polar':polar}) def add_rotational_part(self,F='F'): diff --git a/python/damask/util.py b/python/damask/util.py index 0065daba5..3ac59b78b 100644 --- a/python/damask/util.py +++ b/python/damask/util.py @@ -3,14 +3,18 @@ import time import os import subprocess import shlex +from fractions import Fraction +from functools import reduce from optparse import Option from queue import Queue from threading import Thread +import numpy as np + class bcolors: """ ASCII Colors (Blender code). - + https://svn.blender.org/svnroot/bf-blender/trunk/blender/build_files/scons/tools/bcolors.py http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python """ @@ -36,7 +40,7 @@ class bcolors: self.BOLD = '' self.UNDERLINE = '' self.CROSSOUT = '' - + # ----------------------------- def srepr(arg,glue = '\n'): @@ -159,11 +163,30 @@ def progressBar(iteration, total, prefix='', bar_length=50): if iteration == total: sys.stderr.write('\n') sys.stderr.flush() - + + +def scale_to_coprime(v): + """Scale vector to co-prime (relatively prime) integers.""" + + MAX_DENOMINATOR = 1000 + + def get_square_denominator(x): + """returns the denominator of the square of a number.""" + return Fraction(x ** 2).limit_denominator(MAX_DENOMINATOR).denominator + + def lcm(a, b): + """Least common multiple.""" + return a * b // np.gcd(a, b) + + denominators = [int(get_square_denominator(i)) for i in v] + s = reduce(lcm, denominators) ** 0.5 + m = (np.array(v)*s).astype(np.int) + return m//reduce(np.gcd,m) + class return_message(): """Object with formatted return message.""" - + def __init__(self,message): """ Sets return message. 
@@ -175,7 +198,7 @@ class return_message(): """ self.message = message - + def __repr__(self): """Return message suitable for interactive shells.""" return srepr(self.message) From effaef46dbc4af0acc0bc584c92402a6a834a380 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Fri, 21 Feb 2020 07:39:53 +0100 Subject: [PATCH 14/51] simplified interface --- python/damask/dadf5.py | 118 +++++++++++++++-------------------------- 1 file changed, 43 insertions(+), 75 deletions(-) diff --git a/python/damask/dadf5.py b/python/damask/dadf5.py index 5194b3a4a..c58713e55 100644 --- a/python/damask/dadf5.py +++ b/python/damask/dadf5.py @@ -459,7 +459,7 @@ class DADF5(): Label of the dataset containing a scalar, vector, or tensor. """ - def __add_absolute(x): + def _add_absolute(x): return { 'data': np.abs(x['data']), @@ -471,9 +471,7 @@ class DADF5(): } } - requested = [{'label':x,'arg':'x'}] - - self.__add_generic_pointwise(__add_absolute,requested) + self.__add_generic_pointwise(_add_absolute,{'x':x}) def add_calculation(self,formula,label,unit='n/a',description=None,vectorized=True): @@ -497,7 +495,7 @@ class DADF5(): if vectorized is False: raise NotImplementedError - def __add_calculation(**kwargs): + def _add_calculation(**kwargs): formula = kwargs['formula'] for d in re.findall(r'#(.*?)#',formula): @@ -513,10 +511,10 @@ class DADF5(): } } - requested = [{'label':d,'arg':d} for d in set(re.findall(r'#(.*?)#',formula))] # datasets used in the formula - pass_through = {'formula':formula,'label':label,'unit':unit,'description':description} + dataset_mapping = {d:d for d in set(re.findall(r'#(.*?)#',formula))} # datasets used in the formula + args = {'formula':formula,'label':label,'unit':unit,'description':description} - self.__add_generic_pointwise(__add_calculation,requested,pass_through) + self.__add_generic_pointwise(_add_calculation,dataset_mapping,args) def add_Cauchy(self,F='F',P='P'): @@ -531,7 +529,7 @@ class DADF5(): Label of the dataset containing the deformation gradient. Default value is ‘F’. """ - def __add_Cauchy(F,P): + def _add_Cauchy(F,P): return { 'data': mechanics.Cauchy(F['data'],P['data']), @@ -545,10 +543,7 @@ class DADF5(): } } - requested = [{'label':F,'arg':'F'}, - {'label':P,'arg':'P'} ] - - self.__add_generic_pointwise(__add_Cauchy,requested) + self.__add_generic_pointwise(_add_Cauchy,{'F':F,'P':P}) def add_determinant(self,x): @@ -561,7 +556,7 @@ class DADF5(): Label of the dataset containing a tensor. """ - def __add_determinant(x): + def _add_determinant(x): return { 'data': np.linalg.det(x['data']), @@ -573,9 +568,7 @@ class DADF5(): } } - requested = [{'label':x,'arg':'x'}] - - self.__add_generic_pointwise(__add_determinant,requested) + self.__add_generic_pointwise(_add_determinant,{'x':x}) def add_deviator(self,x): @@ -588,7 +581,7 @@ class DADF5(): Label of the dataset containing a tensor. """ - def __add_deviator(x): + def _add_deviator(x): if not np.all(np.array(x['data'].shape[1:]) == np.array([3,3])): raise ValueError @@ -603,9 +596,7 @@ class DADF5(): } } - requested = [{'label':x,'arg':'x'}] - - self.__add_generic_pointwise(__add_deviator,requested) + self.__add_generic_pointwise(_add_deviator,{'x':x}) def add_eigenvalues(self,x): @@ -618,7 +609,7 @@ class DADF5(): Label of the dataset containing a symmetric tensor. 
""" - def __add_eigenvalue(x): + def _add_eigenvalue(x): return { 'data': mechanics.eigenvalues(x['data']), @@ -629,9 +620,8 @@ class DADF5(): 'Creator': 'dadf5.py:add_eigenvalues v{}'.format(version) } } - requested = [{'label':x,'arg':'x'}] - self.__add_generic_pointwise(__add_eigenvalue,requested) + self.__add_generic_pointwise(_add_eigenvalue,{'x':x}) def add_eigenvectors(self,x): @@ -644,7 +634,7 @@ class DADF5(): Label of the dataset containing a symmetric tensor. """ - def __add_eigenvector(x): + def _add_eigenvector(x): return { 'data': mechanics.eigenvectors(x['data']), @@ -655,9 +645,8 @@ class DADF5(): 'Creator': 'dadf5.py:add_eigenvectors v{}'.format(version) } } - requested = [{'label':x,'arg':'x'}] - self.__add_generic_pointwise(__add_eigenvector,requested) + self.__add_generic_pointwise(_add_eigenvector,{'x':x}) def add_IPFcolor(self,q,p): @@ -672,7 +661,7 @@ class DADF5(): Pole direction as Miller indices. """ - def __add_IPFcolor(q,p): + def _add_IPFcolor(q,p): pole = np.array(p) unit_pole = pole/np.linalg.norm(pole) @@ -696,9 +685,7 @@ class DADF5(): } } - requested = [{'label':q,'arg':'q'}] - - self.__add_generic_pointwise(__add_IPFcolor,requested,{'p':p}) + self.__add_generic_pointwise(_add_IPFcolor,{'q':q},{'p':p}) def add_maximum_shear(self,x): @@ -711,7 +698,7 @@ class DADF5(): Label of the dataset containing a symmetric tensor. """ - def __add_maximum_shear(x): + def _add_maximum_shear(x): return { 'data': mechanics.maximum_shear(x['data']), @@ -723,9 +710,7 @@ class DADF5(): } } - requested = [{'label':x,'arg':'x'}] - - self.__add_generic_pointwise(__add_maximum_shear,requested) + self.__add_generic_pointwise(_add_maximum_shear,{'x':x}) def add_Mises(self,x): @@ -738,7 +723,7 @@ class DADF5(): Label of the dataset containing a symmetric stress or strain tensor. """ - def __add_Mises(x): + def _add_Mises(x): t = 'strain' if x['meta']['Unit'] == '1' else \ 'stress' @@ -752,9 +737,7 @@ class DADF5(): } } - requested = [{'label':x,'arg':'x'}] - - self.__add_generic_pointwise(__add_Mises,requested) + self.__add_generic_pointwise(_add_Mises,{'x':x}) def add_norm(self,x,ord=None): @@ -769,7 +752,7 @@ class DADF5(): Order of the norm. inf means numpy’s inf object. For details refer to numpy.linalg.norm. """ - def __add_norm(x,ord): + def _add_norm(x,ord): o = ord if len(x['data'].shape) == 2: @@ -793,9 +776,7 @@ class DADF5(): } } - requested = [{'label':x,'arg':'x'}] - - self.__add_generic_pointwise(__add_norm,requested,{'ord':ord}) + self.__add_generic_pointwise(_add_norm,{'x':x},{'ord':ord}) def add_PK2(self,F='F',P='P'): @@ -810,7 +791,7 @@ class DADF5(): Label of the dataset containing the deformation gradient. Default value is ‘F’. """ - def __add_PK2(F,P): + def _add_PK2(F,P): return { 'data': mechanics.PK2(F['data'],P['data']), @@ -824,10 +805,7 @@ class DADF5(): } } - requested = [{'label':F,'arg':'F'}, - {'label':P,'arg':'P'},] - - self.__add_generic_pointwise(__add_PK2,requested) + self.__add_generic_pointwise(_add_PK2,{'F':F,'P':P}) def add_pole(self,q,p,polar=False): @@ -844,7 +822,7 @@ class DADF5(): Give pole in polar coordinates. Default is false. 
""" - def __add_pole(q,p,polar): + def _add_pole(q,p,polar): pole = np.array(p) unit_pole = pole/np.linalg.norm(pole) @@ -868,12 +846,10 @@ class DADF5(): } } - requested = [{'label':q,'arg':'q'}] - - self.__add_generic_pointwise(__add_pole,requested,{'p':p,'polar':polar}) + self.__add_generic_pointwise(_add_pole,{'q':q},{'p':p,'polar':polar}) - def add_rotational_part(self,F='F'): + def add_rotational_part(self,F): """ Add rotational part of a deformation gradient. @@ -883,7 +859,7 @@ class DADF5(): Label of the dataset containing a deformation gradient. Default value is ‘F’. """ - def __add_rotational_part(F): + def _add_rotational_part(F): return { 'data': mechanics.rotational_part(F['data']), @@ -895,9 +871,7 @@ class DADF5(): } } - requested = [{'label':F,'arg':'F'}] - - self.__add_generic_pointwise(__add_rotational_part,requested) + self.__add_generic_pointwise(_add_rotational_part,{'F':F}) def add_spherical(self,x): @@ -910,7 +884,7 @@ class DADF5(): Label of the dataset containing a tensor. """ - def __add_spherical(x): + def _add_spherical(x): if not np.all(np.array(x['data'].shape[1:]) == np.array([3,3])): raise ValueError @@ -925,9 +899,7 @@ class DADF5(): } } - requested = [{'label':x,'arg':'x'}] - - self.__add_generic_pointwise(__add_spherical,requested) + self.__add_generic_pointwise(_add_spherical,{'x':x}) def add_strain_tensor(self,F='F',t='U',m=0): @@ -947,7 +919,7 @@ class DADF5(): Order of the strain calculation. Default value is ‘0.0’. """ - def __add_strain_tensor(F,t,m): + def _add_strain_tensor(F,t,m): return { 'data': mechanics.strain_tensor(F['data'],t,m), @@ -959,9 +931,7 @@ class DADF5(): } } - requested = [{'label':F,'arg':'F'}] - - self.__add_generic_pointwise(__add_strain_tensor,requested,{'t':t,'m':m}) + self.__add_generic_pointwise(_add_strain_tensor,{'F':F},{'t':t,'m':m}) def add_stretch_tensor(self,F='F',t='U'): @@ -977,7 +947,7 @@ class DADF5(): Default value is ‘U’. """ - def __add_stretch_tensor(F,t): + def _add_stretch_tensor(F,t): return { 'data': mechanics.left_stretch(F['data']) if t == 'V' else mechanics.right_stretch(F['data']), @@ -990,12 +960,10 @@ class DADF5(): } } - requested = [{'label':F,'arg':'F'}] - - self.__add_generic_pointwise(__add_stretch_tensor,requested,{'t':t}) + self.__add_generic_pointwise(_add_stretch_tensor,{'F':F},{'t':t}) - def __add_generic_pointwise(self,func,datasets_requested,extra_args={}): + def __add_generic_pointwise(self,func,dataset_mapping,args={}): """ General function to add pointwise data. @@ -1022,16 +990,16 @@ class DADF5(): todo = [] # ToDo: It would be more memory efficient to read only from file when required, i.e. 
do to it in pool.add_task - for group in self.groups_with_datasets([d['label'] for d in datasets_requested]): + for group in self.groups_with_datasets(dataset_mapping.values()): with h5py.File(self.fname,'r') as f: datasets_in = {} - for d in datasets_requested: - loc = f[group+'/'+d['label']] + for arg,label in dataset_mapping.items(): + loc = f[group+'/'+label] data = loc[()] meta = {k:loc.attrs[k].decode() for k in loc.attrs.keys()} - datasets_in[d['arg']] = {'data': data, 'meta' : meta, 'label' : d['label']} + datasets_in[arg] = {'data': data, 'meta': meta, 'label': label} - todo.append({'in':{**datasets_in,**extra_args},'func':func,'group':group,'results':results}) + todo.append({'in':{**datasets_in,**args},'func':func,'group':group,'results':results}) pool.map(job, todo[:N_added]) # initialize From c3740b4ba042afa541921f706662aba02d58a405 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Fri, 21 Feb 2020 07:45:05 +0100 Subject: [PATCH 15/51] follow 4 space indentation convention --- python/damask/dadf5.py | 2134 ++++++++++++++++++++-------------------- 1 file changed, 1066 insertions(+), 1068 deletions(-) diff --git a/python/damask/dadf5.py b/python/damask/dadf5.py index c58713e55..f61a471a9 100644 --- a/python/damask/dadf5.py +++ b/python/damask/dadf5.py @@ -15,1163 +15,1161 @@ from . import Rotation from . import Orientation from . import Environment -# ------------------------------------------------------------------ class DADF5(): - """ - Read and write to DADF5 files. - - DADF5 files contain DAMASK results. - """ - -# ------------------------------------------------------------------ - def __init__(self,fname): """ - Opens an existing DADF5 file. - - Parameters - ---------- - fname : str - name of the DADF5 file to be openend. + Read and write to DADF5 files. + DADF5 files contain DAMASK results. """ - with h5py.File(fname,'r') as f: - try: - self.version_major = f.attrs['DADF5_version_major'] - self.version_minor = f.attrs['DADF5_version_minor'] - except KeyError: - self.version_major = f.attrs['DADF5-major'] - self.version_minor = f.attrs['DADF5-minor'] + def __init__(self,fname): + """ + Opens an existing DADF5 file. - if self.version_major != 0 or not 2 <= self.version_minor <= 6: - raise TypeError('Unsupported DADF5 version {}.{} '.format(f.attrs['DADF5_version_major'], - f.attrs['DADF5_version_minor'])) + Parameters + ---------- + fname : str + name of the DADF5 file to be openend. 
- self.structured = 'grid' in f['geometry'].attrs.keys() + """ + with h5py.File(fname,'r') as f: - if self.structured: - self.grid = f['geometry'].attrs['grid'] - self.size = f['geometry'].attrs['size'] - if self.version_major == 0 and self.version_minor >= 5: - self.origin = f['geometry'].attrs['origin'] + try: + self.version_major = f.attrs['DADF5_version_major'] + self.version_minor = f.attrs['DADF5_version_minor'] + except KeyError: + self.version_major = f.attrs['DADF5-major'] + self.version_minor = f.attrs['DADF5-minor'] + + if self.version_major != 0 or not 2 <= self.version_minor <= 6: + raise TypeError('Unsupported DADF5 version {}.{} '.format(f.attrs['DADF5_version_major'], + f.attrs['DADF5_version_minor'])) + + self.structured = 'grid' in f['geometry'].attrs.keys() + + if self.structured: + self.grid = f['geometry'].attrs['grid'] + self.size = f['geometry'].attrs['size'] + if self.version_major == 0 and self.version_minor >= 5: + self.origin = f['geometry'].attrs['origin'] - r=re.compile('inc[0-9]+') - increments_unsorted = {int(i[3:]):i for i in f.keys() if r.match(i)} - self.increments = [increments_unsorted[i] for i in sorted(increments_unsorted)] - self.times = [round(f[i].attrs['time/s'],12) for i in self.increments] + r=re.compile('inc[0-9]+') + increments_unsorted = {int(i[3:]):i for i in f.keys() if r.match(i)} + self.increments = [increments_unsorted[i] for i in sorted(increments_unsorted)] + self.times = [round(f[i].attrs['time/s'],12) for i in self.increments] - self.Nmaterialpoints, self.Nconstituents = np.shape(f['mapping/cellResults/constituent']) - self.materialpoints = [m.decode() for m in np.unique(f['mapping/cellResults/materialpoint']['Name'])] - self.constituents = [c.decode() for c in np.unique(f['mapping/cellResults/constituent'] ['Name'])] + self.Nmaterialpoints, self.Nconstituents = np.shape(f['mapping/cellResults/constituent']) + self.materialpoints = [m.decode() for m in np.unique(f['mapping/cellResults/materialpoint']['Name'])] + self.constituents = [c.decode() for c in np.unique(f['mapping/cellResults/constituent'] ['Name'])] - self.con_physics = [] - for c in self.constituents: - self.con_physics += f['/'.join([self.increments[0],'constituent',c])].keys() - self.con_physics = list(set(self.con_physics)) # make unique + self.con_physics = [] + for c in self.constituents: + self.con_physics += f['/'.join([self.increments[0],'constituent',c])].keys() + self.con_physics = list(set(self.con_physics)) # make unique - self.mat_physics = [] - for m in self.materialpoints: - self.mat_physics += f['/'.join([self.increments[0],'materialpoint',m])].keys() - self.mat_physics = list(set(self.mat_physics)) # make unique + self.mat_physics = [] + for m in self.materialpoints: + self.mat_physics += f['/'.join([self.increments[0],'materialpoint',m])].keys() + self.mat_physics = list(set(self.mat_physics)) # make unique - self.visible= {'increments': self.increments, - 'constituents': self.constituents, - 'materialpoints': self.materialpoints, - 'constituent': range(self.Nconstituents), # ToDo: stupid naming - 'con_physics': self.con_physics, - 'mat_physics': self.mat_physics} + self.visible= {'increments': self.increments, + 'constituents': self.constituents, + 'materialpoints': self.materialpoints, + 'constituent': range(self.Nconstituents), # ToDo: stupid naming + 'con_physics': self.con_physics, + 'mat_physics': self.mat_physics} - self.fname = fname + self.fname = fname - def __manage_visible(self,datasets,what,action): - """ - Manages the visibility of the 
groups. + def __manage_visible(self,datasets,what,action): + """ + Manages the visibility of the groups. - Parameters - ---------- - datasets : list of str or Boolean - name of datasets as list, supports ? and * wildcards. - True is equivalent to [*], False is equivalent to [] - what : str - attribute to change (must be in self.visible) - action : str - select from 'set', 'add', and 'del' + Parameters + ---------- + datasets : list of str or Boolean + name of datasets as list, supports ? and * wildcards. + True is equivalent to [*], False is equivalent to [] + what : str + attribute to change (must be in self.visible) + action : str + select from 'set', 'add', and 'del' - """ - # allow True/False and string arguments - if datasets is True: - datasets = ['*'] - elif datasets is False: - datasets = [] - choice = [datasets] if isinstance(datasets,str) else datasets + """ + # allow True/False and string arguments + if datasets is True: + datasets = ['*'] + elif datasets is False: + datasets = [] + choice = [datasets] if isinstance(datasets,str) else datasets - valid = [e for e_ in [glob.fnmatch.filter(getattr(self,what),s) for s in choice] for e in e_] - existing = set(self.visible[what]) + valid = [e for e_ in [glob.fnmatch.filter(getattr(self,what),s) for s in choice] for e in e_] + existing = set(self.visible[what]) - if action == 'set': - self.visible[what] = valid - elif action == 'add': - self.visible[what] = list(existing.union(valid)) - elif action == 'del': - self.visible[what] = list(existing.difference_update(valid)) + if action == 'set': + self.visible[what] = valid + elif action == 'add': + self.visible[what] = list(existing.union(valid)) + elif action == 'del': + self.visible[what] = list(existing.difference_update(valid)) - def __time_to_inc(self,start,end): - selected = [] - for i,time in enumerate(self.times): - if start <= time <= end: - selected.append(self.increments[i]) - return selected + def __time_to_inc(self,start,end): + selected = [] + for i,time in enumerate(self.times): + if start <= time <= end: + selected.append(self.increments[i]) + return selected - def set_by_time(self,start,end): - """ - Set active increments based on start and end time. + def set_by_time(self,start,end): + """ + Set active increments based on start and end time. - Parameters - ---------- - start : float - start time (included) - end : float - end time (included) + Parameters + ---------- + start : float + start time (included) + end : float + end time (included) - """ - self.__manage_visible(self.__time_to_inc(start,end),'increments','set') + """ + self.__manage_visible(self.__time_to_inc(start,end),'increments','set') - def add_by_time(self,start,end): - """ - Add to active increments based on start and end time. + def add_by_time(self,start,end): + """ + Add to active increments based on start and end time. - Parameters - ---------- - start : float - start time (included) - end : float - end time (included) + Parameters + ---------- + start : float + start time (included) + end : float + end time (included) - """ - self.__manage_visible(self.__time_to_inc(start,end),'increments','add') + """ + self.__manage_visible(self.__time_to_inc(start,end),'increments','add') - def del_by_time(self,start,end): - """ - Delete from active increments based on start and end time. + def del_by_time(self,start,end): + """ + Delete from active increments based on start and end time. 
- Parameters - ---------- - start : float - start time (included) - end : float - end time (included) + Parameters + ---------- + start : float + start time (included) + end : float + end time (included) - """ - self.__manage_visible(self.__time_to_inc(start,end),'increments','del') + """ + self.__manage_visible(self.__time_to_inc(start,end),'increments','del') - def set_by_increment(self,start,end): - """ - Set active time increments based on start and end increment. + def set_by_increment(self,start,end): + """ + Set active time increments based on start and end increment. - Parameters - ---------- - start : int - start increment (included) - end : int - end increment (included) + Parameters + ---------- + start : int + start increment (included) + end : int + end increment (included) - """ - if self.version_minor >= 4: - self.__manage_visible([ 'inc{}'.format(i) for i in range(start,end+1)],'increments','set') - else: - self.__manage_visible(['inc{:05d}'.format(i) for i in range(start,end+1)],'increments','set') + """ + if self.version_minor >= 4: + self.__manage_visible([ 'inc{}'.format(i) for i in range(start,end+1)],'increments','set') + else: + self.__manage_visible(['inc{:05d}'.format(i) for i in range(start,end+1)],'increments','set') - def add_by_increment(self,start,end): - """ - Add to active time increments based on start and end increment. + def add_by_increment(self,start,end): + """ + Add to active time increments based on start and end increment. - Parameters - ---------- - start : int - start increment (included) - end : int - end increment (included) + Parameters + ---------- + start : int + start increment (included) + end : int + end increment (included) - """ - if self.version_minor >= 4: - self.__manage_visible([ 'inc{}'.format(i) for i in range(start,end+1)],'increments','add') - else: - self.__manage_visible(['inc{:05d}'.format(i) for i in range(start,end+1)],'increments','add') + """ + if self.version_minor >= 4: + self.__manage_visible([ 'inc{}'.format(i) for i in range(start,end+1)],'increments','add') + else: + self.__manage_visible(['inc{:05d}'.format(i) for i in range(start,end+1)],'increments','add') - def del_by_increment(self,start,end): - """ - Delet from active time increments based on start and end increment. + def del_by_increment(self,start,end): + """ + Delet from active time increments based on start and end increment. - Parameters - ---------- - start : int - start increment (included) - end : int - end increment (included) + Parameters + ---------- + start : int + start increment (included) + end : int + end increment (included) - """ - if self.version_minor >= 4: - self.__manage_visible([ 'inc{}'.format(i) for i in range(start,end+1)],'increments','del') - else: - self.__manage_visible(['inc{:05d}'.format(i) for i in range(start,end+1)],'increments','del') + """ + if self.version_minor >= 4: + self.__manage_visible([ 'inc{}'.format(i) for i in range(start,end+1)],'increments','del') + else: + self.__manage_visible(['inc{:05d}'.format(i) for i in range(start,end+1)],'increments','del') - def iter_visible(self,what): - """ - Iterate over visible items by setting each one visible. + def iter_visible(self,what): + """ + Iterate over visible items by setting each one visible. 
- Parameters - ---------- - what : str - attribute to change (must be in self.visible) + Parameters + ---------- + what : str + attribute to change (must be in self.visible) - """ - datasets = self.visible[what] - last_datasets = datasets.copy() - for dataset in datasets: - if last_datasets != self.visible[what]: + """ + datasets = self.visible[what] + last_datasets = datasets.copy() + for dataset in datasets: + if last_datasets != self.visible[what]: + self.__manage_visible(datasets,what,'set') + raise Exception + self.__manage_visible(dataset,what,'set') + last_datasets = self.visible[what] + yield dataset self.__manage_visible(datasets,what,'set') - raise Exception - self.__manage_visible(dataset,what,'set') - last_datasets = self.visible[what] - yield dataset - self.__manage_visible(datasets,what,'set') - def set_visible(self,what,datasets): - """ - Set active groups. + def set_visible(self,what,datasets): + """ + Set active groups. - Parameters - ---------- - datasets : list of str or Boolean - name of datasets as list, supports ? and * wildcards. - True is equivalent to [*], False is equivalent to [] - what : str - attribute to change (must be in self.visible) + Parameters + ---------- + datasets : list of str or Boolean + name of datasets as list, supports ? and * wildcards. + True is equivalent to [*], False is equivalent to [] + what : str + attribute to change (must be in self.visible) - """ - self.__manage_visible(datasets,what,'set') + """ + self.__manage_visible(datasets,what,'set') - def add_visible(self,what,datasets): - """ - Add to active groups. + def add_visible(self,what,datasets): + """ + Add to active groups. - Parameters - ---------- - datasets : list of str or Boolean - name of datasets as list, supports ? and * wildcards. - True is equivalent to [*], False is equivalent to [] - what : str - attribute to change (must be in self.visible) + Parameters + ---------- + datasets : list of str or Boolean + name of datasets as list, supports ? and * wildcards. + True is equivalent to [*], False is equivalent to [] + what : str + attribute to change (must be in self.visible) - """ - self.__manage_visible(datasets,what,'add') + """ + self.__manage_visible(datasets,what,'add') - def del_visible(self,what,datasets): - """ - Delete from active groupe. + def del_visible(self,what,datasets): + """ + Delete from active groupe. - Parameters - ---------- - datasets : list of str or Boolean - name of datasets as list, supports ? and * wildcards. - True is equivalent to [*], False is equivalent to [] - what : str - attribute to change (must be in self.visible) + Parameters + ---------- + datasets : list of str or Boolean + name of datasets as list, supports ? and * wildcards. + True is equivalent to [*], False is equivalent to [] + what : str + attribute to change (must be in self.visible) - """ - self.__manage_visible(datasets,what,'del') + """ + self.__manage_visible(datasets,what,'del') - def groups_with_datasets(self,datasets): - """ - Get groups that contain all requested datasets. + def groups_with_datasets(self,datasets): + """ + Get groups that contain all requested datasets. - Only groups within inc?????/constituent/*_*/* inc?????/materialpoint/*_*/* - are considered as they contain the data. - Single strings will be treated as list with one entry. + Only groups within inc?????/constituent/*_*/* inc?????/materialpoint/*_*/* + are considered as they contain the data. + Single strings will be treated as list with one entry. 
- Wild card matching is allowed, but the number of arguments need to fit. + Wild card matching is allowed, but the number of arguments need to fit. - Parameters - ---------- - datasets : iterable or str or boolean + Parameters + ---------- + datasets : iterable or str or boolean - Examples - -------- - datasets = False matches no group - datasets = True matches all groups - datasets = ['F','P'] matches a group with ['F','P','sigma'] - datasets = ['*','P'] matches a group with ['F','P'] - datasets = ['*'] does not match a group with ['F','P','sigma'] - datasets = ['*','*'] does not match a group with ['F','P','sigma'] - datasets = ['*','*','*'] matches a group with ['F','P','sigma'] + Examples + -------- + datasets = False matches no group + datasets = True matches all groups + datasets = ['F','P'] matches a group with ['F','P','sigma'] + datasets = ['*','P'] matches a group with ['F','P'] + datasets = ['*'] does not match a group with ['F','P','sigma'] + datasets = ['*','*'] does not match a group with ['F','P','sigma'] + datasets = ['*','*','*'] matches a group with ['F','P','sigma'] - """ - if datasets is False: return [] - sets = [datasets] if isinstance(datasets,str) else datasets + """ + if datasets is False: return [] + sets = [datasets] if isinstance(datasets,str) else datasets - groups = [] + groups = [] - with h5py.File(self.fname,'r') as f: - for i in self.iter_visible('increments'): - for o,p in zip(['constituents','materialpoints'],['con_physics','mat_physics']): - for oo in self.iter_visible(o): - for pp in self.iter_visible(p): - group = '/'.join([i,o[:-1],oo,pp]) # o[:-1]: plural/singular issue - if sets is True: - groups.append(group) - else: - match = [e for e_ in [glob.fnmatch.filter(f[group].keys(),s) for s in sets] for e in e_] - if len(set(match)) == len(sets) : groups.append(group) - return groups - - - def list_data(self): - """Return information on all active datasets in the file.""" - message = '' - with h5py.File(self.fname,'r') as f: - for i in self.iter_visible('increments'): - message+='\n{} ({}s)\n'.format(i,self.times[self.increments.index(i)]) - for o,p in zip(['constituents','materialpoints'],['con_physics','mat_physics']): - for oo in self.iter_visible(o): - message+=' {}\n'.format(oo) - for pp in self.iter_visible(p): - message+=' {}\n'.format(pp) - group = '/'.join([i,o[:-1],oo,pp]) # o[:-1]: plural/singular issue - for d in f[group].keys(): - try: - dataset = f['/'.join([group,d])] - message+=' {} / ({}): {}\n'.format(d,dataset.attrs['Unit'].decode(),dataset.attrs['Description'].decode()) - except KeyError: - pass - return message - - - def get_dataset_location(self,label): - """Return the location of all active datasets with given label.""" - path = [] - with h5py.File(self.fname,'r') as f: - for i in self.iter_visible('increments'): - k = '/'.join([i,'geometry',label]) - try: - f[k] - path.append(k) - except KeyError as e: - pass - for o,p in zip(['constituents','materialpoints'],['con_physics','mat_physics']): - for oo in self.iter_visible(o): - for pp in self.iter_visible(p): - k = '/'.join([i,o[:-1],oo,pp,label]) - try: - f[k] - path.append(k) - except KeyError as e: - pass - return path - - - def get_constituent_ID(self,c=0): - """Pointwise constituent ID.""" - with h5py.File(self.fname,'r') as f: - names = f['/mapping/cellResults/constituent']['Name'][:,c].astype('str') - return np.array([int(n.split('_')[0]) for n in names.tolist()],dtype=np.int32) - - - def get_crystal_structure(self): # ToDo: extension to multi constituents/phase - """Info 
about the crystal structure.""" - with h5py.File(self.fname,'r') as f: - return f[self.get_dataset_location('orientation')[0]].attrs['Lattice'].astype('str') # np.bytes_ to string - - - def read_dataset(self,path,c=0,plain=False): - """ - Dataset for all points/cells. - - If more than one path is given, the dataset is composed of the individual contributions. - """ - with h5py.File(self.fname,'r') as f: - shape = (self.Nmaterialpoints,) + np.shape(f[path[0]])[1:] - if len(shape) == 1: shape = shape +(1,) - dataset = np.full(shape,np.nan,dtype=np.dtype(f[path[0]])) - for pa in path: - label = pa.split('/')[2] - - if (pa.split('/')[1] == 'geometry'): - dataset = np.array(f[pa]) - continue - - p = np.where(f['mapping/cellResults/constituent'][:,c]['Name'] == str.encode(label))[0] - if len(p)>0: - u = (f['mapping/cellResults/constituent']['Position'][p,c]) - a = np.array(f[pa]) - if len(a.shape) == 1: - a=a.reshape([a.shape[0],1]) - dataset[p,:] = a[u,:] - - p = np.where(f['mapping/cellResults/materialpoint']['Name'] == str.encode(label))[0] - if len(p)>0: - u = (f['mapping/cellResults/materialpoint']['Position'][p.tolist()]) - a = np.array(f[pa]) - if len(a.shape) == 1: - a=a.reshape([a.shape[0],1]) - dataset[p,:] = a[u,:] - - if plain and dataset.dtype.names is not None: - return dataset.view(('float64',len(dataset.dtype.names))) - else: - return dataset - - - def cell_coordinates(self): - """Return initial coordinates of the cell centers.""" - if self.structured: - delta = self.size/self.grid*0.5 - z, y, x = np.meshgrid(np.linspace(delta[2],self.size[2]-delta[2],self.grid[2]), - np.linspace(delta[1],self.size[1]-delta[1],self.grid[1]), - np.linspace(delta[0],self.size[0]-delta[0],self.grid[0]), - ) - return np.concatenate((x[:,:,:,None],y[:,:,:,None],z[:,:,:,None]),axis = 3).reshape([np.product(self.grid),3]) - else: - with h5py.File(self.fname,'r') as f: - return f['geometry/x_c'][()] - - - def add_absolute(self,x): - """ - Add absolute value. - - Parameters - ---------- - x : str - Label of the dataset containing a scalar, vector, or tensor. - - """ - def _add_absolute(x): - - return { - 'data': np.abs(x['data']), - 'label': '|{}|'.format(x['label']), - 'meta': { - 'Unit': x['meta']['Unit'], - 'Description': 'Absolute value of {} ({})'.format(x['label'],x['meta']['Description']), - 'Creator': 'dadf5.py:add_abs v{}'.format(version) - } - } - - self.__add_generic_pointwise(_add_absolute,{'x':x}) - - - def add_calculation(self,formula,label,unit='n/a',description=None,vectorized=True): - """ - Add result of a general formula. - - Parameters - ---------- - formula : str - Formula, refer to datasets by ‘#Label#‘. - label : str - Label of the dataset containing the result of the calculation. - unit : str, optional - Physical unit of the result. - description : str, optional - Human readable description of the result. - vectorized : bool, optional - Indicate whether the formula is written in vectorized form. Default is ‘True’. 
- - """ - if vectorized is False: - raise NotImplementedError - - def _add_calculation(**kwargs): - - formula = kwargs['formula'] - for d in re.findall(r'#(.*?)#',formula): - formula = formula.replace('#{}#'.format(d),"kwargs['{}']['data']".format(d)) - - return { - 'data': eval(formula), - 'label': kwargs['label'], - 'meta': { - 'Unit': kwargs['unit'], - 'Description': '{} (formula: {})'.format(kwargs['description'],kwargs['formula']), - 'Creator': 'dadf5.py:add_calculation v{}'.format(version) - } - } - - dataset_mapping = {d:d for d in set(re.findall(r'#(.*?)#',formula))} # datasets used in the formula - args = {'formula':formula,'label':label,'unit':unit,'description':description} - - self.__add_generic_pointwise(_add_calculation,dataset_mapping,args) - - - def add_Cauchy(self,F='F',P='P'): - """ - Add Cauchy stress calculated from 1. Piola-Kirchhoff stress and deformation gradient. - - Parameters - ---------- - P : str, optional - Label of the dataset containing the 1. Piola-Kirchhoff stress. Default value is ‘P’. - F : str, optional - Label of the dataset containing the deformation gradient. Default value is ‘F’. - - """ - def _add_Cauchy(F,P): - - return { - 'data': mechanics.Cauchy(F['data'],P['data']), - 'label': 'sigma', - 'meta': { - 'Unit': P['meta']['Unit'], - 'Description': 'Cauchy stress calculated from {} ({}) '.format(P['label'], - P['meta']['Description'])+\ - 'and {} ({})'.format(F['label'],F['meta']['Description']), - 'Creator': 'dadf5.py:add_Cauchy v{}'.format(version) - } - } - - self.__add_generic_pointwise(_add_Cauchy,{'F':F,'P':P}) - - - def add_determinant(self,x): - """ - Add the determinant of a tensor. - - Parameters - ---------- - x : str - Label of the dataset containing a tensor. - - """ - def _add_determinant(x): - - return { - 'data': np.linalg.det(x['data']), - 'label': 'det({})'.format(x['label']), - 'meta': { - 'Unit': x['meta']['Unit'], - 'Description': 'Determinant of tensor {} ({})'.format(x['label'],x['meta']['Description']), - 'Creator': 'dadf5.py:add_determinant v{}'.format(version) - } - } - - self.__add_generic_pointwise(_add_determinant,{'x':x}) - - - def add_deviator(self,x): - """ - Add the deviatoric part of a tensor. - - Parameters - ---------- - x : str - Label of the dataset containing a tensor. - - """ - def _add_deviator(x): - - if not np.all(np.array(x['data'].shape[1:]) == np.array([3,3])): - raise ValueError - - return { - 'data': mechanics.deviatoric_part(x['data']), - 'label': 's_{}'.format(x['label']), - 'meta': { - 'Unit': x['meta']['Unit'], - 'Description': 'Deviator of tensor {} ({})'.format(x['label'],x['meta']['Description']), - 'Creator': 'dadf5.py:add_deviator v{}'.format(version) - } - } - - self.__add_generic_pointwise(_add_deviator,{'x':x}) - - - def add_eigenvalues(self,x): - """ - Add eigenvalues of symmetric tensor. - - Parameters - ---------- - x : str - Label of the dataset containing a symmetric tensor. - - """ - def _add_eigenvalue(x): - - return { - 'data': mechanics.eigenvalues(x['data']), - 'label': 'lambda({})'.format(x['label']), - 'meta' : { - 'Unit': x['meta']['Unit'], - 'Description': 'Eigenvalues of {} ({})'.format(x['label'],x['meta']['Description']), - 'Creator': 'dadf5.py:add_eigenvalues v{}'.format(version) - } - } - - self.__add_generic_pointwise(_add_eigenvalue,{'x':x}) - - - def add_eigenvectors(self,x): - """ - Add eigenvectors of symmetric tensor. - - Parameters - ---------- - x : str - Label of the dataset containing a symmetric tensor. 
- - """ - def _add_eigenvector(x): - - return { - 'data': mechanics.eigenvectors(x['data']), - 'label': 'v({})'.format(x['label']), - 'meta' : { - 'Unit': '1', - 'Description': 'Eigenvectors of {} ({})'.format(x['label'],x['meta']['Description']), - 'Creator': 'dadf5.py:add_eigenvectors v{}'.format(version) - } - } - - self.__add_generic_pointwise(_add_eigenvector,{'x':x}) - - - def add_IPFcolor(self,q,p): - """ - Add RGB color tuple of inverse pole figure (IPF) color. - - Parameters - ---------- - q : str - Label of the dataset containing the orientation data as quaternions. - p : list of int - Pole direction as Miller indices. - - """ - def _add_IPFcolor(q,p): - - pole = np.array(p) - unit_pole = pole/np.linalg.norm(pole) - m = util.scale_to_coprime(pole) - colors = np.empty((len(q['data']),3),np.uint8) - - lattice = q['meta']['Lattice'] - - for i,q in enumerate(q['data']): - o = Orientation(np.array([q['w'],q['x'],q['y'],q['z']]),lattice).reduced() - colors[i] = np.uint8(o.IPFcolor(unit_pole)*255) - - return { - 'data': colors, - 'label': 'IPFcolor_{{{} {} {}>'.format(*m), - 'meta' : { - 'Unit': 'RGB (8bit)', - 'Lattice': lattice, - 'Description': 'Inverse Pole Figure (IPF) colors for direction/plane [{} {} {})'.format(*m), - 'Creator': 'dadf5.py:addIPFcolor v{}'.format(version) - } - } - - self.__add_generic_pointwise(_add_IPFcolor,{'q':q},{'p':p}) - - - def add_maximum_shear(self,x): - """ - Add maximum shear components of symmetric tensor. - - Parameters - ---------- - x : str - Label of the dataset containing a symmetric tensor. - - """ - def _add_maximum_shear(x): - - return { - 'data': mechanics.maximum_shear(x['data']), - 'label': 'max_shear({})'.format(x['label']), - 'meta': { - 'Unit': x['meta']['Unit'], - 'Description': 'Maximum shear component of of {} ({})'.format(x['label'],x['meta']['Description']), - 'Creator': 'dadf5.py:add_maximum_shear v{}'.format(version) - } - } - - self.__add_generic_pointwise(_add_maximum_shear,{'x':x}) - - - def add_Mises(self,x): - """ - Add the equivalent Mises stress or strain of a symmetric tensor. - - Parameters - ---------- - x : str - Label of the dataset containing a symmetric stress or strain tensor. - - """ - def _add_Mises(x): - - t = 'strain' if x['meta']['Unit'] == '1' else \ - 'stress' - return { - 'data': mechanics.Mises_strain(x['data']) if t=='strain' else mechanics.Mises_stress(x['data']), - 'label': '{}_vM'.format(x['label']), - 'meta': { - 'Unit': x['meta']['Unit'], - 'Description': 'Mises equivalent {} of {} ({})'.format(t,x['label'],x['meta']['Description']), - 'Creator': 'dadf5.py:add_Mises v{}'.format(version) - } - } - - self.__add_generic_pointwise(_add_Mises,{'x':x}) - - - def add_norm(self,x,ord=None): - """ - Add the norm of vector or tensor. - - Parameters - ---------- - x : str - Label of the dataset containing a vector or tensor. - ord : {non-zero int, inf, -inf, ‘fro’, ‘nuc’}, optional - Order of the norm. inf means numpy’s inf object. For details refer to numpy.linalg.norm. 
- - """ - def _add_norm(x,ord): - - o = ord - if len(x['data'].shape) == 2: - axis = 1 - t = 'vector' - if o is None: o = 2 - elif len(x['data'].shape) == 3: - axis = (1,2) - t = 'tensor' - if o is None: o = 'fro' - else: - raise ValueError - - return { - 'data': np.linalg.norm(x['data'],ord=o,axis=axis,keepdims=True), - 'label': '|{}|_{}'.format(x['label'],o), - 'meta': { - 'Unit': x['meta']['Unit'], - 'Description': '{}-Norm of {} {} ({})'.format(ord,t,x['label'],x['meta']['Description']), - 'Creator': 'dadf5.py:add_norm v{}'.format(version) - } - } - - self.__add_generic_pointwise(_add_norm,{'x':x},{'ord':ord}) - - - def add_PK2(self,F='F',P='P'): - """ - Add 2. Piola-Kirchhoff calculated from 1. Piola-Kirchhoff stress and deformation gradient. - - Parameters - ---------- - P : str, optional - Label of the dataset containing the 1. Piola-Kirchhoff stress. Default value is ‘P’. - F : str, optional - Label of the dataset containing the deformation gradient. Default value is ‘F’. - - """ - def _add_PK2(F,P): - - return { - 'data': mechanics.PK2(F['data'],P['data']), - 'label': 'S', - 'meta': { - 'Unit': P['meta']['Unit'], - 'Description': '2. Kirchhoff stress calculated from {} ({}) '.format(P['label'], - P['meta']['Description'])+\ - 'and {} ({})'.format(F['label'],F['meta']['Description']), - 'Creator': 'dadf5.py:add_PK2 v{}'.format(version) - } - } - - self.__add_generic_pointwise(_add_PK2,{'F':F,'P':P}) - - - def add_pole(self,q,p,polar=False): - """ - Add coordinates of stereographic projection of given pole in crystal frame. - - Parameters - ---------- - q : str - Label of the dataset containing the crystallographic orientation as a quaternion. - p : numpy.array of shape (3) - Pole in crystal frame. - polar : bool, optional - Give pole in polar coordinates. Default is false. - - """ - def _add_pole(q,p,polar): - - pole = np.array(p) - unit_pole = pole/np.linalg.norm(pole) - m = util.scale_to_coprime(pole) - coords = np.empty((len(q['data']),2)) - - for i,q in enumerate(q['data']): - o = Rotation(np.array([q['w'],q['x'],q['y'],q['z']])) - rotatedPole = o*pole # rotate pole according to crystal orientation - (x,y) = rotatedPole[0:2]/(1.+abs(pole[2])) # stereographic projection - coords[i] = [np.sqrt(x*x+y*y),np.arctan2(y,x)] if polar else [x,y] - - return { - 'data': coords, - 'label': 'p^{}_[{} {} {})'.format(u'rφ' if polar else 'xy',*m), - 'meta' : { - 'Unit': '1', - 'Description': '{} coordinates of stereographic projection of pole (direction/plane) in crystal frame'\ - .format('Polar' if polar else 'Cartesian'), - 'Creator' : 'dadf5.py:addPole v{}'.format(version) - } - } - - self.__add_generic_pointwise(_add_pole,{'q':q},{'p':p,'polar':polar}) - - - def add_rotational_part(self,F): - """ - Add rotational part of a deformation gradient. - - Parameters - ---------- - F : str - Label of the dataset containing a deformation gradient. Default value is ‘F’. - - """ - def _add_rotational_part(F): - - return { - 'data': mechanics.rotational_part(F['data']), - 'label': 'R({})'.format(F['label']), - 'meta': { - 'Unit': F['meta']['Unit'], - 'Description': 'Rotational part of {} ({})'.format(F['label'],F['meta']['Description']), - 'Creator': 'dadf5.py:add_rotational_part v{}'.format(version) - } - } - - self.__add_generic_pointwise(_add_rotational_part,{'F':F}) - - - def add_spherical(self,x): - """ - Add the spherical (hydrostatic) part of a tensor. - - Parameters - ---------- - x : str - Label of the dataset containing a tensor. 
- - """ - def _add_spherical(x): - - if not np.all(np.array(x['data'].shape[1:]) == np.array([3,3])): - raise ValueError - - return { - 'data': mechanics.spherical_part(x['data']), - 'label': 'p_{}'.format(x['label']), - 'meta': { - 'Unit': x['meta']['Unit'], - 'Description': 'Spherical component of tensor {} ({})'.format(x['label'],x['meta']['Description']), - 'Creator': 'dadf5.py:add_spherical v{}'.format(version) - } - } - - self.__add_generic_pointwise(_add_spherical,{'x':x}) - - - def add_strain_tensor(self,F='F',t='U',m=0): - """ - Add strain tensor calculated from a deformation gradient. - - For details refer to damask.mechanics.strain_tensor - - Parameters - ---------- - F : str, optional - Label of the dataset containing the deformation gradient. Default value is ‘F’. - t : {‘V’, ‘U’}, optional - Type of the polar decomposition, ‘U’ for right stretch tensor and ‘V’ for left stretch tensor. - Default value is ‘U’. - m : float, optional - Order of the strain calculation. Default value is ‘0.0’. - - """ - def _add_strain_tensor(F,t,m): - - return { - 'data': mechanics.strain_tensor(F['data'],t,m), - 'label': 'epsilon_{}^{}({})'.format(t,m,F['label']), - 'meta': { - 'Unit': F['meta']['Unit'], - 'Description': 'Strain tensor of {} ({})'.format(F['label'],F['meta']['Description']), - 'Creator': 'dadf5.py:add_strain_tensor v{}'.format(version) - } - } - - self.__add_generic_pointwise(_add_strain_tensor,{'F':F},{'t':t,'m':m}) - - - def add_stretch_tensor(self,F='F',t='U'): - """ - Add stretch tensor calculated from a deformation gradient. - - Parameters - ---------- - F : str, optional - Label of the dataset containing the deformation gradient. Default value is ‘F’. - t : {‘V’, ‘U’}, optional - Type of the polar decomposition, ‘U’ for right stretch tensor and ‘V’ for left stretch tensor. - Default value is ‘U’. - - """ - def _add_stretch_tensor(F,t): - - return { - 'data': mechanics.left_stretch(F['data']) if t == 'V' else mechanics.right_stretch(F['data']), - 'label': '{}({})'.format(t,F['label']), - 'meta': { - 'Unit': F['meta']['Unit'], - 'Description': '{} stretch tensor of {} ({})'.format('Left' if t == 'V' else 'Right', - F['label'],F['meta']['Description']), - 'Creator': 'dadf5.py:add_stretch_tensor v{}'.format(version) - } - } - - self.__add_generic_pointwise(_add_stretch_tensor,{'F':F},{'t':t}) - - - def __add_generic_pointwise(self,func,dataset_mapping,args={}): - """ - General function to add pointwise data. - - Parameters - ---------- - func : function - Function that calculates a new dataset from one or more datasets per HDF5 group. - datasets_requested : list of dictionaries - Details of the datasets to be used: label (in HDF5 file) and arg (argument to which the data is parsed in func). - extra_args : dictionary, optional - Any extra arguments parsed to func. - - """ - def job(args): - """Call function with input data + extra arguments, returns results + group.""" - args['results'].put({**args['func'](**args['in']),'group':args['group']}) - - env = Environment() - N_threads = 1#int(env.options['DAMASK_NUM_THREADS']) - - results = Queue(N_threads) - pool = util.ThreadPool(N_threads) - N_added = N_threads + 1 - - todo = [] - # ToDo: It would be more memory efficient to read only from file when required, i.e. 
do to it in pool.add_task - for group in self.groups_with_datasets(dataset_mapping.values()): - with h5py.File(self.fname,'r') as f: - datasets_in = {} - for arg,label in dataset_mapping.items(): - loc = f[group+'/'+label] - data = loc[()] - meta = {k:loc.attrs[k].decode() for k in loc.attrs.keys()} - datasets_in[arg] = {'data': data, 'meta': meta, 'label': label} - - todo.append({'in':{**datasets_in,**args},'func':func,'group':group,'results':results}) - - pool.map(job, todo[:N_added]) # initialize - - N_not_calculated = len(todo) - while N_not_calculated > 0: - result = results.get() - with h5py.File(self.fname,'a') as f: # write to file - dataset_out = f[result['group']].create_dataset(result['label'],data=result['data']) - for k in result['meta'].keys(): - dataset_out.attrs[k] = result['meta'][k].encode() - N_not_calculated-=1 - - if N_added < len(todo): # add more jobs - pool.add_task(job,todo[N_added]) - N_added +=1 - - pool.wait_completion() - - - def to_vtk(self,labels,mode='Cell'): - """ - Export to vtk cell/point data. - - Parameters - ---------- - labels : str or list of - Labels of the datasets to be exported. - mode : str, either 'Cell' or 'Point' - Export in cell format or point format. - Default value is 'Cell'. - - """ - if mode=='Cell': - - if self.structured: - - coordArray = [vtk.vtkDoubleArray(),vtk.vtkDoubleArray(),vtk.vtkDoubleArray()] - for dim in [0,1,2]: - for c in np.linspace(0,self.size[dim],1+self.grid[dim]): - coordArray[dim].InsertNextValue(c) - - vtk_geom = vtk.vtkRectilinearGrid() - vtk_geom.SetDimensions(*(self.grid+1)) - vtk_geom.SetXCoordinates(coordArray[0]) - vtk_geom.SetYCoordinates(coordArray[1]) - vtk_geom.SetZCoordinates(coordArray[2]) - - else: - - nodes = vtk.vtkPoints() with h5py.File(self.fname,'r') as f: - nodes.SetData(numpy_support.numpy_to_vtk(f['/geometry/x_n'][()],deep=True)) + for i in self.iter_visible('increments'): + for o,p in zip(['constituents','materialpoints'],['con_physics','mat_physics']): + for oo in self.iter_visible(o): + for pp in self.iter_visible(p): + group = '/'.join([i,o[:-1],oo,pp]) # o[:-1]: plural/singular issue + if sets is True: + groups.append(group) + else: + match = [e for e_ in [glob.fnmatch.filter(f[group].keys(),s) for s in sets] for e in e_] + if len(set(match)) == len(sets) : groups.append(group) + return groups - vtk_geom = vtk.vtkUnstructuredGrid() - vtk_geom.SetPoints(nodes) - vtk_geom.Allocate(f['/geometry/T_c'].shape[0]) - if self.version_major == 0 and self.version_minor <= 5: - vtk_type = vtk.VTK_HEXAHEDRON - n_nodes = 8 - else: - if f['/geometry/T_c'].attrs['VTK_TYPE'] == b'TRIANGLE': - vtk_type = vtk.VTK_TRIANGLE - n_nodes = 3 - elif f['/geometry/T_c'].attrs['VTK_TYPE'] == b'QUAD': - vtk_type = vtk.VTK_QUAD - n_nodes = 4 - elif f['/geometry/T_c'].attrs['VTK_TYPE'] == b'TETRA': # not tested - vtk_type = vtk.VTK_TETRA - n_nodes = 4 - elif f['/geometry/T_c'].attrs['VTK_TYPE'] == b'HEXAHEDRON': - vtk_type = vtk.VTK_HEXAHEDRON - n_nodes = 8 + def list_data(self): + """Return information on all active datasets in the file.""" + message = '' + with h5py.File(self.fname,'r') as f: + for i in self.iter_visible('increments'): + message+='\n{} ({}s)\n'.format(i,self.times[self.increments.index(i)]) + for o,p in zip(['constituents','materialpoints'],['con_physics','mat_physics']): + for oo in self.iter_visible(o): + message+=' {}\n'.format(oo) + for pp in self.iter_visible(p): + message+=' {}\n'.format(pp) + group = '/'.join([i,o[:-1],oo,pp]) # o[:-1]: plural/singular issue + for d in f[group].keys(): + try: + 
dataset = f['/'.join([group,d])] + message+=' {} / ({}): {}\n'.format(d,dataset.attrs['Unit'].decode(),dataset.attrs['Description'].decode()) + except KeyError: + pass + return message - for i in f['/geometry/T_c']: - vtk_geom.InsertNextCell(vtk_type,n_nodes,i-1) - elif mode == 'Point': - Points = vtk.vtkPoints() - Vertices = vtk.vtkCellArray() - for c in self.cell_coordinates(): - pointID = Points.InsertNextPoint(c) - Vertices.InsertNextCell(1) - Vertices.InsertCellPoint(pointID) + def get_dataset_location(self,label): + """Return the location of all active datasets with given label.""" + path = [] + with h5py.File(self.fname,'r') as f: + for i in self.iter_visible('increments'): + k = '/'.join([i,'geometry',label]) + try: + f[k] + path.append(k) + except KeyError as e: + pass + for o,p in zip(['constituents','materialpoints'],['con_physics','mat_physics']): + for oo in self.iter_visible(o): + for pp in self.iter_visible(p): + k = '/'.join([i,o[:-1],oo,pp,label]) + try: + f[k] + path.append(k) + except KeyError as e: + pass + return path - vtk_geom = vtk.vtkPolyData() - vtk_geom.SetPoints(Points) - vtk_geom.SetVerts(Vertices) - vtk_geom.Modified() - N_digits = int(np.floor(np.log10(int(self.increments[-1][3:]))))+1 + def get_constituent_ID(self,c=0): + """Pointwise constituent ID.""" + with h5py.File(self.fname,'r') as f: + names = f['/mapping/cellResults/constituent']['Name'][:,c].astype('str') + return np.array([int(n.split('_')[0]) for n in names.tolist()],dtype=np.int32) - for i,inc in enumerate(self.iter_visible('increments')): - vtk_data = [] - materialpoints_backup = self.visible['materialpoints'].copy() - self.set_visible('materialpoints',False) - for label in (labels if isinstance(labels,list) else [labels]): - for p in self.iter_visible('con_physics'): - if p != 'generic': - for c in self.iter_visible('constituents'): - x = self.get_dataset_location(label) - if len(x) == 0: - continue - array = self.read_dataset(x,0) - shape = [array.shape[0],np.product(array.shape[1:])] - vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape), - deep=True,array_type= vtk.VTK_DOUBLE)) - vtk_data[-1].SetName('1_'+x[0].split('/',1)[1]) #ToDo: hard coded 1! - vtk_geom.GetCellData().AddArray(vtk_data[-1]) + def get_crystal_structure(self): # ToDo: extension to multi constituents/phase + """Info about the crystal structure.""" + with h5py.File(self.fname,'r') as f: + return f[self.get_dataset_location('orientation')[0]].attrs['Lattice'].astype('str') # np.bytes_ to string - else: - x = self.get_dataset_location(label) - if len(x) == 0: + + def read_dataset(self,path,c=0,plain=False): + """ + Dataset for all points/cells. + + If more than one path is given, the dataset is composed of the individual contributions. 
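
Taken together, the data-access methods documented above (list_data, get_dataset_location, read_dataset) pull raw arrays out of the file; a sketch assuming 'F' is an existing dataset label and 'd' is the DADF5 object from the earlier sketches:

    print(d.list_data())                # human-readable summary of all active datasets
    loc = d.get_dataset_location('F')   # paths of all active datasets labelled 'F'
    if loc:                             # the label may be absent from the current selection
        F = d.read_dataset(loc,0)       # one array over all material points, constituent 0
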
+ """ + with h5py.File(self.fname,'r') as f: + shape = (self.Nmaterialpoints,) + np.shape(f[path[0]])[1:] + if len(shape) == 1: shape = shape +(1,) + dataset = np.full(shape,np.nan,dtype=np.dtype(f[path[0]])) + for pa in path: + label = pa.split('/')[2] + + if (pa.split('/')[1] == 'geometry'): + dataset = np.array(f[pa]) continue - array = self.read_dataset(x,0) - shape = [array.shape[0],np.product(array.shape[1:])] - vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape), - deep=True,array_type= vtk.VTK_DOUBLE)) - ph_name = re.compile(r'(?<=(constituent\/))(.*?)(?=(generic))') # identify phase name - dset_name = '1_' + re.sub(ph_name,r'',x[0].split('/',1)[1]) # removing phase name - vtk_data[-1].SetName(dset_name) - vtk_geom.GetCellData().AddArray(vtk_data[-1]) - self.set_visible('materialpoints',materialpoints_backup) + p = np.where(f['mapping/cellResults/constituent'][:,c]['Name'] == str.encode(label))[0] + if len(p)>0: + u = (f['mapping/cellResults/constituent']['Position'][p,c]) + a = np.array(f[pa]) + if len(a.shape) == 1: + a=a.reshape([a.shape[0],1]) + dataset[p,:] = a[u,:] - constituents_backup = self.visible['constituents'].copy() - self.set_visible('constituents',False) - for label in (labels if isinstance(labels,list) else [labels]): - for p in self.iter_visible('mat_physics'): - if p != 'generic': - for m in self.iter_visible('materialpoints'): - x = self.get_dataset_location(label) - if len(x) == 0: - continue - array = self.read_dataset(x,0) - shape = [array.shape[0],np.product(array.shape[1:])] - vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape), - deep=True,array_type= vtk.VTK_DOUBLE)) - vtk_data[-1].SetName('1_'+x[0].split('/',1)[1]) #ToDo: why 1_? - vtk_geom.GetCellData().AddArray(vtk_data[-1]) + p = np.where(f['mapping/cellResults/materialpoint']['Name'] == str.encode(label))[0] + if len(p)>0: + u = (f['mapping/cellResults/materialpoint']['Position'][p.tolist()]) + a = np.array(f[pa]) + if len(a.shape) == 1: + a=a.reshape([a.shape[0],1]) + dataset[p,:] = a[u,:] + + if plain and dataset.dtype.names is not None: + return dataset.view(('float64',len(dataset.dtype.names))) + else: + return dataset + + + def cell_coordinates(self): + """Return initial coordinates of the cell centers.""" + if self.structured: + delta = self.size/self.grid*0.5 + z, y, x = np.meshgrid(np.linspace(delta[2],self.size[2]-delta[2],self.grid[2]), + np.linspace(delta[1],self.size[1]-delta[1],self.grid[1]), + np.linspace(delta[0],self.size[0]-delta[0],self.grid[0]), + ) + return np.concatenate((x[:,:,:,None],y[:,:,:,None],z[:,:,:,None]),axis = 3).reshape([np.product(self.grid),3]) + else: + with h5py.File(self.fname,'r') as f: + return f['geometry/x_c'][()] + + + def add_absolute(self,x): + """ + Add absolute value. + + Parameters + ---------- + x : str + Label of the dataset containing a scalar, vector, or tensor. + + """ + def _add_absolute(x): + + return { + 'data': np.abs(x['data']), + 'label': '|{}|'.format(x['label']), + 'meta': { + 'Unit': x['meta']['Unit'], + 'Description': 'Absolute value of {} ({})'.format(x['label'],x['meta']['Description']), + 'Creator': 'dadf5.py:add_abs v{}'.format(version) + } + } + + self.__add_generic_pointwise(_add_absolute,{'x':x}) + + + def add_calculation(self,formula,label,unit='n/a',description=None,vectorized=True): + """ + Add result of a general formula. + + Parameters + ---------- + formula : str + Formula, refer to datasets by ‘#Label#‘. + label : str + Label of the dataset containing the result of the calculation. 
+ unit : str, optional + Physical unit of the result. + description : str, optional + Human readable description of the result. + vectorized : bool, optional + Indicate whether the formula is written in vectorized form. Default is ‘True’. + + """ + if vectorized is False: + raise NotImplementedError + + def _add_calculation(**kwargs): + + formula = kwargs['formula'] + for d in re.findall(r'#(.*?)#',formula): + formula = formula.replace('#{}#'.format(d),"kwargs['{}']['data']".format(d)) + + return { + 'data': eval(formula), + 'label': kwargs['label'], + 'meta': { + 'Unit': kwargs['unit'], + 'Description': '{} (formula: {})'.format(kwargs['description'],kwargs['formula']), + 'Creator': 'dadf5.py:add_calculation v{}'.format(version) + } + } + + dataset_mapping = {d:d for d in set(re.findall(r'#(.*?)#',formula))} # datasets used in the formula + args = {'formula':formula,'label':label,'unit':unit,'description':description} + + self.__add_generic_pointwise(_add_calculation,dataset_mapping,args) + + + def add_Cauchy(self,F='F',P='P'): + """ + Add Cauchy stress calculated from 1. Piola-Kirchhoff stress and deformation gradient. + + Parameters + ---------- + P : str, optional + Label of the dataset containing the 1. Piola-Kirchhoff stress. Default value is ‘P’. + F : str, optional + Label of the dataset containing the deformation gradient. Default value is ‘F’. + + """ + def _add_Cauchy(F,P): + + return { + 'data': mechanics.Cauchy(F['data'],P['data']), + 'label': 'sigma', + 'meta': { + 'Unit': P['meta']['Unit'], + 'Description': 'Cauchy stress calculated from {} ({}) '.format(P['label'], + P['meta']['Description'])+\ + 'and {} ({})'.format(F['label'],F['meta']['Description']), + 'Creator': 'dadf5.py:add_Cauchy v{}'.format(version) + } + } + + self.__add_generic_pointwise(_add_Cauchy,{'F':F,'P':P}) + + + def add_determinant(self,x): + """ + Add the determinant of a tensor. + + Parameters + ---------- + x : str + Label of the dataset containing a tensor. + + """ + def _add_determinant(x): + + return { + 'data': np.linalg.det(x['data']), + 'label': 'det({})'.format(x['label']), + 'meta': { + 'Unit': x['meta']['Unit'], + 'Description': 'Determinant of tensor {} ({})'.format(x['label'],x['meta']['Description']), + 'Creator': 'dadf5.py:add_determinant v{}'.format(version) + } + } + + self.__add_generic_pointwise(_add_determinant,{'x':x}) + + + def add_deviator(self,x): + """ + Add the deviatoric part of a tensor. + + Parameters + ---------- + x : str + Label of the dataset containing a tensor. + + """ + def _add_deviator(x): + + if not np.all(np.array(x['data'].shape[1:]) == np.array([3,3])): + raise ValueError + + return { + 'data': mechanics.deviatoric_part(x['data']), + 'label': 's_{}'.format(x['label']), + 'meta': { + 'Unit': x['meta']['Unit'], + 'Description': 'Deviator of tensor {} ({})'.format(x['label'],x['meta']['Description']), + 'Creator': 'dadf5.py:add_deviator v{}'.format(version) + } + } + + self.__add_generic_pointwise(_add_deviator,{'x':x}) + + + def add_eigenvalues(self,x): + """ + Add eigenvalues of symmetric tensor. + + Parameters + ---------- + x : str + Label of the dataset containing a symmetric tensor. 
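
A sketch chaining the pointwise additions defined above (add_Cauchy, add_deviator, add_determinant, add_calculation); 'sigma_double' is a hypothetical derived quantity, and datasets inside a formula are referenced with the #Label# syntax:

    d.add_Cauchy()                             # adds 'sigma' from the default labels 'F' and 'P'
    d.add_deviator('sigma')                    # adds 's_sigma'
    d.add_determinant('F')                     # adds 'det(F)'
    d.add_calculation(label='sigma_double',
                      formula='2.0*#sigma#',
                      unit='Pa',
                      description='twice the Cauchy stress')
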
+ + """ + def _add_eigenvalue(x): + + return { + 'data': mechanics.eigenvalues(x['data']), + 'label': 'lambda({})'.format(x['label']), + 'meta' : { + 'Unit': x['meta']['Unit'], + 'Description': 'Eigenvalues of {} ({})'.format(x['label'],x['meta']['Description']), + 'Creator': 'dadf5.py:add_eigenvalues v{}'.format(version) + } + } + + self.__add_generic_pointwise(_add_eigenvalue,{'x':x}) + + + def add_eigenvectors(self,x): + """ + Add eigenvectors of symmetric tensor. + + Parameters + ---------- + x : str + Label of the dataset containing a symmetric tensor. + + """ + def _add_eigenvector(x): + + return { + 'data': mechanics.eigenvectors(x['data']), + 'label': 'v({})'.format(x['label']), + 'meta' : { + 'Unit': '1', + 'Description': 'Eigenvectors of {} ({})'.format(x['label'],x['meta']['Description']), + 'Creator': 'dadf5.py:add_eigenvectors v{}'.format(version) + } + } + + self.__add_generic_pointwise(_add_eigenvector,{'x':x}) + + + def add_IPFcolor(self,q,p): + """ + Add RGB color tuple of inverse pole figure (IPF) color. + + Parameters + ---------- + q : str + Label of the dataset containing the orientation data as quaternions. + p : list of int + Pole direction as Miller indices. + + """ + def _add_IPFcolor(q,p): + + pole = np.array(p) + unit_pole = pole/np.linalg.norm(pole) + m = util.scale_to_coprime(pole) + colors = np.empty((len(q['data']),3),np.uint8) + + lattice = q['meta']['Lattice'] + + for i,q in enumerate(q['data']): + o = Orientation(np.array([q['w'],q['x'],q['y'],q['z']]),lattice).reduced() + colors[i] = np.uint8(o.IPFcolor(unit_pole)*255) + + return { + 'data': colors, + 'label': 'IPFcolor_{{{} {} {}>'.format(*m), + 'meta' : { + 'Unit': 'RGB (8bit)', + 'Lattice': lattice, + 'Description': 'Inverse Pole Figure (IPF) colors for direction/plane [{} {} {})'.format(*m), + 'Creator': 'dadf5.py:addIPFcolor v{}'.format(version) + } + } + + self.__add_generic_pointwise(_add_IPFcolor,{'q':q},{'p':p}) + + + def add_maximum_shear(self,x): + """ + Add maximum shear components of symmetric tensor. + + Parameters + ---------- + x : str + Label of the dataset containing a symmetric tensor. + + """ + def _add_maximum_shear(x): + + return { + 'data': mechanics.maximum_shear(x['data']), + 'label': 'max_shear({})'.format(x['label']), + 'meta': { + 'Unit': x['meta']['Unit'], + 'Description': 'Maximum shear component of of {} ({})'.format(x['label'],x['meta']['Description']), + 'Creator': 'dadf5.py:add_maximum_shear v{}'.format(version) + } + } + + self.__add_generic_pointwise(_add_maximum_shear,{'x':x}) + + + def add_Mises(self,x): + """ + Add the equivalent Mises stress or strain of a symmetric tensor. + + Parameters + ---------- + x : str + Label of the dataset containing a symmetric stress or strain tensor. + + """ + def _add_Mises(x): + + t = 'strain' if x['meta']['Unit'] == '1' else \ + 'stress' + return { + 'data': mechanics.Mises_strain(x['data']) if t=='strain' else mechanics.Mises_stress(x['data']), + 'label': '{}_vM'.format(x['label']), + 'meta': { + 'Unit': x['meta']['Unit'], + 'Description': 'Mises equivalent {} of {} ({})'.format(t,x['label'],x['meta']['Description']), + 'Creator': 'dadf5.py:add_Mises v{}'.format(version) + } + } + + self.__add_generic_pointwise(_add_Mises,{'x':x}) + + + def add_norm(self,x,ord=None): + """ + Add the norm of vector or tensor. + + Parameters + ---------- + x : str + Label of the dataset containing a vector or tensor. + ord : {non-zero int, inf, -inf, ‘fro’, ‘nuc’}, optional + Order of the norm. inf means numpy’s inf object. 
For details refer to numpy.linalg.norm. + + """ + def _add_norm(x,ord): + + o = ord + if len(x['data'].shape) == 2: + axis = 1 + t = 'vector' + if o is None: o = 2 + elif len(x['data'].shape) == 3: + axis = (1,2) + t = 'tensor' + if o is None: o = 'fro' else: - x = self.get_dataset_location(label) - if len(x) == 0: - continue - array = self.read_dataset(x,0) - shape = [array.shape[0],np.product(array.shape[1:])] - vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape), - deep=True,array_type= vtk.VTK_DOUBLE)) - vtk_data[-1].SetName('1_'+x[0].split('/',1)[1]) - vtk_geom.GetCellData().AddArray(vtk_data[-1]) - self.set_visible('constituents',constituents_backup) + raise ValueError - if mode=='Cell': - writer = vtk.vtkXMLRectilinearGridWriter() if self.structured else \ - vtk.vtkXMLUnstructuredGridWriter() - x = self.get_dataset_location('u_n') - vtk_data.append(numpy_support.numpy_to_vtk(num_array=self.read_dataset(x,0), - deep=True,array_type=vtk.VTK_DOUBLE)) - vtk_data[-1].SetName('u') - vtk_geom.GetPointData().AddArray(vtk_data[-1]) - elif mode == 'Point': - writer = vtk.vtkXMLPolyDataWriter() + return { + 'data': np.linalg.norm(x['data'],ord=o,axis=axis,keepdims=True), + 'label': '|{}|_{}'.format(x['label'],o), + 'meta': { + 'Unit': x['meta']['Unit'], + 'Description': '{}-Norm of {} {} ({})'.format(ord,t,x['label'],x['meta']['Description']), + 'Creator': 'dadf5.py:add_norm v{}'.format(version) + } + } + + self.__add_generic_pointwise(_add_norm,{'x':x},{'ord':ord}) - file_out = '{}_inc{}.{}'.format(os.path.splitext(os.path.basename(self.fname))[0], - inc[3:].zfill(N_digits), - writer.GetDefaultFileExtension()) + def add_PK2(self,F='F',P='P'): + """ + Add 2. Piola-Kirchhoff calculated from 1. Piola-Kirchhoff stress and deformation gradient. - writer.SetCompressorTypeToZLib() - writer.SetDataModeToBinary() - writer.SetFileName(file_out) - writer.SetInputData(vtk_geom) + Parameters + ---------- + P : str, optional + Label of the dataset containing the 1. Piola-Kirchhoff stress. Default value is ‘P’. + F : str, optional + Label of the dataset containing the deformation gradient. Default value is ‘F’. - writer.Write() + """ + def _add_PK2(F,P): + + return { + 'data': mechanics.PK2(F['data'],P['data']), + 'label': 'S', + 'meta': { + 'Unit': P['meta']['Unit'], + 'Description': '2. Kirchhoff stress calculated from {} ({}) '.format(P['label'], + P['meta']['Description'])+\ + 'and {} ({})'.format(F['label'],F['meta']['Description']), + 'Creator': 'dadf5.py:add_PK2 v{}'.format(version) + } + } + + self.__add_generic_pointwise(_add_PK2,{'F':F,'P':P}) + + + def add_pole(self,q,p,polar=False): + """ + Add coordinates of stereographic projection of given pole in crystal frame. + + Parameters + ---------- + q : str + Label of the dataset containing the crystallographic orientation as a quaternion. + p : numpy.array of shape (3) + Pole in crystal frame. + polar : bool, optional + Give pole in polar coordinates. Default is false. 
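
The scalar and tensor measures above follow the same pattern; a sketch, assuming 'sigma' was added as shown earlier:

    d.add_PK2()                     # adds 'S' from the default labels 'F' and 'P'
    d.add_eigenvalues('sigma')      # adds 'lambda(sigma)'
    d.add_eigenvectors('sigma')     # adds 'v(sigma)'
    d.add_maximum_shear('sigma')    # adds 'max_shear(sigma)'
    d.add_Mises('sigma')            # adds 'sigma_vM' (treated as stress, since its unit is not '1')
    d.add_norm('F')                 # adds '|F|_fro' (Frobenius norm is the default for tensors)
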
+ + """ + def _add_pole(q,p,polar): + + pole = np.array(p) + unit_pole = pole/np.linalg.norm(pole) + m = util.scale_to_coprime(pole) + coords = np.empty((len(q['data']),2)) + + for i,q in enumerate(q['data']): + o = Rotation(np.array([q['w'],q['x'],q['y'],q['z']])) + rotatedPole = o*pole # rotate pole according to crystal orientation + (x,y) = rotatedPole[0:2]/(1.+abs(pole[2])) # stereographic projection + coords[i] = [np.sqrt(x*x+y*y),np.arctan2(y,x)] if polar else [x,y] + + return { + 'data': coords, + 'label': 'p^{}_[{} {} {})'.format(u'rφ' if polar else 'xy',*m), + 'meta' : { + 'Unit': '1', + 'Description': '{} coordinates of stereographic projection of pole (direction/plane) in crystal frame'\ + .format('Polar' if polar else 'Cartesian'), + 'Creator' : 'dadf5.py:addPole v{}'.format(version) + } + } + + self.__add_generic_pointwise(_add_pole,{'q':q},{'p':p,'polar':polar}) + + + def add_rotational_part(self,F): + """ + Add rotational part of a deformation gradient. + + Parameters + ---------- + F : str + Label of the dataset containing a deformation gradient. Default value is ‘F’. + + """ + def _add_rotational_part(F): + + return { + 'data': mechanics.rotational_part(F['data']), + 'label': 'R({})'.format(F['label']), + 'meta': { + 'Unit': F['meta']['Unit'], + 'Description': 'Rotational part of {} ({})'.format(F['label'],F['meta']['Description']), + 'Creator': 'dadf5.py:add_rotational_part v{}'.format(version) + } + } + + self.__add_generic_pointwise(_add_rotational_part,{'F':F}) + + + def add_spherical(self,x): + """ + Add the spherical (hydrostatic) part of a tensor. + + Parameters + ---------- + x : str + Label of the dataset containing a tensor. + + """ + def _add_spherical(x): + + if not np.all(np.array(x['data'].shape[1:]) == np.array([3,3])): + raise ValueError + + return { + 'data': mechanics.spherical_part(x['data']), + 'label': 'p_{}'.format(x['label']), + 'meta': { + 'Unit': x['meta']['Unit'], + 'Description': 'Spherical component of tensor {} ({})'.format(x['label'],x['meta']['Description']), + 'Creator': 'dadf5.py:add_spherical v{}'.format(version) + } + } + + self.__add_generic_pointwise(_add_spherical,{'x':x}) + + + def add_strain_tensor(self,F='F',t='U',m=0): + """ + Add strain tensor calculated from a deformation gradient. + + For details refer to damask.mechanics.strain_tensor + + Parameters + ---------- + F : str, optional + Label of the dataset containing the deformation gradient. Default value is ‘F’. + t : {‘V’, ‘U’}, optional + Type of the polar decomposition, ‘U’ for right stretch tensor and ‘V’ for left stretch tensor. + Default value is ‘U’. + m : float, optional + Order of the strain calculation. Default value is ‘0.0’. + + """ + def _add_strain_tensor(F,t,m): + + return { + 'data': mechanics.strain_tensor(F['data'],t,m), + 'label': 'epsilon_{}^{}({})'.format(t,m,F['label']), + 'meta': { + 'Unit': F['meta']['Unit'], + 'Description': 'Strain tensor of {} ({})'.format(F['label'],F['meta']['Description']), + 'Creator': 'dadf5.py:add_strain_tensor v{}'.format(version) + } + } + + self.__add_generic_pointwise(_add_strain_tensor,{'F':F},{'t':t,'m':m}) + + + def add_stretch_tensor(self,F='F',t='U'): + """ + Add stretch tensor calculated from a deformation gradient. + + Parameters + ---------- + F : str, optional + Label of the dataset containing the deformation gradient. Default value is ‘F’. + t : {‘V’, ‘U’}, optional + Type of the polar decomposition, ‘U’ for right stretch tensor and ‘V’ for left stretch tensor. + Default value is ‘U’. 
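
A sketch of the orientation- and kinematics-related additions above; it assumes the file contains a quaternion dataset labelled 'orientation' that carries a 'Lattice' attribute, as used by get_crystal_structure:

    d.add_IPFcolor('orientation',[0,0,1])       # IPF colors for the [0 0 1] direction
    d.add_pole('orientation',[0,0,1],polar=True)
    d.add_rotational_part('F')                  # adds 'R(F)'
    d.add_spherical('sigma')                    # adds 'p_sigma'
    d.add_strain_tensor('F','U',0)              # adds 'epsilon_U^0(F)', the logarithmic strain
    d.add_stretch_tensor('F','V')               # adds the left stretch 'V(F)'
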
+ + """ + def _add_stretch_tensor(F,t): + + return { + 'data': mechanics.left_stretch(F['data']) if t == 'V' else mechanics.right_stretch(F['data']), + 'label': '{}({})'.format(t,F['label']), + 'meta': { + 'Unit': F['meta']['Unit'], + 'Description': '{} stretch tensor of {} ({})'.format('Left' if t == 'V' else 'Right', + F['label'],F['meta']['Description']), + 'Creator': 'dadf5.py:add_stretch_tensor v{}'.format(version) + } + } + + self.__add_generic_pointwise(_add_stretch_tensor,{'F':F},{'t':t}) + + + def __add_generic_pointwise(self,func,dataset_mapping,args={}): + """ + General function to add pointwise data. + + Parameters + ---------- + func : function + Function that calculates a new dataset from one or more datasets per HDF5 group. + datasets_requested : list of dictionaries + Details of the datasets to be used: label (in HDF5 file) and arg (argument to which the data is parsed in func). + extra_args : dictionary, optional + Any extra arguments parsed to func. + + """ + def job(args): + """Call function with input data + extra arguments, returns results + group.""" + args['results'].put({**args['func'](**args['in']),'group':args['group']}) + + env = Environment() + N_threads = 1#int(env.options['DAMASK_NUM_THREADS']) + + results = Queue(N_threads) + pool = util.ThreadPool(N_threads) + N_added = N_threads + 1 + + todo = [] + # ToDo: It would be more memory efficient to read only from file when required, i.e. do to it in pool.add_task + for group in self.groups_with_datasets(dataset_mapping.values()): + with h5py.File(self.fname,'r') as f: + datasets_in = {} + for arg,label in dataset_mapping.items(): + loc = f[group+'/'+label] + data = loc[()] + meta = {k:loc.attrs[k].decode() for k in loc.attrs.keys()} + datasets_in[arg] = {'data': data, 'meta': meta, 'label': label} + + todo.append({'in':{**datasets_in,**args},'func':func,'group':group,'results':results}) + + pool.map(job, todo[:N_added]) # initialize + + N_not_calculated = len(todo) + while N_not_calculated > 0: + result = results.get() + with h5py.File(self.fname,'a') as f: # write to file + dataset_out = f[result['group']].create_dataset(result['label'],data=result['data']) + for k in result['meta'].keys(): + dataset_out.attrs[k] = result['meta'][k].encode() + N_not_calculated-=1 + + if N_added < len(todo): # add more jobs + pool.add_task(job,todo[N_added]) + N_added +=1 + + pool.wait_completion() + + + def to_vtk(self,labels,mode='Cell'): + """ + Export to vtk cell/point data. + + Parameters + ---------- + labels : str or list of + Labels of the datasets to be exported. + mode : str, either 'Cell' or 'Point' + Export in cell format or point format. + Default value is 'Cell'. 
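
A usage sketch of the exporter documented above; it writes one VTK file per visible increment and assumes the listed labels and the usual geometry datasets exist in the file:

    d.to_vtk(['F','P','sigma'],mode='Cell')   # cell data on the (rectilinear or unstructured) grid
    d.to_vtk('sigma_vM',mode='Point')         # one vertex per cell center
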
+ + """ + if mode=='Cell': + + if self.structured: + + coordArray = [vtk.vtkDoubleArray(),vtk.vtkDoubleArray(),vtk.vtkDoubleArray()] + for dim in [0,1,2]: + for c in np.linspace(0,self.size[dim],1+self.grid[dim]): + coordArray[dim].InsertNextValue(c) + + vtk_geom = vtk.vtkRectilinearGrid() + vtk_geom.SetDimensions(*(self.grid+1)) + vtk_geom.SetXCoordinates(coordArray[0]) + vtk_geom.SetYCoordinates(coordArray[1]) + vtk_geom.SetZCoordinates(coordArray[2]) + + else: + + nodes = vtk.vtkPoints() + with h5py.File(self.fname,'r') as f: + nodes.SetData(numpy_support.numpy_to_vtk(f['/geometry/x_n'][()],deep=True)) + + vtk_geom = vtk.vtkUnstructuredGrid() + vtk_geom.SetPoints(nodes) + vtk_geom.Allocate(f['/geometry/T_c'].shape[0]) + + if self.version_major == 0 and self.version_minor <= 5: + vtk_type = vtk.VTK_HEXAHEDRON + n_nodes = 8 + else: + if f['/geometry/T_c'].attrs['VTK_TYPE'] == b'TRIANGLE': + vtk_type = vtk.VTK_TRIANGLE + n_nodes = 3 + elif f['/geometry/T_c'].attrs['VTK_TYPE'] == b'QUAD': + vtk_type = vtk.VTK_QUAD + n_nodes = 4 + elif f['/geometry/T_c'].attrs['VTK_TYPE'] == b'TETRA': # not tested + vtk_type = vtk.VTK_TETRA + n_nodes = 4 + elif f['/geometry/T_c'].attrs['VTK_TYPE'] == b'HEXAHEDRON': + vtk_type = vtk.VTK_HEXAHEDRON + n_nodes = 8 + + for i in f['/geometry/T_c']: + vtk_geom.InsertNextCell(vtk_type,n_nodes,i-1) + + elif mode == 'Point': + Points = vtk.vtkPoints() + Vertices = vtk.vtkCellArray() + for c in self.cell_coordinates(): + pointID = Points.InsertNextPoint(c) + Vertices.InsertNextCell(1) + Vertices.InsertCellPoint(pointID) + + vtk_geom = vtk.vtkPolyData() + vtk_geom.SetPoints(Points) + vtk_geom.SetVerts(Vertices) + vtk_geom.Modified() + + N_digits = int(np.floor(np.log10(int(self.increments[-1][3:]))))+1 + + for i,inc in enumerate(self.iter_visible('increments')): + vtk_data = [] + + materialpoints_backup = self.visible['materialpoints'].copy() + self.set_visible('materialpoints',False) + for label in (labels if isinstance(labels,list) else [labels]): + for p in self.iter_visible('con_physics'): + if p != 'generic': + for c in self.iter_visible('constituents'): + x = self.get_dataset_location(label) + if len(x) == 0: + continue + array = self.read_dataset(x,0) + shape = [array.shape[0],np.product(array.shape[1:])] + vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape), + deep=True,array_type= vtk.VTK_DOUBLE)) + vtk_data[-1].SetName('1_'+x[0].split('/',1)[1]) #ToDo: hard coded 1! 
+ vtk_geom.GetCellData().AddArray(vtk_data[-1]) + + else: + x = self.get_dataset_location(label) + if len(x) == 0: + continue + array = self.read_dataset(x,0) + shape = [array.shape[0],np.product(array.shape[1:])] + vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape), + deep=True,array_type= vtk.VTK_DOUBLE)) + ph_name = re.compile(r'(?<=(constituent\/))(.*?)(?=(generic))') # identify phase name + dset_name = '1_' + re.sub(ph_name,r'',x[0].split('/',1)[1]) # removing phase name + vtk_data[-1].SetName(dset_name) + vtk_geom.GetCellData().AddArray(vtk_data[-1]) + + self.set_visible('materialpoints',materialpoints_backup) + + constituents_backup = self.visible['constituents'].copy() + self.set_visible('constituents',False) + for label in (labels if isinstance(labels,list) else [labels]): + for p in self.iter_visible('mat_physics'): + if p != 'generic': + for m in self.iter_visible('materialpoints'): + x = self.get_dataset_location(label) + if len(x) == 0: + continue + array = self.read_dataset(x,0) + shape = [array.shape[0],np.product(array.shape[1:])] + vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape), + deep=True,array_type= vtk.VTK_DOUBLE)) + vtk_data[-1].SetName('1_'+x[0].split('/',1)[1]) #ToDo: why 1_? + vtk_geom.GetCellData().AddArray(vtk_data[-1]) + else: + x = self.get_dataset_location(label) + if len(x) == 0: + continue + array = self.read_dataset(x,0) + shape = [array.shape[0],np.product(array.shape[1:])] + vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape), + deep=True,array_type= vtk.VTK_DOUBLE)) + vtk_data[-1].SetName('1_'+x[0].split('/',1)[1]) + vtk_geom.GetCellData().AddArray(vtk_data[-1]) + self.set_visible('constituents',constituents_backup) + + if mode=='Cell': + writer = vtk.vtkXMLRectilinearGridWriter() if self.structured else \ + vtk.vtkXMLUnstructuredGridWriter() + x = self.get_dataset_location('u_n') + vtk_data.append(numpy_support.numpy_to_vtk(num_array=self.read_dataset(x,0), + deep=True,array_type=vtk.VTK_DOUBLE)) + vtk_data[-1].SetName('u') + vtk_geom.GetPointData().AddArray(vtk_data[-1]) + elif mode == 'Point': + writer = vtk.vtkXMLPolyDataWriter() + + + file_out = '{}_inc{}.{}'.format(os.path.splitext(os.path.basename(self.fname))[0], + inc[3:].zfill(N_digits), + writer.GetDefaultFileExtension()) + + writer.SetCompressorTypeToZLib() + writer.SetDataModeToBinary() + writer.SetFileName(file_out) + writer.SetInputData(vtk_geom) + + writer.Write() From 8ae3346bd4606205deab37c2a31fdcf925378196 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Fri, 21 Feb 2020 07:46:55 +0100 Subject: [PATCH 16/51] constituents/components are not handled by "view" (active) --- python/damask/dadf5.py | 1 - 1 file changed, 1 deletion(-) diff --git a/python/damask/dadf5.py b/python/damask/dadf5.py index f61a471a9..b3833b21e 100644 --- a/python/damask/dadf5.py +++ b/python/damask/dadf5.py @@ -76,7 +76,6 @@ class DADF5(): self.visible= {'increments': self.increments, 'constituents': self.constituents, 'materialpoints': self.materialpoints, - 'constituent': range(self.Nconstituents), # ToDo: stupid naming 'con_physics': self.con_physics, 'mat_physics': self.mat_physics} From 98f5c601a36b0a6620165196671e3ce6d2cd2d6c Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Fri, 21 Feb 2020 07:58:10 +0100 Subject: [PATCH 17/51] line too long --- python/damask/dadf5.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/python/damask/dadf5.py b/python/damask/dadf5.py index b3833b21e..76c87d7df 100644 --- 
a/python/damask/dadf5.py +++ b/python/damask/dadf5.py @@ -351,7 +351,8 @@ class DADF5(): for d in f[group].keys(): try: dataset = f['/'.join([group,d])] - message+=' {} / ({}): {}\n'.format(d,dataset.attrs['Unit'].decode(),dataset.attrs['Description'].decode()) + message+=' {} / ({}): {}\n'.\ + format(d,dataset.attrs['Unit'].decode(),dataset.attrs['Description'].decode()) except KeyError: pass return message From c1caef4bc91c299e012f2532026e0ac24113e36b Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Fri, 21 Feb 2020 08:06:58 +0100 Subject: [PATCH 18/51] fixing prospector complaints --- python/damask/dadf5.py | 11 ++++++----- python/damask/mechanics.py | 1 + python/damask/util.py | 3 +-- python/tests/test_mechanics.py | 1 - 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/python/damask/dadf5.py b/python/damask/dadf5.py index 76c87d7df..2d9533537 100644 --- a/python/damask/dadf5.py +++ b/python/damask/dadf5.py @@ -829,8 +829,8 @@ class DADF5(): for i,q in enumerate(q['data']): o = Rotation(np.array([q['w'],q['x'],q['y'],q['z']])) - rotatedPole = o*pole # rotate pole according to crystal orientation - (x,y) = rotatedPole[0:2]/(1.+abs(pole[2])) # stereographic projection + rotatedPole = o*unit_pole # rotate pole according to crystal orientation + (x,y) = rotatedPole[0:2]/(1.+abs(unit_pole[2])) # stereographic projection coords[i] = [np.sqrt(x*x+y*y),np.arctan2(y,x)] if polar else [x,y] return { @@ -839,8 +839,8 @@ class DADF5(): 'meta' : { 'Unit': '1', 'Description': '{} coordinates of stereographic projection of pole (direction/plane) in crystal frame'\ - .format('Polar' if polar else 'Cartesian'), - 'Creator' : 'dadf5.py:addPole v{}'.format(version) + .format('Polar' if polar else 'Cartesian'), + 'Creator' : 'dadf5.py:add_pole v{}'.format(version) } } @@ -980,7 +980,8 @@ class DADF5(): args['results'].put({**args['func'](**args['in']),'group':args['group']}) env = Environment() - N_threads = 1#int(env.options['DAMASK_NUM_THREADS']) + N_threads = int(env.options['DAMASK_NUM_THREADS']) + N_threads //=N_threads # disable for the moment results = Queue(N_threads) pool = util.ThreadPool(N_threads) diff --git a/python/damask/mechanics.py b/python/damask/mechanics.py index 79245e887..9502ebf92 100644 --- a/python/damask/mechanics.py +++ b/python/damask/mechanics.py @@ -299,6 +299,7 @@ def __Mises(x,s): Symmetric tensor of which the von Mises equivalent is computed. s : float Scaling factor (2/3 for strain, 3/2 for stress). 
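
The damask.mechanics helpers touched here also operate on plain numpy arrays, independent of any result file; a small sketch with random input, shaped as in the tests:

    import numpy as np
    from damask import mechanics

    A   = mechanics.symmetric(np.random.random((10,3,3)))   # batch of symmetric tensors
    lam = mechanics.eigenvalues(A)                           # eigenvalues, shape (10,3)
    tau = mechanics.maximum_shear(A)                         # maximum shear per tensor
    svM = mechanics.Mises_stress(A)                          # von Mises equivalent per tensor
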
+ """ d = deviatoric_part(x) return np.sqrt(s*(np.sum(d**2.0))) if np.shape(x) == (3,3) else \ diff --git a/python/damask/util.py b/python/damask/util.py index 3ac59b78b..1cb7b8602 100644 --- a/python/damask/util.py +++ b/python/damask/util.py @@ -167,11 +167,10 @@ def progressBar(iteration, total, prefix='', bar_length=50): def scale_to_coprime(v): """Scale vector to co-prime (relatively prime) integers.""" - MAX_DENOMINATOR = 1000 def get_square_denominator(x): - """returns the denominator of the square of a number.""" + """Denominator of the square of a number.""" return Fraction(x ** 2).limit_denominator(MAX_DENOMINATOR).denominator def lcm(a, b): diff --git a/python/tests/test_mechanics.py b/python/tests/test_mechanics.py index 91e3d2d9e..6b2cc7087 100644 --- a/python/tests/test_mechanics.py +++ b/python/tests/test_mechanics.py @@ -217,7 +217,6 @@ class TestMechanics: A = mechanics.symmetric(np.random.random((self.n,3,3))) LRHS = np.linalg.det(mechanics.eigenvectors(A,RHS=False)) RHS = np.linalg.det(mechanics.eigenvectors(A,RHS=True)) - s = np.random.randint(self.n) assert np.allclose(np.abs(LRHS),RHS) def test_spherical_no_shear(self): From 75280b9fb42e37a6a61e02bbd3ec5f9ed9d937fc Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Fri, 21 Feb 2020 11:00:53 +0100 Subject: [PATCH 19/51] correct conversion for python3 --- processing/post/permuteData.py | 2 +- processing/pre/seeds_fromDistribution.py | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/processing/post/permuteData.py b/processing/post/permuteData.py index a589cb6da..81af71adb 100755 --- a/processing/post/permuteData.py +++ b/processing/post/permuteData.py @@ -83,7 +83,7 @@ for name in filenames: # ------------------------------------------ assemble header --------------------------------------- - randomSeed = int(os.urandom(4).encode('hex'), 16) if options.randomSeed is None else options.randomSeed # random seed per file + randomSeed = int(os.urandom(4).hex(), 16) if options.randomSeed is None else options.randomSeed # random seed per file np.random.seed(randomSeed) table.info_append([scriptID + '\t' + ' '.join(sys.argv[1:]), diff --git a/processing/pre/seeds_fromDistribution.py b/processing/pre/seeds_fromDistribution.py index 2e8936f27..7bd97db60 100755 --- a/processing/pre/seeds_fromDistribution.py +++ b/processing/pre/seeds_fromDistribution.py @@ -1,11 +1,9 @@ #!/usr/bin/env python3 -# -*- coding: UTF-8 no BOM -*- import threading,time,os,sys,random import numpy as np from optparse import OptionParser from io import StringIO -import binascii import damask scriptName = os.path.splitext(os.path.basename(__file__))[0] @@ -16,9 +14,10 @@ currentSeedsName = None #--------------------------------------------------------------------------------------------------- class myThread (threading.Thread): - """perturbes seed in seed file, performes Voronoi tessellation, evaluates, and updates best match""" + """Perturb seed in seed file, performes Voronoi tessellation, evaluates, and updates best match.""" def __init__(self, threadID): + """Threading class with thread ID.""" threading.Thread.__init__(self) self.threadID = threadID @@ -215,7 +214,7 @@ options = parser.parse_args()[0] damask.util.report(scriptName,options.seedFile) if options.randomSeed is None: - options.randomSeed = int(binascii.hexlify(os.urandom(4)),16) + options.randomSeed = int(os.urandom(4).hex(),16) damask.util.croak(options.randomSeed) delta = (options.scale/options.grid[0],options.scale/options.grid[1],options.scale/options.grid[2]) 
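
The Python 3 conversion in both scripts boils down to the same idiom for drawing a 32-bit random seed, replacing the removed 'hex' codec / binascii.hexlify calls:

    import os

    seed = int(os.urandom(4).hex(),16)   # bytes.hex() is the Python 3 replacement
    print(seed)
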
baseFile=os.path.splitext(os.path.basename(options.seedFile))[0] From 16ddd9c5b2518c45598e62a73bdfba18e74d7516 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Fri, 21 Feb 2020 12:20:42 +0100 Subject: [PATCH 20/51] better name backport from dadf5-usability branch --- python/damask/dadf5.py | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/python/damask/dadf5.py b/python/damask/dadf5.py index 2d9533537..0bd4def28 100644 --- a/python/damask/dadf5.py +++ b/python/damask/dadf5.py @@ -73,11 +73,11 @@ class DADF5(): self.mat_physics += f['/'.join([self.increments[0],'materialpoint',m])].keys() self.mat_physics = list(set(self.mat_physics)) # make unique - self.visible= {'increments': self.increments, - 'constituents': self.constituents, - 'materialpoints': self.materialpoints, - 'con_physics': self.con_physics, - 'mat_physics': self.mat_physics} + self.selection= {'increments': self.increments, + 'constituents': self.constituents, + 'materialpoints': self.materialpoints, + 'con_physics': self.con_physics, + 'mat_physics': self.mat_physics} self.fname = fname @@ -92,7 +92,7 @@ class DADF5(): name of datasets as list, supports ? and * wildcards. True is equivalent to [*], False is equivalent to [] what : str - attribute to change (must be in self.visible) + attribute to change (must be in self.selection) action : str select from 'set', 'add', and 'del' @@ -105,14 +105,14 @@ class DADF5(): choice = [datasets] if isinstance(datasets,str) else datasets valid = [e for e_ in [glob.fnmatch.filter(getattr(self,what),s) for s in choice] for e in e_] - existing = set(self.visible[what]) + existing = set(self.selection[what]) if action == 'set': - self.visible[what] = valid + self.selection[what] = valid elif action == 'add': - self.visible[what] = list(existing.union(valid)) + self.selection[what] = list(existing.union(valid)) elif action == 'del': - self.visible[what] = list(existing.difference_update(valid)) + self.selection[what] = list(existing.difference_update(valid)) def __time_to_inc(self,start,end): @@ -229,17 +229,17 @@ class DADF5(): Parameters ---------- what : str - attribute to change (must be in self.visible) + attribute to change (must be in self.selection) """ - datasets = self.visible[what] + datasets = self.selection[what] last_datasets = datasets.copy() for dataset in datasets: - if last_datasets != self.visible[what]: + if last_datasets != self.selection[what]: self.__manage_visible(datasets,what,'set') raise Exception self.__manage_visible(dataset,what,'set') - last_datasets = self.visible[what] + last_datasets = self.selection[what] yield dataset self.__manage_visible(datasets,what,'set') @@ -254,7 +254,7 @@ class DADF5(): name of datasets as list, supports ? and * wildcards. True is equivalent to [*], False is equivalent to [] what : str - attribute to change (must be in self.visible) + attribute to change (must be in self.selection) """ self.__manage_visible(datasets,what,'set') @@ -270,7 +270,7 @@ class DADF5(): name of datasets as list, supports ? and * wildcards. True is equivalent to [*], False is equivalent to [] what : str - attribute to change (must be in self.visible) + attribute to change (must be in self.selection) """ self.__manage_visible(datasets,what,'add') @@ -286,7 +286,7 @@ class DADF5(): name of datasets as list, supports ? and * wildcards. 
True is equivalent to [*], False is equivalent to [] what : str - attribute to change (must be in self.visible) + attribute to change (must be in self.selection) """ self.__manage_visible(datasets,what,'del') @@ -1094,7 +1094,7 @@ class DADF5(): for i,inc in enumerate(self.iter_visible('increments')): vtk_data = [] - materialpoints_backup = self.visible['materialpoints'].copy() + materialpoints_backup = self.selection['materialpoints'].copy() self.set_visible('materialpoints',False) for label in (labels if isinstance(labels,list) else [labels]): for p in self.iter_visible('con_physics'): @@ -1125,7 +1125,7 @@ class DADF5(): self.set_visible('materialpoints',materialpoints_backup) - constituents_backup = self.visible['constituents'].copy() + constituents_backup = self.selection['constituents'].copy() self.set_visible('constituents',False) for label in (labels if isinstance(labels,list) else [labels]): for p in self.iter_visible('mat_physics'): From a433f7ef54d6b68dc811b7509b17ba10b9c39509 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Fri, 21 Feb 2020 13:03:50 +0100 Subject: [PATCH 21/51] style unification backport from dadf5-usability branch --- python/damask/dadf5.py | 228 ++++++++++++++++----------------- python/damask/mechanics.py | 12 +- python/tests/test_DADF5.py | 22 ++-- python/tests/test_mechanics.py | 12 +- 4 files changed, 137 insertions(+), 137 deletions(-) diff --git a/python/damask/dadf5.py b/python/damask/dadf5.py index 0bd4def28..d8528408c 100644 --- a/python/damask/dadf5.py +++ b/python/damask/dadf5.py @@ -454,7 +454,7 @@ class DADF5(): Parameters ---------- x : str - Label of the dataset containing a scalar, vector, or tensor. + Label of scalar, vector, or tensor dataset to take absolute value of. """ def _add_absolute(x): @@ -472,25 +472,25 @@ class DADF5(): self.__add_generic_pointwise(_add_absolute,{'x':x}) - def add_calculation(self,formula,label,unit='n/a',description=None,vectorized=True): + def add_calculation(self,label,formula,unit='n/a',description=None,vectorized=True): """ Add result of a general formula. Parameters ---------- - formula : str - Formula, refer to datasets by ‘#Label#‘. label : str - Label of the dataset containing the result of the calculation. + Label of resulting dataset. + formula : str + Formula to calculate resulting dataset. Existing datasets are referenced by ‘#TheirLabel#‘. unit : str, optional Physical unit of the result. description : str, optional - Human readable description of the result. + Human-readable description of the result. vectorized : bool, optional - Indicate whether the formula is written in vectorized form. Default is ‘True’. + Indicate whether the formula can be used in vectorized form. Defaults to ‘True’. """ - if vectorized is False: + if not vectorized: raise NotImplementedError def _add_calculation(**kwargs): @@ -515,22 +515,22 @@ class DADF5(): self.__add_generic_pointwise(_add_calculation,dataset_mapping,args) - def add_Cauchy(self,F='F',P='P'): + def add_Cauchy(self,P='P',F='F'): """ - Add Cauchy stress calculated from 1. Piola-Kirchhoff stress and deformation gradient. + Add Cauchy stress calculated from first Piola-Kirchhoff stress and deformation gradient. Parameters ---------- P : str, optional - Label of the dataset containing the 1. Piola-Kirchhoff stress. Default value is ‘P’. + Label of the dataset containing the first Piola-Kirchhoff stress. Defaults to ‘P’. F : str, optional - Label of the dataset containing the deformation gradient. Default value is ‘F’. 
+ Label of the dataset containing the deformation gradient. Defaults to ‘F’. """ - def _add_Cauchy(F,P): + def _add_Cauchy(P,F): return { - 'data': mechanics.Cauchy(F['data'],P['data']), + 'data': mechanics.Cauchy(P['data'],F['data']), 'label': 'sigma', 'meta': { 'Unit': P['meta']['Unit'], @@ -541,110 +541,110 @@ class DADF5(): } } - self.__add_generic_pointwise(_add_Cauchy,{'F':F,'P':P}) + self.__add_generic_pointwise(_add_Cauchy,{'P':P,'F':F}) - def add_determinant(self,x): + def add_determinant(self,T): """ Add the determinant of a tensor. Parameters ---------- - x : str - Label of the dataset containing a tensor. + T : str + Label of tensor dataset. """ - def _add_determinant(x): + def _add_determinant(T): return { - 'data': np.linalg.det(x['data']), - 'label': 'det({})'.format(x['label']), + 'data': np.linalg.det(T['data']), + 'label': 'det({})'.format(T['label']), 'meta': { - 'Unit': x['meta']['Unit'], - 'Description': 'Determinant of tensor {} ({})'.format(x['label'],x['meta']['Description']), + 'Unit': T['meta']['Unit'], + 'Description': 'Determinant of tensor {} ({})'.format(T['label'],T['meta']['Description']), 'Creator': 'dadf5.py:add_determinant v{}'.format(version) } } - self.__add_generic_pointwise(_add_determinant,{'x':x}) + self.__add_generic_pointwise(_add_determinant,{'T':T}) - def add_deviator(self,x): + def add_deviator(self,T): """ Add the deviatoric part of a tensor. Parameters ---------- - x : str - Label of the dataset containing a tensor. + T : str + Label of tensor dataset. """ - def _add_deviator(x): + def _add_deviator(T): - if not np.all(np.array(x['data'].shape[1:]) == np.array([3,3])): + if not np.all(np.array(T['data'].shape[1:]) == np.array([3,3])): raise ValueError return { - 'data': mechanics.deviatoric_part(x['data']), - 'label': 's_{}'.format(x['label']), + 'data': mechanics.deviatoric_part(T['data']), + 'label': 's_{}'.format(T['label']), 'meta': { - 'Unit': x['meta']['Unit'], - 'Description': 'Deviator of tensor {} ({})'.format(x['label'],x['meta']['Description']), + 'Unit': T['meta']['Unit'], + 'Description': 'Deviator of tensor {} ({})'.format(T['label'],T['meta']['Description']), 'Creator': 'dadf5.py:add_deviator v{}'.format(version) } } - self.__add_generic_pointwise(_add_deviator,{'x':x}) + self.__add_generic_pointwise(_add_deviator,{'T':T}) - def add_eigenvalues(self,x): + def add_eigenvalues(self,S): """ Add eigenvalues of symmetric tensor. Parameters ---------- - x : str - Label of the dataset containing a symmetric tensor. + S : str + Label of symmetric tensor dataset. """ - def _add_eigenvalue(x): + def _add_eigenvalue(S): return { - 'data': mechanics.eigenvalues(x['data']), - 'label': 'lambda({})'.format(x['label']), + 'data': mechanics.eigenvalues(S['data']), + 'label': 'lambda({})'.format(S['label']), 'meta' : { - 'Unit': x['meta']['Unit'], - 'Description': 'Eigenvalues of {} ({})'.format(x['label'],x['meta']['Description']), + 'Unit': S['meta']['Unit'], + 'Description': 'Eigenvalues of {} ({})'.format(S['label'],S['meta']['Description']), 'Creator': 'dadf5.py:add_eigenvalues v{}'.format(version) } } - self.__add_generic_pointwise(_add_eigenvalue,{'x':x}) + self.__add_generic_pointwise(_add_eigenvalue,{'S':S}) - def add_eigenvectors(self,x): + def add_eigenvectors(self,S): """ Add eigenvectors of symmetric tensor. Parameters ---------- - x : str - Label of the dataset containing a symmetric tensor. + S : str + Label of symmetric tensor dataset. 
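+
+        Example (editor's sketch, not part of this patch; file and dataset
+        names are assumed):
+
+        >>> import damask
+        >>> d = damask.DADF5('geom_load.hdf5')     # hypothetical result file
+        >>> d.add_eigenvectors('sigma')            # stored as 'v(sigma)'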
""" - def _add_eigenvector(x): + def _add_eigenvector(S): return { - 'data': mechanics.eigenvectors(x['data']), - 'label': 'v({})'.format(x['label']), + 'data': mechanics.eigenvectors(S['data']), + 'label': 'v({})'.format(S['label']), 'meta' : { 'Unit': '1', - 'Description': 'Eigenvectors of {} ({})'.format(x['label'],x['meta']['Description']), + 'Description': 'Eigenvectors of {} ({})'.format(S['label'],S['meta']['Description']), 'Creator': 'dadf5.py:add_eigenvectors v{}'.format(version) } } - self.__add_generic_pointwise(_add_eigenvector,{'x':x}) + self.__add_generic_pointwise(_add_eigenvector,{'S':S}) def add_IPFcolor(self,q,p): @@ -654,9 +654,9 @@ class DADF5(): Parameters ---------- q : str - Label of the dataset containing the orientation data as quaternions. - p : list of int - Pole direction as Miller indices. + Label of the dataset containing the crystallographic orientation as quaternions. + p : list of int #ToDo: Direction / int? + Pole (crystallographic direction or plane). """ def _add_IPFcolor(q,p): @@ -686,56 +686,56 @@ class DADF5(): self.__add_generic_pointwise(_add_IPFcolor,{'q':q},{'p':p}) - def add_maximum_shear(self,x): + def add_maximum_shear(self,S): """ Add maximum shear components of symmetric tensor. Parameters ---------- - x : str - Label of the dataset containing a symmetric tensor. + S : str + Label of symmetric tensor dataset. """ - def _add_maximum_shear(x): + def _add_maximum_shear(S): return { - 'data': mechanics.maximum_shear(x['data']), - 'label': 'max_shear({})'.format(x['label']), + 'data': mechanics.maximum_shear(S['data']), + 'label': 'max_shear({})'.format(S['label']), 'meta': { - 'Unit': x['meta']['Unit'], - 'Description': 'Maximum shear component of of {} ({})'.format(x['label'],x['meta']['Description']), + 'Unit': S['meta']['Unit'], + 'Description': 'Maximum shear component of {} ({})'.format(S['label'],S['meta']['Description']), 'Creator': 'dadf5.py:add_maximum_shear v{}'.format(version) } } - self.__add_generic_pointwise(_add_maximum_shear,{'x':x}) + self.__add_generic_pointwise(_add_maximum_shear,{'S':S}) - def add_Mises(self,x): + def add_Mises(self,S): """ Add the equivalent Mises stress or strain of a symmetric tensor. Parameters ---------- - x : str - Label of the dataset containing a symmetric stress or strain tensor. + S : str + Label of symmetric tensorial stress or strain dataset. """ - def _add_Mises(x): + def _add_Mises(S): - t = 'strain' if x['meta']['Unit'] == '1' else \ + t = 'strain' if S['meta']['Unit'] == '1' else \ 'stress' return { - 'data': mechanics.Mises_strain(x['data']) if t=='strain' else mechanics.Mises_stress(x['data']), - 'label': '{}_vM'.format(x['label']), + 'data': mechanics.Mises_strain(S['data']) if t=='strain' else mechanics.Mises_stress(S['data']), + 'label': '{}_vM'.format(S['label']), 'meta': { - 'Unit': x['meta']['Unit'], - 'Description': 'Mises equivalent {} of {} ({})'.format(t,x['label'],x['meta']['Description']), + 'Unit': S['meta']['Unit'], + 'Description': 'Mises equivalent {} of {} ({})'.format(t,S['label'],S['meta']['Description']), 'Creator': 'dadf5.py:add_Mises v{}'.format(version) } } - self.__add_generic_pointwise(_add_Mises,{'x':x}) + self.__add_generic_pointwise(_add_Mises,{'S':S}) def add_norm(self,x,ord=None): @@ -745,9 +745,9 @@ class DADF5(): Parameters ---------- x : str - Label of the dataset containing a vector or tensor. + Label of vector or tensor dataset. ord : {non-zero int, inf, -inf, ‘fro’, ‘nuc’}, optional - Order of the norm. inf means numpy’s inf object. 
For details refer to numpy.linalg.norm. + Order of the norm. inf means NumPy’s inf object. For details refer to numpy.linalg.norm. """ def _add_norm(x,ord): @@ -769,7 +769,7 @@ class DADF5(): 'label': '|{}|_{}'.format(x['label'],o), 'meta': { 'Unit': x['meta']['Unit'], - 'Description': '{}-Norm of {} {} ({})'.format(ord,t,x['label'],x['meta']['Description']), + 'Description': '{}-norm of {} {} ({})'.format(ord,t,x['label'],x['meta']['Description']), 'Creator': 'dadf5.py:add_norm v{}'.format(version) } } @@ -777,22 +777,22 @@ class DADF5(): self.__add_generic_pointwise(_add_norm,{'x':x},{'ord':ord}) - def add_PK2(self,F='F',P='P'): + def add_PK2(self,P='P',F='F'): """ - Add 2. Piola-Kirchhoff calculated from 1. Piola-Kirchhoff stress and deformation gradient. + Add 2. Piola-Kirchhoff calculated from first Piola-Kirchhoff stress and deformation gradient. Parameters ---------- P : str, optional - Label of the dataset containing the 1. Piola-Kirchhoff stress. Default value is ‘P’. + Label first Piola-Kirchhoff stress dataset. Defaults to ‘P’. F : str, optional - Label of the dataset containing the deformation gradient. Default value is ‘F’. + Label of deformation gradient dataset. Defaults to ‘F’. """ - def _add_PK2(F,P): + def _add_PK2(P,F): return { - 'data': mechanics.PK2(F['data'],P['data']), + 'data': mechanics.PK2(P['data'],F['data']), 'label': 'S', 'meta': { 'Unit': P['meta']['Unit'], @@ -803,7 +803,7 @@ class DADF5(): } } - self.__add_generic_pointwise(_add_PK2,{'F':F,'P':P}) + self.__add_generic_pointwise(_add_PK2,{'P':P,'F':F}) def add_pole(self,q,p,polar=False): @@ -813,11 +813,11 @@ class DADF5(): Parameters ---------- q : str - Label of the dataset containing the crystallographic orientation as a quaternion. + Label of the dataset containing the crystallographic orientation as quaternions. p : numpy.array of shape (3) Pole in crystal frame. polar : bool, optional - Give pole in polar coordinates. Default is false. + Give pole in polar coordinates. Defaults to false. """ def _add_pole(q,p,polar): @@ -853,8 +853,8 @@ class DADF5(): Parameters ---------- - F : str - Label of the dataset containing a deformation gradient. Default value is ‘F’. + F : str, optional + Label of deformation gradient dataset. """ def _add_rotational_part(F): @@ -872,49 +872,49 @@ class DADF5(): self.__add_generic_pointwise(_add_rotational_part,{'F':F}) - def add_spherical(self,x): + def add_spherical(self,T): """ Add the spherical (hydrostatic) part of a tensor. Parameters ---------- - x : str - Label of the dataset containing a tensor. + T : str + Label of tensor dataset. 
""" - def _add_spherical(x): + def _add_spherical(T): - if not np.all(np.array(x['data'].shape[1:]) == np.array([3,3])): + if not np.all(np.array(T['data'].shape[1:]) == np.array([3,3])): raise ValueError return { - 'data': mechanics.spherical_part(x['data']), - 'label': 'p_{}'.format(x['label']), + 'data': mechanics.spherical_part(T['data']), + 'label': 'p_{}'.format(T['label']), 'meta': { - 'Unit': x['meta']['Unit'], - 'Description': 'Spherical component of tensor {} ({})'.format(x['label'],x['meta']['Description']), + 'Unit': T['meta']['Unit'], + 'Description': 'Spherical component of tensor {} ({})'.format(T['label'],T['meta']['Description']), 'Creator': 'dadf5.py:add_spherical v{}'.format(version) } } - self.__add_generic_pointwise(_add_spherical,{'x':x}) + self.__add_generic_pointwise(_add_spherical,{'T':T}) - def add_strain_tensor(self,F='F',t='U',m=0): + def add_strain_tensor(self,F='F',t='V',m=0.0): """ - Add strain tensor calculated from a deformation gradient. + Add strain tensor of a deformation gradient. For details refer to damask.mechanics.strain_tensor Parameters ---------- F : str, optional - Label of the dataset containing the deformation gradient. Default value is ‘F’. + Label of deformation gradient dataset. Defaults to ‘F’. t : {‘V’, ‘U’}, optional - Type of the polar decomposition, ‘U’ for right stretch tensor and ‘V’ for left stretch tensor. - Default value is ‘U’. + Type of the polar decomposition, ‘V’ for left stretch tensor and ‘U’ for right stretch tensor. + Defaults to ‘V’. m : float, optional - Order of the strain calculation. Default value is ‘0.0’. + Order of the strain calculation. Defaults to ‘0.0’. """ def _add_strain_tensor(F,t,m): @@ -932,17 +932,17 @@ class DADF5(): self.__add_generic_pointwise(_add_strain_tensor,{'F':F},{'t':t,'m':m}) - def add_stretch_tensor(self,F='F',t='U'): + def add_stretch_tensor(self,F='F',t='V'): """ - Add stretch tensor calculated from a deformation gradient. + Add stretch tensor of a deformation gradient. Parameters ---------- F : str, optional - Label of the dataset containing the deformation gradient. Default value is ‘F’. + Label of deformation gradient dataset. Defaults to ‘F’. t : {‘V’, ‘U’}, optional - Type of the polar decomposition, ‘U’ for right stretch tensor and ‘V’ for left stretch tensor. - Default value is ‘U’. + Type of the polar decomposition, ‘V’ for left stretch tensor and ‘U’ for right stretch tensor. + Defaults to ‘V’. """ def _add_stretch_tensor(F,t): @@ -969,8 +969,8 @@ class DADF5(): ---------- func : function Function that calculates a new dataset from one or more datasets per HDF5 group. - datasets_requested : list of dictionaries - Details of the datasets to be used: label (in HDF5 file) and arg (argument to which the data is parsed in func). + dataset_mapping : dictionary + Mapping HDF5 data label to callback function argument extra_args : dictionary, optional Any extra arguments parsed to func. @@ -1018,7 +1018,7 @@ class DADF5(): pool.wait_completion() - def to_vtk(self,labels,mode='Cell'): + def to_vtk(self,labels,mode='cell'): """ Export to vtk cell/point data. @@ -1026,12 +1026,12 @@ class DADF5(): ---------- labels : str or list of Labels of the datasets to be exported. - mode : str, either 'Cell' or 'Point' + mode : str, either 'cell' or 'point' Export in cell format or point format. - Default value is 'Cell'. + Defaults to 'cell'. 
""" - if mode=='Cell': + if mode.lower()=='cell': if self.structured: diff --git a/python/damask/mechanics.py b/python/damask/mechanics.py index 9502ebf92..9ffd76536 100644 --- a/python/damask/mechanics.py +++ b/python/damask/mechanics.py @@ -1,8 +1,8 @@ import numpy as np -def Cauchy(F,P): +def Cauchy(P,F): """ - Return Cauchy stress calculated from 1. Piola-Kirchhoff stress and deformation gradient. + Return Cauchy stress calculated from first Piola-Kirchhoff stress and deformation gradient. Resulting tensor is symmetrized as the Cauchy stress needs to be symmetric. @@ -129,16 +129,16 @@ def Mises_stress(sigma): return __Mises(sigma,3.0/2.0) -def PK2(F,P): +def PK2(P,F): """ - Return 2. Piola-Kirchhoff stress calculated from 1. Piola-Kirchhoff stress and deformation gradient. + Calculate second Piola-Kirchhoff stress from first Piola-Kirchhoff stress and deformation gradient. Parameters ---------- - F : numpy.array of shape (:,3,3) or (3,3) - Deformation gradient. P : numpy.array of shape (:,3,3) or (3,3) 1. Piola-Kirchhoff stress. + F : numpy.array of shape (:,3,3) or (3,3) + Deformation gradient. """ if np.shape(F) == np.shape(P) == (3,3): diff --git a/python/tests/test_DADF5.py b/python/tests/test_DADF5.py index a2f6cc920..4cf3c4e2b 100644 --- a/python/tests/test_DADF5.py +++ b/python/tests/test_DADF5.py @@ -40,7 +40,7 @@ class TestDADF5: assert np.allclose(in_memory,in_file) def test_add_calculation(self,default): - default.add_calculation('2.0*np.abs(#F#)-1.0','x','-','test') + default.add_calculation('x','2.0*np.abs(#F#)-1.0','-','my notes') loc = {'F': default.get_dataset_location('F'), 'x': default.get_dataset_location('x')} in_memory = 2.0*np.abs(default.read_dataset(loc['F'],0))-1.0 @@ -48,12 +48,12 @@ class TestDADF5: assert np.allclose(in_memory,in_file) def test_add_Cauchy(self,default): - default.add_Cauchy('F','P') + default.add_Cauchy('P','F') loc = {'F': default.get_dataset_location('F'), 'P': default.get_dataset_location('P'), 'sigma':default.get_dataset_location('sigma')} - in_memory = mechanics.Cauchy(default.read_dataset(loc['F'],0), - default.read_dataset(loc['P'],0)) + in_memory = mechanics.Cauchy(default.read_dataset(loc['P'],0), + default.read_dataset(loc['F'],0)) in_file = default.read_dataset(loc['sigma'],0) assert np.allclose(in_memory,in_file) @@ -74,7 +74,7 @@ class TestDADF5: assert np.allclose(in_memory,in_file) def test_add_eigenvalues(self,default): - default.add_Cauchy('F','P') + default.add_Cauchy('P','F') default.add_eigenvalues('sigma') loc = {'sigma' :default.get_dataset_location('sigma'), 'lambda(sigma)':default.get_dataset_location('lambda(sigma)')} @@ -83,7 +83,7 @@ class TestDADF5: assert np.allclose(in_memory,in_file) def test_add_eigenvectors(self,default): - default.add_Cauchy('F','P') + default.add_Cauchy('P','F') default.add_eigenvectors('sigma') loc = {'sigma' :default.get_dataset_location('sigma'), 'v(sigma)':default.get_dataset_location('v(sigma)')} @@ -92,7 +92,7 @@ class TestDADF5: assert np.allclose(in_memory,in_file) def test_add_maximum_shear(self,default): - default.add_Cauchy('F','P') + default.add_Cauchy('P','F') default.add_maximum_shear('sigma') loc = {'sigma' :default.get_dataset_location('sigma'), 'max_shear(sigma)':default.get_dataset_location('max_shear(sigma)')} @@ -113,7 +113,7 @@ class TestDADF5: assert np.allclose(in_memory,in_file) def test_add_Mises_stress(self,default): - default.add_Cauchy('F','P') + default.add_Cauchy('P','F') default.add_Mises('sigma') loc = {'sigma' :default.get_dataset_location('sigma'), 
'sigma_vM':default.get_dataset_location('sigma_vM')} @@ -130,12 +130,12 @@ class TestDADF5: assert np.allclose(in_memory,in_file) def test_add_PK2(self,default): - default.add_PK2('F','P') + default.add_PK2('P','F') loc = {'F':default.get_dataset_location('F'), 'P':default.get_dataset_location('P'), 'S':default.get_dataset_location('S')} - in_memory = mechanics.PK2(default.read_dataset(loc['F'],0), - default.read_dataset(loc['P'],0)) + in_memory = mechanics.PK2(default.read_dataset(loc['P'],0), + default.read_dataset(loc['F'],0)) in_file = default.read_dataset(loc['S'],0) assert np.allclose(in_memory,in_file) diff --git a/python/tests/test_mechanics.py b/python/tests/test_mechanics.py index 6b2cc7087..4248254ab 100644 --- a/python/tests/test_mechanics.py +++ b/python/tests/test_mechanics.py @@ -10,8 +10,8 @@ class TestMechanics: def test_vectorize_Cauchy(self): P = np.random.random((self.n,3,3)) F = np.random.random((self.n,3,3)) - assert np.allclose(mechanics.Cauchy(F,P)[self.c], - mechanics.Cauchy(F[self.c],P[self.c])) + assert np.allclose(mechanics.Cauchy(P,F)[self.c], + mechanics.Cauchy(P[self.c],F[self.c])) def test_vectorize_deviatoric_part(self): x = np.random.random((self.n,3,3)) @@ -51,8 +51,8 @@ class TestMechanics: def test_vectorize_PK2(self): F = np.random.random((self.n,3,3)) P = np.random.random((self.n,3,3)) - assert np.allclose(mechanics.PK2(F,P)[self.c], - mechanics.PK2(F[self.c],P[self.c])) + assert np.allclose(mechanics.PK2(P,F)[self.c], + mechanics.PK2(P[self.c],F[self.c])) def test_vectorize_right_stretch(self): x = np.random.random((self.n,3,3)) @@ -90,7 +90,7 @@ class TestMechanics: def test_Cauchy(self): """Ensure Cauchy stress is symmetrized 1. Piola-Kirchhoff stress for no deformation.""" P = np.random.random((self.n,3,3)) - assert np.allclose(mechanics.Cauchy(np.broadcast_to(np.eye(3),(self.n,3,3)),P), + assert np.allclose(mechanics.Cauchy(P,np.broadcast_to(np.eye(3),(self.n,3,3))), mechanics.symmetric(P)) @@ -107,7 +107,7 @@ class TestMechanics: def test_PK2(self): """Ensure 2. Piola-Kirchhoff stress is symmetrized 1. 
Piola-Kirchhoff stress for no deformation.""" P = np.random.random((self.n,3,3)) - assert np.allclose(mechanics.PK2(np.broadcast_to(np.eye(3),(self.n,3,3)),P), + assert np.allclose(mechanics.PK2(P,np.broadcast_to(np.eye(3),(self.n,3,3))), mechanics.symmetric(P)) From 402c282e687531cdf0d75a95e34ef7d1e945819c Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Fri, 21 Feb 2020 13:27:41 +0100 Subject: [PATCH 22/51] better names backport form dadf5-usability --- src/results.f90 | 52 ++++++++++++++++++++++++------------------------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/src/results.f90 b/src/results.f90 index 8708f7856..cfd495a71 100644 --- a/src/results.f90 +++ b/src/results.f90 @@ -485,15 +485,15 @@ end subroutine results_writeScalarDataset_rotation !-------------------------------------------------------------------------------------------------- !> @brief adds the unique mapping from spatial position and constituent ID to results !-------------------------------------------------------------------------------------------------- -subroutine results_mapping_constituent(phaseAt,memberAt,label) +subroutine results_mapping_constituent(phaseAt,memberAtLocal,label) integer, dimension(:,:), intent(in) :: phaseAt !< phase section at (constituent,element) - integer, dimension(:,:,:), intent(in) :: memberAt !< phase member at (constituent,IP,element) - character(len=*), dimension(:), intent(in) :: label !< label of each phase section + integer, dimension(:,:,:), intent(in) :: memberAtLocal !< phase member at (constituent,IP,element) + character(len=pStringLen), dimension(:), intent(in) :: label !< label of each phase section - integer, dimension(size(memberAt,1),size(memberAt,2),size(memberAt,3)) :: & - phaseAt_perIP, & - memberAt_total + integer, dimension(size(memberAtLocal,1),size(memberAtLocal,2),size(memberAtLocal,3)) :: & + phaseAtMaterialpoint, & + memberAtGlobal integer, dimension(size(label),0:worldsize-1) :: memberOffset !< offset in member counting per process integer, dimension(0:worldsize-1) :: writeSize !< amount of data written per process integer(HSIZE_T), dimension(2) :: & @@ -543,10 +543,10 @@ subroutine results_mapping_constituent(phaseAt,memberAt,label) call h5pcreate_f(H5P_DATASET_XFER_F, plist_id, ierr) memberOffset = 0 do i=1, size(label) - memberOffset(i,worldrank) = count(phaseAt == i)*size(memberAt,2) ! number of points/instance of this process + memberOffset(i,worldrank) = count(phaseAt == i)*size(memberAtLocal,2) ! number of points/instance of this process enddo writeSize = 0 - writeSize(worldrank) = size(memberAt(1,:,:)) ! total number of points by this process + writeSize(worldrank) = size(memberAtLocal(1,:,:)) ! total number of points by this process !-------------------------------------------------------------------------------------------------- ! MPI settings and communication @@ -578,14 +578,14 @@ subroutine results_mapping_constituent(phaseAt,memberAt,label) !--------------------------------------------------------------------------------------------------- ! expand phaseAt to consider IPs (is not stored per IP) - do i = 1, size(phaseAt_perIP,2) - phaseAt_perIP(:,i,:) = phaseAt + do i = 1, size(phaseAtMaterialpoint,2) + phaseAtMaterialpoint(:,i,:) = phaseAt enddo !--------------------------------------------------------------------------------------------------- ! renumber member from my process to all processes do i = 1, size(label) - where(phaseAt_perIP == i) memberAt_total = memberAt + sum(memberOffset(i,0:worldrank-1)) -1 ! 
convert to 0-based + where(phaseAtMaterialpoint == i) memberAtGlobal = memberAtLocal + sum(memberOffset(i,0:worldrank-1)) -1 ! convert to 0-based enddo !-------------------------------------------------------------------------------------------------- @@ -596,10 +596,10 @@ subroutine results_mapping_constituent(phaseAt,memberAt,label) call h5dcreate_f(loc_id, 'constituent', dtype_id, filespace_id, dset_id, ierr) if (ierr < 0) call IO_error(1,ext_msg='results_mapping_constituent: h5dcreate_f') - call h5dwrite_f(dset_id, name_id, reshape(label(pack(phaseAt_perIP,.true.)),myShape), & + call h5dwrite_f(dset_id, name_id, reshape(label(pack(phaseAtMaterialpoint,.true.)),myShape), & myShape, ierr, file_space_id = filespace_id, mem_space_id = memspace_id, xfer_prp = plist_id) if (ierr < 0) call IO_error(1,ext_msg='results_mapping_constituent: h5dwrite_f/name_id') - call h5dwrite_f(dset_id, position_id, reshape(pack(memberAt_total,.true.),myShape), & + call h5dwrite_f(dset_id, position_id, reshape(pack(memberAtGlobal,.true.),myShape), & myShape, ierr, file_space_id = filespace_id, mem_space_id = memspace_id, xfer_prp = plist_id) if (ierr < 0) call IO_error(1,ext_msg='results_mapping_constituent: h5dwrite_f/position_id') @@ -620,15 +620,15 @@ end subroutine results_mapping_constituent !-------------------------------------------------------------------------------------------------- !> @brief adds the unique mapping from spatial position and constituent ID to results !-------------------------------------------------------------------------------------------------- -subroutine results_mapping_materialpoint(homogenizationAt,memberAt,label) +subroutine results_mapping_materialpoint(homogenizationAt,memberAtLocal,label) integer, dimension(:), intent(in) :: homogenizationAt !< homogenization section at (element) - integer, dimension(:,:), intent(in) :: memberAt !< homogenization member at (IP,element) - character(len=*), dimension(:), intent(in) :: label !< label of each homogenization section + integer, dimension(:,:), intent(in) :: memberAtLocal !< homogenization member at (IP,element) + character(len=pStringLen), dimension(:), intent(in) :: label !< label of each homogenization section - integer, dimension(size(memberAt,1),size(memberAt,2)) :: & - homogenizationAt_perIP, & - memberAt_total + integer, dimension(size(memberAtLocal,1),size(memberAtLocal,2)) :: & + homogenizationAtMaterialpoint, & + memberAtGlobal integer, dimension(size(label),0:worldsize-1) :: memberOffset !< offset in member counting per process integer, dimension(0:worldsize-1) :: writeSize !< amount of data written per process integer(HSIZE_T), dimension(1) :: & @@ -678,10 +678,10 @@ subroutine results_mapping_materialpoint(homogenizationAt,memberAt,label) call h5pcreate_f(H5P_DATASET_XFER_F, plist_id, ierr) memberOffset = 0 do i=1, size(label) - memberOffset(i,worldrank) = count(homogenizationAt == i)*size(memberAt,1) ! number of points/instance of this process + memberOffset(i,worldrank) = count(homogenizationAt == i)*size(memberAtLocal,1) ! number of points/instance of this process enddo writeSize = 0 - writeSize(worldrank) = size(memberAt) ! total number of points by this process + writeSize(worldrank) = size(memberAtLocal) ! total number of points by this process !-------------------------------------------------------------------------------------------------- ! 
MPI settings and communication @@ -713,14 +713,14 @@ subroutine results_mapping_materialpoint(homogenizationAt,memberAt,label) !--------------------------------------------------------------------------------------------------- ! expand phaseAt to consider IPs (is not stored per IP) - do i = 1, size(homogenizationAt_perIP,1) - homogenizationAt_perIP(i,:) = homogenizationAt + do i = 1, size(homogenizationAtMaterialpoint,1) + homogenizationAtMaterialpoint(i,:) = homogenizationAt enddo !--------------------------------------------------------------------------------------------------- ! renumber member from my process to all processes do i = 1, size(label) - where(homogenizationAt_perIP == i) memberAt_total = memberAt + sum(memberOffset(i,0:worldrank-1)) - 1 ! convert to 0-based + where(homogenizationAtMaterialpoint == i) memberAtGlobal = memberAtLocal + sum(memberOffset(i,0:worldrank-1)) - 1 ! convert to 0-based enddo !-------------------------------------------------------------------------------------------------- @@ -731,10 +731,10 @@ subroutine results_mapping_materialpoint(homogenizationAt,memberAt,label) call h5dcreate_f(loc_id, 'materialpoint', dtype_id, filespace_id, dset_id, ierr) if (ierr < 0) call IO_error(1,ext_msg='results_mapping_materialpoint: h5dcreate_f') - call h5dwrite_f(dset_id, name_id, reshape(label(pack(homogenizationAt_perIP,.true.)),myShape), & + call h5dwrite_f(dset_id, name_id, reshape(label(pack(homogenizationAtMaterialpoint,.true.)),myShape), & myShape, ierr, file_space_id = filespace_id, mem_space_id = memspace_id, xfer_prp = plist_id) if (ierr < 0) call IO_error(1,ext_msg='results_mapping_materialpoint: h5dwrite_f/name_id') - call h5dwrite_f(dset_id, position_id, reshape(pack(memberAt_total,.true.),myShape), & + call h5dwrite_f(dset_id, position_id, reshape(pack(memberAtGlobal,.true.),myShape), & myShape, ierr, file_space_id = filespace_id, mem_space_id = memspace_id, xfer_prp = plist_id) if (ierr < 0) call IO_error(1,ext_msg='results_mapping_materialpoint: h5dwrite_f/position_id') From 6a0760a13c897dad49bfa19f8f7400fc8dc6b181 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Fri, 21 Feb 2020 17:42:01 +0100 Subject: [PATCH 23/51] documentation polishing --- python/damask/dadf5.py | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/python/damask/dadf5.py b/python/damask/dadf5.py index d8528408c..4ad6b14a6 100644 --- a/python/damask/dadf5.py +++ b/python/damask/dadf5.py @@ -647,7 +647,7 @@ class DADF5(): self.__add_generic_pointwise(_add_eigenvector,{'S':S}) - def add_IPFcolor(self,q,p): + def add_IPFcolor(self,q,l): """ Add RGB color tuple of inverse pole figure (IPF) color. @@ -655,26 +655,26 @@ class DADF5(): ---------- q : str Label of the dataset containing the crystallographic orientation as quaternions. - p : list of int #ToDo: Direction / int? - Pole (crystallographic direction or plane). + l : numpy.array of shape (3) + Lab frame direction for inverse pole figure. 
""" - def _add_IPFcolor(q,p): + def _add_IPFcolor(q,l): - pole = np.array(p) - unit_pole = pole/np.linalg.norm(pole) - m = util.scale_to_coprime(pole) - colors = np.empty((len(q['data']),3),np.uint8) + d = np.array(l) + d_unit = pole/np.linalg.norm(d) + m = util.scale_to_coprime(d) + colors = np.empty((len(q['data']),3),np.uint8) lattice = q['meta']['Lattice'] for i,q in enumerate(q['data']): o = Orientation(np.array([q['w'],q['x'],q['y'],q['z']]),lattice).reduced() - colors[i] = np.uint8(o.IPFcolor(unit_pole)*255) + colors[i] = np.uint8(o.IPFcolor(d_unit)*255) return { 'data': colors, - 'label': 'IPFcolor_{{{} {} {}>'.format(*m), + 'label': 'IPFcolor_[{} {} {}]'.format(*m), 'meta' : { 'Unit': 'RGB (8bit)', 'Lattice': lattice, @@ -683,7 +683,7 @@ class DADF5(): } } - self.__add_generic_pointwise(_add_IPFcolor,{'q':q},{'p':p}) + self.__add_generic_pointwise(_add_IPFcolor,{'q':q},{'l':l}) def add_maximum_shear(self,S): @@ -815,9 +815,9 @@ class DADF5(): q : str Label of the dataset containing the crystallographic orientation as quaternions. p : numpy.array of shape (3) - Pole in crystal frame. + Crystallographic direction or plane. polar : bool, optional - Give pole in polar coordinates. Defaults to false. + Give pole in polar coordinates. Defaults to False. """ def _add_pole(q,p,polar): From ffb80981b1508751bc8a954f659efaad8ed95f94 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Fri, 21 Feb 2020 17:47:47 +0100 Subject: [PATCH 24/51] use central functionality --- python/damask/dadf5.py | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/python/damask/dadf5.py b/python/damask/dadf5.py index 4ad6b14a6..bdf6886d1 100644 --- a/python/damask/dadf5.py +++ b/python/damask/dadf5.py @@ -14,6 +14,7 @@ from . import mechanics from . import Rotation from . import Orientation from . import Environment +from . 
import grid_filters class DADF5(): """ @@ -436,15 +437,10 @@ class DADF5(): def cell_coordinates(self): """Return initial coordinates of the cell centers.""" if self.structured: - delta = self.size/self.grid*0.5 - z, y, x = np.meshgrid(np.linspace(delta[2],self.size[2]-delta[2],self.grid[2]), - np.linspace(delta[1],self.size[1]-delta[1],self.grid[1]), - np.linspace(delta[0],self.size[0]-delta[0],self.grid[0]), - ) - return np.concatenate((x[:,:,:,None],y[:,:,:,None],z[:,:,:,None]),axis = 3).reshape([np.product(self.grid),3]) + return grid_filters.cell_coord0(self.grid,self.size,self.origin) else: - with h5py.File(self.fname,'r') as f: - return f['geometry/x_c'][()] + with h5py.File(self.fname,'r') as f: + return f['geometry/x_c'][()] def add_absolute(self,x): @@ -662,7 +658,7 @@ class DADF5(): def _add_IPFcolor(q,l): d = np.array(l) - d_unit = pole/np.linalg.norm(d) + d_unit = d/np.linalg.norm(d) m = util.scale_to_coprime(d) colors = np.empty((len(q['data']),3),np.uint8) @@ -679,7 +675,7 @@ class DADF5(): 'Unit': 'RGB (8bit)', 'Lattice': lattice, 'Description': 'Inverse Pole Figure (IPF) colors for direction/plane [{} {} {})'.format(*m), - 'Creator': 'dadf5.py:addIPFcolor v{}'.format(version) + 'Creator': 'dadf5.py:add_IPFcolor v{}'.format(version) } } From 071dc6f4a53c03ea312f0b51e1f31bd3d61af067 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Fri, 21 Feb 2020 18:50:30 +0100 Subject: [PATCH 25/51] adjusted to new signature --- processing/post/addCauchy.py | 4 ++-- processing/post/addPK2.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/processing/post/addCauchy.py b/processing/post/addCauchy.py index afc5a57be..8bbc7fb0e 100755 --- a/processing/post/addCauchy.py +++ b/processing/post/addCauchy.py @@ -42,8 +42,8 @@ for name in filenames: table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) table.add('Cauchy', - damask.mechanics.Cauchy(table.get(options.defgrad).reshape(-1,3,3), - table.get(options.stress ).reshape(-1,3,3)).reshape(-1,9), + damask.mechanics.Cauchy(table.get(options.stress ).reshape(-1,3,3), + table.get(options.defgrad).reshape(-1,3,3)).reshape(-1,9), scriptID+' '+' '.join(sys.argv[1:])) table.to_ASCII(sys.stdout if name is None else name) diff --git a/processing/post/addPK2.py b/processing/post/addPK2.py index 185160d79..2894a5a90 100755 --- a/processing/post/addPK2.py +++ b/processing/post/addPK2.py @@ -43,8 +43,8 @@ for name in filenames: table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name) table.add('S', - damask.mechanics.PK2(table.get(options.defgrad).reshape(-1,3,3), - table.get(options.stress ).reshape(-1,3,3)).reshape(-1,9), + damask.mechanics.PK2(table.get(options.stress ).reshape(-1,3,3), + table.get(options.defgrad).reshape(-1,3,3)).reshape(-1,9), scriptID+' '+' '.join(sys.argv[1:])) table.to_ASCII(sys.stdout if name is None else name) From b9966b95e0d682b2454d0afc4e4fe35367f218ac Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Fri, 21 Feb 2020 18:52:58 +0100 Subject: [PATCH 26/51] consistently use small letters --- python/damask/dadf5.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/python/damask/dadf5.py b/python/damask/dadf5.py index bdf6886d1..191b2203b 100644 --- a/python/damask/dadf5.py +++ b/python/damask/dadf5.py @@ -1072,7 +1072,7 @@ class DADF5(): for i in f['/geometry/T_c']: vtk_geom.InsertNextCell(vtk_type,n_nodes,i-1) - elif mode == 'Point': + elif mode.lower()=='point': Points = vtk.vtkPoints() Vertices = 
vtk.vtkCellArray() for c in self.cell_coordinates(): @@ -1148,7 +1148,7 @@ class DADF5(): vtk_geom.GetCellData().AddArray(vtk_data[-1]) self.set_visible('constituents',constituents_backup) - if mode=='Cell': + if mode.lower()=='cell': writer = vtk.vtkXMLRectilinearGridWriter() if self.structured else \ vtk.vtkXMLUnstructuredGridWriter() x = self.get_dataset_location('u_n') @@ -1156,7 +1156,7 @@ class DADF5(): deep=True,array_type=vtk.VTK_DOUBLE)) vtk_data[-1].SetName('u') vtk_geom.GetPointData().AddArray(vtk_data[-1]) - elif mode == 'Point': + elif mode.lower()=='point': writer = vtk.vtkXMLPolyDataWriter() From 05476ff85cd74965f361cfbe96db0ed9f1ac52a0 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Fri, 21 Feb 2020 19:24:26 +0100 Subject: [PATCH 27/51] parallelize addition of datasets threads does not work, muliprocessing shows good performance: Overhead is small compared to the performance gain. Especially useful for long running functions of the orientation class --- python/damask/dadf5.py | 632 ++++++++++++++++++++--------------------- python/damask/util.py | 54 ---- 2 files changed, 307 insertions(+), 379 deletions(-) diff --git a/python/damask/dadf5.py b/python/damask/dadf5.py index 191b2203b..d27b0828f 100644 --- a/python/damask/dadf5.py +++ b/python/damask/dadf5.py @@ -1,7 +1,8 @@ -from queue import Queue +import multiprocessing import re import glob import os +from functools import partial import vtk from vtk.util import numpy_support @@ -443,6 +444,17 @@ class DADF5(): return f['geometry/x_c'][()] + @staticmethod + def _add_absolute(x): + return { + 'data': np.abs(x['data']), + 'label': '|{}|'.format(x['label']), + 'meta': { + 'Unit': x['meta']['Unit'], + 'Description': 'Absolute value of {} ({})'.format(x['label'],x['meta']['Description']), + 'Creator': 'dadf5.py:add_abs v{}'.format(version) + } + } def add_absolute(self,x): """ Add absolute value. @@ -453,21 +465,24 @@ class DADF5(): Label of scalar, vector, or tensor dataset to take absolute value of. """ - def _add_absolute(x): - - return { - 'data': np.abs(x['data']), - 'label': '|{}|'.format(x['label']), - 'meta': { - 'Unit': x['meta']['Unit'], - 'Description': 'Absolute value of {} ({})'.format(x['label'],x['meta']['Description']), - 'Creator': 'dadf5.py:add_abs v{}'.format(version) - } - } - - self.__add_generic_pointwise(_add_absolute,{'x':x}) + self.__add_generic_pointwise(self._add_absolute,{'x':x}) + @staticmethod + def _add_calculation(**kwargs): + formula = kwargs['formula'] + for d in re.findall(r'#(.*?)#',formula): + formula = formula.replace('#{}#'.format(d),"kwargs['{}']['data']".format(d)) + + return { + 'data': eval(formula), + 'label': kwargs['label'], + 'meta': { + 'Unit': kwargs['unit'], + 'Description': '{} (formula: {})'.format(kwargs['description'],kwargs['formula']), + 'Creator': 'dadf5.py:add_calculation v{}'.format(version) + } + } def add_calculation(self,label,formula,unit='n/a',description=None,vectorized=True): """ Add result of a general formula. 
@@ -489,28 +504,24 @@ class DADF5(): if not vectorized: raise NotImplementedError - def _add_calculation(**kwargs): - - formula = kwargs['formula'] - for d in re.findall(r'#(.*?)#',formula): - formula = formula.replace('#{}#'.format(d),"kwargs['{}']['data']".format(d)) - - return { - 'data': eval(formula), - 'label': kwargs['label'], - 'meta': { - 'Unit': kwargs['unit'], - 'Description': '{} (formula: {})'.format(kwargs['description'],kwargs['formula']), - 'Creator': 'dadf5.py:add_calculation v{}'.format(version) - } - } - - dataset_mapping = {d:d for d in set(re.findall(r'#(.*?)#',formula))} # datasets used in the formula + dataset_mapping = {d:d for d in set(re.findall(r'#(.*?)#',formula))} # datasets used in the formula args = {'formula':formula,'label':label,'unit':unit,'description':description} - - self.__add_generic_pointwise(_add_calculation,dataset_mapping,args) + self.__add_generic_pointwise(self._add_calculation,dataset_mapping,args) + @staticmethod + def _add_Cauchy(P,F): + return { + 'data': mechanics.Cauchy(P['data'],F['data']), + 'label': 'sigma', + 'meta': { + 'Unit': P['meta']['Unit'], + 'Description': 'Cauchy stress calculated from {} ({}) '.format(P['label'], + P['meta']['Description'])+\ + 'and {} ({})'.format(F['label'],F['meta']['Description']), + 'Creator': 'dadf5.py:add_Cauchy v{}'.format(version) + } + } def add_Cauchy(self,P='P',F='F'): """ Add Cauchy stress calculated from first Piola-Kirchhoff stress and deformation gradient. @@ -523,23 +534,20 @@ class DADF5(): Label of the dataset containing the deformation gradient. Defaults to ‘F’. """ - def _add_Cauchy(P,F): - - return { - 'data': mechanics.Cauchy(P['data'],F['data']), - 'label': 'sigma', - 'meta': { - 'Unit': P['meta']['Unit'], - 'Description': 'Cauchy stress calculated from {} ({}) '.format(P['label'], - P['meta']['Description'])+\ - 'and {} ({})'.format(F['label'],F['meta']['Description']), - 'Creator': 'dadf5.py:add_Cauchy v{}'.format(version) - } - } - - self.__add_generic_pointwise(_add_Cauchy,{'P':P,'F':F}) + self.__add_generic_pointwise(self._add_Cauchy,{'P':P,'F':F}) + @staticmethod + def _add_determinant(T): + return { + 'data': np.linalg.det(T['data']), + 'label': 'det({})'.format(T['label']), + 'meta': { + 'Unit': T['meta']['Unit'], + 'Description': 'Determinant of tensor {} ({})'.format(T['label'],T['meta']['Description']), + 'Creator': 'dadf5.py:add_determinant v{}'.format(version) + } + } def add_determinant(self,T): """ Add the determinant of a tensor. @@ -550,21 +558,23 @@ class DADF5(): Label of tensor dataset. """ - def _add_determinant(T): - - return { - 'data': np.linalg.det(T['data']), - 'label': 'det({})'.format(T['label']), - 'meta': { - 'Unit': T['meta']['Unit'], - 'Description': 'Determinant of tensor {} ({})'.format(T['label'],T['meta']['Description']), - 'Creator': 'dadf5.py:add_determinant v{}'.format(version) - } - } - - self.__add_generic_pointwise(_add_determinant,{'T':T}) + self.__add_generic_pointwise(self._add_determinant,{'T':T}) + @staticmethod + def _add_deviator(T): + if not np.all(np.array(T['data'].shape[1:]) == np.array([3,3])): + raise ValueError + + return { + 'data': mechanics.deviatoric_part(T['data']), + 'label': 's_{}'.format(T['label']), + 'meta': { + 'Unit': T['meta']['Unit'], + 'Description': 'Deviator of tensor {} ({})'.format(T['label'],T['meta']['Description']), + 'Creator': 'dadf5.py:add_deviator v{}'.format(version) + } + } def add_deviator(self,T): """ Add the deviatoric part of a tensor. @@ -575,24 +585,20 @@ class DADF5(): Label of tensor dataset. 
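
        Editor's sketch (dataset and file names assumed, not part of this patch):

        >>> import damask
        >>> d = damask.DADF5('geom_load.hdf5')      # hypothetical result file
        >>> d.add_deviator('sigma')                 # stored as 's_sigma'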
""" - def _add_deviator(T): - - if not np.all(np.array(T['data'].shape[1:]) == np.array([3,3])): - raise ValueError - - return { - 'data': mechanics.deviatoric_part(T['data']), - 'label': 's_{}'.format(T['label']), - 'meta': { - 'Unit': T['meta']['Unit'], - 'Description': 'Deviator of tensor {} ({})'.format(T['label'],T['meta']['Description']), - 'Creator': 'dadf5.py:add_deviator v{}'.format(version) - } - } - - self.__add_generic_pointwise(_add_deviator,{'T':T}) + self.__add_generic_pointwise(self._add_deviator,{'T':T}) + @staticmethod + def _add_eigenvalue(S): + return { + 'data': mechanics.eigenvalues(S['data']), + 'label': 'lambda({})'.format(S['label']), + 'meta' : { + 'Unit': S['meta']['Unit'], + 'Description': 'Eigenvalues of {} ({})'.format(S['label'],S['meta']['Description']), + 'Creator': 'dadf5.py:add_eigenvalues v{}'.format(version) + } + } def add_eigenvalues(self,S): """ Add eigenvalues of symmetric tensor. @@ -603,21 +609,20 @@ class DADF5(): Label of symmetric tensor dataset. """ - def _add_eigenvalue(S): - - return { - 'data': mechanics.eigenvalues(S['data']), - 'label': 'lambda({})'.format(S['label']), - 'meta' : { - 'Unit': S['meta']['Unit'], - 'Description': 'Eigenvalues of {} ({})'.format(S['label'],S['meta']['Description']), - 'Creator': 'dadf5.py:add_eigenvalues v{}'.format(version) - } - } - - self.__add_generic_pointwise(_add_eigenvalue,{'S':S}) + self.__add_generic_pointwise(self._add_eigenvalue,{'S':S}) + @staticmethod + def _add_eigenvector(S): + return { + 'data': mechanics.eigenvectors(S['data']), + 'label': 'v({})'.format(S['label']), + 'meta' : { + 'Unit': '1', + 'Description': 'Eigenvectors of {} ({})'.format(S['label'],S['meta']['Description']), + 'Creator': 'dadf5.py:add_eigenvectors v{}'.format(version) + } + } def add_eigenvectors(self,S): """ Add eigenvectors of symmetric tensor. @@ -628,21 +633,32 @@ class DADF5(): Label of symmetric tensor dataset. """ - def _add_eigenvector(S): - - return { - 'data': mechanics.eigenvectors(S['data']), - 'label': 'v({})'.format(S['label']), - 'meta' : { - 'Unit': '1', - 'Description': 'Eigenvectors of {} ({})'.format(S['label'],S['meta']['Description']), - 'Creator': 'dadf5.py:add_eigenvectors v{}'.format(version) - } - } - - self.__add_generic_pointwise(_add_eigenvector,{'S':S}) + self.__add_generic_pointwise(self._add_eigenvector,{'S':S}) + @staticmethod + def _add_IPFcolor(q,l): + d = np.array(l) + d_unit = d/np.linalg.norm(d) + m = util.scale_to_coprime(d) + colors = np.empty((len(q['data']),3),np.uint8) + + lattice = q['meta']['Lattice'] + + for i,q in enumerate(q['data']): + o = Orientation(np.array([q['w'],q['x'],q['y'],q['z']]),lattice).reduced() + colors[i] = np.uint8(o.IPFcolor(d_unit)*255) + + return { + 'data': colors, + 'label': 'IPFcolor_[{} {} {}]'.format(*m), + 'meta' : { + 'Unit': 'RGB (8bit)', + 'Lattice': lattice, + 'Description': 'Inverse Pole Figure (IPF) colors for direction/plane [{} {} {})'.format(*m), + 'Creator': 'dadf5.py:add_IPFcolor v{}'.format(version) + } + } def add_IPFcolor(self,q,l): """ Add RGB color tuple of inverse pole figure (IPF) color. @@ -655,33 +671,20 @@ class DADF5(): Lab frame direction for inverse pole figure. 
""" - def _add_IPFcolor(q,l): + self.__add_generic_pointwise(self._add_IPFcolor,{'q':q},{'l':l}) - d = np.array(l) - d_unit = d/np.linalg.norm(d) - m = util.scale_to_coprime(d) - colors = np.empty((len(q['data']),3),np.uint8) - lattice = q['meta']['Lattice'] - - for i,q in enumerate(q['data']): - o = Orientation(np.array([q['w'],q['x'],q['y'],q['z']]),lattice).reduced() - colors[i] = np.uint8(o.IPFcolor(d_unit)*255) - - return { - 'data': colors, - 'label': 'IPFcolor_[{} {} {}]'.format(*m), - 'meta' : { - 'Unit': 'RGB (8bit)', - 'Lattice': lattice, - 'Description': 'Inverse Pole Figure (IPF) colors for direction/plane [{} {} {})'.format(*m), - 'Creator': 'dadf5.py:add_IPFcolor v{}'.format(version) - } + @staticmethod + def _add_maximum_shear(S): + return { + 'data': mechanics.maximum_shear(S['data']), + 'label': 'max_shear({})'.format(S['label']), + 'meta': { + 'Unit': S['meta']['Unit'], + 'Description': 'Maximum shear component of {} ({})'.format(S['label'],S['meta']['Description']), + 'Creator': 'dadf5.py:add_maximum_shear v{}'.format(version) + } } - - self.__add_generic_pointwise(_add_IPFcolor,{'q':q},{'l':l}) - - def add_maximum_shear(self,S): """ Add maximum shear components of symmetric tensor. @@ -692,21 +695,23 @@ class DADF5(): Label of symmetric tensor dataset. """ - def _add_maximum_shear(S): - - return { - 'data': mechanics.maximum_shear(S['data']), - 'label': 'max_shear({})'.format(S['label']), - 'meta': { - 'Unit': S['meta']['Unit'], - 'Description': 'Maximum shear component of {} ({})'.format(S['label'],S['meta']['Description']), - 'Creator': 'dadf5.py:add_maximum_shear v{}'.format(version) - } - } - - self.__add_generic_pointwise(_add_maximum_shear,{'S':S}) + self.__add_generic_pointwise(self._add_maximum_shear,{'S':S}) + @staticmethod + def _add_Mises(S): + t = 'strain' if S['meta']['Unit'] == '1' else \ + 'stress' + + return { + 'data': mechanics.Mises_strain(S['data']) if t=='strain' else mechanics.Mises_stress(S['data']), + 'label': '{}_vM'.format(S['label']), + 'meta': { + 'Unit': S['meta']['Unit'], + 'Description': 'Mises equivalent {} of {} ({})'.format(t,S['label'],S['meta']['Description']), + 'Creator': 'dadf5.py:add_Mises v{}'.format(version) + } + } def add_Mises(self,S): """ Add the equivalent Mises stress or strain of a symmetric tensor. @@ -717,23 +722,32 @@ class DADF5(): Label of symmetric tensorial stress or strain dataset. 
""" - def _add_Mises(S): - - t = 'strain' if S['meta']['Unit'] == '1' else \ - 'stress' - return { - 'data': mechanics.Mises_strain(S['data']) if t=='strain' else mechanics.Mises_stress(S['data']), - 'label': '{}_vM'.format(S['label']), - 'meta': { - 'Unit': S['meta']['Unit'], - 'Description': 'Mises equivalent {} of {} ({})'.format(t,S['label'],S['meta']['Description']), - 'Creator': 'dadf5.py:add_Mises v{}'.format(version) - } - } - - self.__add_generic_pointwise(_add_Mises,{'S':S}) + self.__add_generic_pointwise(self._add_Mises,{'S':S}) + @staticmethod + def _add_norm(x,ord): + o = ord + if len(x['data'].shape) == 2: + axis = 1 + t = 'vector' + if o is None: o = 2 + elif len(x['data'].shape) == 3: + axis = (1,2) + t = 'tensor' + if o is None: o = 'fro' + else: + raise ValueError + + return { + 'data': np.linalg.norm(x['data'],ord=o,axis=axis,keepdims=True), + 'label': '|{}|_{}'.format(x['label'],o), + 'meta': { + 'Unit': x['meta']['Unit'], + 'Description': '{}-norm of {} {} ({})'.format(ord,t,x['label'],x['meta']['Description']), + 'Creator': 'dadf5.py:add_norm v{}'.format(version) + } + } def add_norm(self,x,ord=None): """ Add the norm of vector or tensor. @@ -746,36 +760,25 @@ class DADF5(): Order of the norm. inf means NumPy’s inf object. For details refer to numpy.linalg.norm. """ - def _add_norm(x,ord): - - o = ord - if len(x['data'].shape) == 2: - axis = 1 - t = 'vector' - if o is None: o = 2 - elif len(x['data'].shape) == 3: - axis = (1,2) - t = 'tensor' - if o is None: o = 'fro' - else: - raise ValueError - - return { - 'data': np.linalg.norm(x['data'],ord=o,axis=axis,keepdims=True), - 'label': '|{}|_{}'.format(x['label'],o), - 'meta': { - 'Unit': x['meta']['Unit'], - 'Description': '{}-norm of {} {} ({})'.format(ord,t,x['label'],x['meta']['Description']), - 'Creator': 'dadf5.py:add_norm v{}'.format(version) - } - } - - self.__add_generic_pointwise(_add_norm,{'x':x},{'ord':ord}) + self.__add_generic_pointwise(self._add_norm,{'x':x},{'ord':ord}) + @staticmethod + def _add_PK2(P,F): + return { + 'data': mechanics.PK2(P['data'],F['data']), + 'label': 'S', + 'meta': { + 'Unit': P['meta']['Unit'], + 'Description': '2. Kirchhoff stress calculated from {} ({}) '.format(P['label'], + P['meta']['Description'])+\ + 'and {} ({})'.format(F['label'],F['meta']['Description']), + 'Creator': 'dadf5.py:add_PK2 v{}'.format(version) + } + } def add_PK2(self,P='P',F='F'): """ - Add 2. Piola-Kirchhoff calculated from first Piola-Kirchhoff stress and deformation gradient. + Add second Piola-Kirchhoff calculated from first Piola-Kirchhoff stress and deformation gradient. Parameters ---------- @@ -785,23 +788,32 @@ class DADF5(): Label of deformation gradient dataset. Defaults to ‘F’. """ - def _add_PK2(P,F): - - return { - 'data': mechanics.PK2(P['data'],F['data']), - 'label': 'S', - 'meta': { - 'Unit': P['meta']['Unit'], - 'Description': '2. 
Kirchhoff stress calculated from {} ({}) '.format(P['label'], - P['meta']['Description'])+\ - 'and {} ({})'.format(F['label'],F['meta']['Description']), - 'Creator': 'dadf5.py:add_PK2 v{}'.format(version) - } - } - - self.__add_generic_pointwise(_add_PK2,{'P':P,'F':F}) + self.__add_generic_pointwise(self._add_PK2,{'P':P,'F':F}) + @staticmethod + def _add_pole(q,p,polar): + pole = np.array(p) + unit_pole = pole/np.linalg.norm(pole) + m = util.scale_to_coprime(pole) + coords = np.empty((len(q['data']),2)) + + for i,q in enumerate(q['data']): + o = Rotation(np.array([q['w'],q['x'],q['y'],q['z']])) + rotatedPole = o*unit_pole # rotate pole according to crystal orientation + (x,y) = rotatedPole[0:2]/(1.+abs(unit_pole[2])) # stereographic projection + coords[i] = [np.sqrt(x*x+y*y),np.arctan2(y,x)] if polar else [x,y] + + return { + 'data': coords, + 'label': 'p^{}_[{} {} {})'.format(u'rφ' if polar else 'xy',*m), + 'meta' : { + 'Unit': '1', + 'Description': '{} coordinates of stereographic projection of pole (direction/plane) in crystal frame'\ + .format('Polar' if polar else 'Cartesian'), + 'Creator' : 'dadf5.py:add_pole v{}'.format(version) + } + } def add_pole(self,q,p,polar=False): """ Add coordinates of stereographic projection of given pole in crystal frame. @@ -816,33 +828,22 @@ class DADF5(): Give pole in polar coordinates. Defaults to False. """ - def _add_pole(q,p,polar): + self.__add_generic_pointwise(self._add_pole,{'q':q},{'p':p,'polar':polar}) - pole = np.array(p) - unit_pole = pole/np.linalg.norm(pole) - m = util.scale_to_coprime(pole) - coords = np.empty((len(q['data']),2)) - for i,q in enumerate(q['data']): - o = Rotation(np.array([q['w'],q['x'],q['y'],q['z']])) - rotatedPole = o*unit_pole # rotate pole according to crystal orientation - (x,y) = rotatedPole[0:2]/(1.+abs(unit_pole[2])) # stereographic projection - coords[i] = [np.sqrt(x*x+y*y),np.arctan2(y,x)] if polar else [x,y] - - return { - 'data': coords, - 'label': 'p^{}_[{} {} {})'.format(u'rφ' if polar else 'xy',*m), - 'meta' : { - 'Unit': '1', - 'Description': '{} coordinates of stereographic projection of pole (direction/plane) in crystal frame'\ - .format('Polar' if polar else 'Cartesian'), - 'Creator' : 'dadf5.py:add_pole v{}'.format(version) - } + @staticmethod + def _add_rotational_part(F): + if not np.all(np.array(F['data'].shape[1:]) == np.array([3,3])): + raise ValueError + return { + 'data': mechanics.rotational_part(F['data']), + 'label': 'R({})'.format(F['label']), + 'meta': { + 'Unit': F['meta']['Unit'], + 'Description': 'Rotational part of {} ({})'.format(F['label'],F['meta']['Description']), + 'Creator': 'dadf5.py:add_rotational_part v{}'.format(version) + } } - - self.__add_generic_pointwise(_add_pole,{'q':q},{'p':p,'polar':polar}) - - def add_rotational_part(self,F): """ Add rotational part of a deformation gradient. @@ -853,21 +854,24 @@ class DADF5(): Label of deformation gradient dataset. 
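
        Editor's sketch (file name assumed, not part of this patch):

        >>> import damask
        >>> d = damask.DADF5('geom_load.hdf5')      # hypothetical result file
        >>> d.add_rotational_part('F')              # stored as 'R(F)'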
""" - def _add_rotational_part(F): - return { - 'data': mechanics.rotational_part(F['data']), - 'label': 'R({})'.format(F['label']), - 'meta': { - 'Unit': F['meta']['Unit'], - 'Description': 'Rotational part of {} ({})'.format(F['label'],F['meta']['Description']), - 'Creator': 'dadf5.py:add_rotational_part v{}'.format(version) - } - } - - self.__add_generic_pointwise(_add_rotational_part,{'F':F}) + self.__add_generic_pointwise(self._add_rotational_part,{'F':F}) + @staticmethod + def _add_spherical(T): + if not np.all(np.array(T['data'].shape[1:]) == np.array([3,3])): + raise ValueError + + return { + 'data': mechanics.spherical_part(T['data']), + 'label': 'p_{}'.format(T['label']), + 'meta': { + 'Unit': T['meta']['Unit'], + 'Description': 'Spherical component of tensor {} ({})'.format(T['label'],T['meta']['Description']), + 'Creator': 'dadf5.py:add_spherical v{}'.format(version) + } + } def add_spherical(self,T): """ Add the spherical (hydrostatic) part of a tensor. @@ -878,24 +882,22 @@ class DADF5(): Label of tensor dataset. """ - def _add_spherical(T): + self.__add_generic_pointwise(self._add_spherical,{'T':T}) - if not np.all(np.array(T['data'].shape[1:]) == np.array([3,3])): + + @staticmethod + def _add_strain_tensor(F,t,m): + if not np.all(np.array(F['data'].shape[1:]) == np.array([3,3])): raise ValueError - - return { - 'data': mechanics.spherical_part(T['data']), - 'label': 'p_{}'.format(T['label']), - 'meta': { - 'Unit': T['meta']['Unit'], - 'Description': 'Spherical component of tensor {} ({})'.format(T['label'],T['meta']['Description']), - 'Creator': 'dadf5.py:add_spherical v{}'.format(version) - } - } - - self.__add_generic_pointwise(_add_spherical,{'T':T}) - - + return { + 'data': mechanics.strain_tensor(F['data'],t,m), + 'label': 'epsilon_{}^{}({})'.format(t,m,F['label']), + 'meta': { + 'Unit': F['meta']['Unit'], + 'Description': 'Strain tensor of {} ({})'.format(F['label'],F['meta']['Description']), + 'Creator': 'dadf5.py:add_strain_tensor v{}'.format(version) + } + } def add_strain_tensor(self,F='F',t='V',m=0.0): """ Add strain tensor of a deformation gradient. @@ -913,21 +915,24 @@ class DADF5(): Order of the strain calculation. Defaults to ‘0.0’. """ - def _add_strain_tensor(F,t,m): - - return { - 'data': mechanics.strain_tensor(F['data'],t,m), - 'label': 'epsilon_{}^{}({})'.format(t,m,F['label']), - 'meta': { - 'Unit': F['meta']['Unit'], - 'Description': 'Strain tensor of {} ({})'.format(F['label'],F['meta']['Description']), - 'Creator': 'dadf5.py:add_strain_tensor v{}'.format(version) - } - } - - self.__add_generic_pointwise(_add_strain_tensor,{'F':F},{'t':t,'m':m}) + self.__add_generic_pointwise(self._add_strain_tensor,{'F':F},{'t':t,'m':m}) + @staticmethod + def _add_stretch_tensor(F,t): + if not np.all(np.array(F['data'].shape[1:]) == np.array([3,3])): + raise ValueError + + return { + 'data': mechanics.left_stretch(F['data']) if t == 'V' else mechanics.right_stretch(F['data']), + 'label': '{}({})'.format(t,F['label']), + 'meta': { + 'Unit': F['meta']['Unit'], + 'Description': '{} stretch tensor of {} ({})'.format('Left' if t == 'V' else 'Right', + F['label'],F['meta']['Description']), + 'Creator': 'dadf5.py:add_stretch_tensor v{}'.format(version) + } + } def add_stretch_tensor(self,F='F',t='V'): """ Add stretch tensor of a deformation gradient. @@ -941,77 +946,54 @@ class DADF5(): Defaults to ‘V’. 
""" - def _add_stretch_tensor(F,t): - - return { - 'data': mechanics.left_stretch(F['data']) if t == 'V' else mechanics.right_stretch(F['data']), - 'label': '{}({})'.format(t,F['label']), - 'meta': { - 'Unit': F['meta']['Unit'], - 'Description': '{} stretch tensor of {} ({})'.format('Left' if t == 'V' else 'Right', - F['label'],F['meta']['Description']), - 'Creator': 'dadf5.py:add_stretch_tensor v{}'.format(version) - } - } - - self.__add_generic_pointwise(_add_stretch_tensor,{'F':F},{'t':t}) + self.__add_generic_pointwise(self._add_stretch_tensor,{'F':F},{'t':t}) - def __add_generic_pointwise(self,func,dataset_mapping,args={}): - """ - General function to add pointwise data. + def job(self,group,func,datasets,args,lock): + try: + d = self._read(group,datasets,lock) + r = func(**d,**args) + return [group,r] + except Exception as err: + print('Error during calculation: {}.'.format(err)) + return None - Parameters - ---------- - func : function - Function that calculates a new dataset from one or more datasets per HDF5 group. - dataset_mapping : dictionary - Mapping HDF5 data label to callback function argument - extra_args : dictionary, optional - Any extra arguments parsed to func. - """ - def job(args): - """Call function with input data + extra arguments, returns results + group.""" - args['results'].put({**args['func'](**args['in']),'group':args['group']}) + def _read(self,group,datasets,lock): + datasets_in = {} + lock.acquire() + with h5py.File(self.fname,'r') as f: + for k,v in datasets.items(): + loc = f[group+'/'+v] + datasets_in[k]={'data':loc[()], + 'label':v, + 'meta':{k2:v2.decode() for k2,v2 in loc.attrs.items()}} + lock.release() + return datasets_in - env = Environment() - N_threads = int(env.options['DAMASK_NUM_THREADS']) - N_threads //=N_threads # disable for the moment + def __add_generic_pointwise(self,func,datasets,args={}): - results = Queue(N_threads) - pool = util.ThreadPool(N_threads) - N_added = N_threads + 1 + env = Environment() + N_threads = int(env.options['DAMASK_NUM_THREADS']) + pool = multiprocessing.Pool(N_threads) + m = multiprocessing.Manager() + lock = m.Lock() - todo = [] - # ToDo: It would be more memory efficient to read only from file when required, i.e. 
do to it in pool.add_task - for group in self.groups_with_datasets(dataset_mapping.values()): - with h5py.File(self.fname,'r') as f: - datasets_in = {} - for arg,label in dataset_mapping.items(): - loc = f[group+'/'+label] - data = loc[()] - meta = {k:loc.attrs[k].decode() for k in loc.attrs.keys()} - datasets_in[arg] = {'data': data, 'meta': meta, 'label': label} - - todo.append({'in':{**datasets_in,**args},'func':func,'group':group,'results':results}) - - pool.map(job, todo[:N_added]) # initialize - - N_not_calculated = len(todo) - while N_not_calculated > 0: - result = results.get() - with h5py.File(self.fname,'a') as f: # write to file - dataset_out = f[result['group']].create_dataset(result['label'],data=result['data']) - for k in result['meta'].keys(): - dataset_out.attrs[k] = result['meta'][k].encode() - N_not_calculated-=1 - - if N_added < len(todo): # add more jobs - pool.add_task(job,todo[N_added]) - N_added +=1 - - pool.wait_completion() + groups = self.groups_with_datasets(datasets.values()) + default_arg = partial(self.job,func=func,datasets=datasets,args=args,lock=lock) + for result in pool.imap_unordered(default_arg,groups): + if not result: continue + lock.acquire() + with h5py.File(self.fname, 'a') as f: + try: + dataset = f[result[0]].create_dataset(result[1]['label'],data=result[1]['data']) + for l,v in result[1]['meta'].items(): + dataset.attrs[l]=v.encode() + except OSError as err: + print('Could not add dataset: {}.'.format(err)) + lock.release() + pool.close() + pool.join() def to_vtk(self,labels,mode='cell'): diff --git a/python/damask/util.py b/python/damask/util.py index 1cb7b8602..0abf6050e 100644 --- a/python/damask/util.py +++ b/python/damask/util.py @@ -201,57 +201,3 @@ class return_message(): def __repr__(self): """Return message suitable for interactive shells.""" return srepr(self.message) - - -class ThreadPool: - """Pool of threads consuming tasks from a queue.""" - - class Worker(Thread): - """Thread executing tasks from a given tasks queue.""" - - def __init__(self, tasks): - """Worker for tasks.""" - Thread.__init__(self) - self.tasks = tasks - self.daemon = True - self.start() - - def run(self): - while True: - func, args, kargs = self.tasks.get() - try: - func(*args, **kargs) - except Exception as e: - # An exception happened in this thread - print(e) - finally: - # Mark this task as done, whether an exception happened or not - self.tasks.task_done() - - - def __init__(self, num_threads): - """ - Thread pool. 
- - Parameters - ---------- - num_threads : int - number of threads - - """ - self.tasks = Queue(num_threads) - for _ in range(num_threads): - self.Worker(self.tasks) - - def add_task(self, func, *args, **kargs): - """Add a task to the queue.""" - self.tasks.put((func, args, kargs)) - - def map(self, func, args_list): - """Add a list of tasks to the queue.""" - for args in args_list: - self.add_task(func, args) - - def wait_completion(self): - """Wait for completion of all the tasks in the queue.""" - self.tasks.join() From 0358e7343b879e992bb678aba251e3687ada2692 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Fri, 21 Feb 2020 19:37:26 +0100 Subject: [PATCH 28/51] polishing --- python/damask/dadf5.py | 37 ++++++++++++++++++------------------- python/damask/util.py | 2 -- 2 files changed, 18 insertions(+), 21 deletions(-) diff --git a/python/damask/dadf5.py b/python/damask/dadf5.py index d27b0828f..ce469f57e 100644 --- a/python/damask/dadf5.py +++ b/python/damask/dadf5.py @@ -37,24 +37,23 @@ class DADF5(): with h5py.File(fname,'r') as f: try: - self.version_major = f.attrs['DADF5_version_major'] - self.version_minor = f.attrs['DADF5_version_minor'] + self.version_major = f.attrs['DADF5_version_major'] + self.version_minor = f.attrs['DADF5_version_minor'] except KeyError: - self.version_major = f.attrs['DADF5-major'] - self.version_minor = f.attrs['DADF5-minor'] + self.version_major = f.attrs['DADF5-major'] + self.version_minor = f.attrs['DADF5-minor'] if self.version_major != 0 or not 2 <= self.version_minor <= 6: - raise TypeError('Unsupported DADF5 version {}.{} '.format(f.attrs['DADF5_version_major'], - f.attrs['DADF5_version_minor'])) + raise TypeError('Unsupported DADF5 version {}.{} '.format(self.version_major, + self.version_minor)) self.structured = 'grid' in f['geometry'].attrs.keys() if self.structured: - self.grid = f['geometry'].attrs['grid'] - self.size = f['geometry'].attrs['size'] - if self.version_major == 0 and self.version_minor >= 5: - self.origin = f['geometry'].attrs['origin'] - + self.grid = f['geometry'].attrs['grid'] + self.size = f['geometry'].attrs['size'] + self.origin = f['geometry'].attrs['origin'] if self.version_major == 0 and self.version_minor >= 5 else \ + np.zeros(3) r=re.compile('inc[0-9]+') increments_unsorted = {int(i[3:]):i for i in f.keys() if r.match(i)} @@ -67,12 +66,12 @@ class DADF5(): self.con_physics = [] for c in self.constituents: - self.con_physics += f['/'.join([self.increments[0],'constituent',c])].keys() + self.con_physics += f['/'.join([self.increments[0],'constituent',c])].keys() self.con_physics = list(set(self.con_physics)) # make unique self.mat_physics = [] for m in self.materialpoints: - self.mat_physics += f['/'.join([self.increments[0],'materialpoint',m])].keys() + self.mat_physics += f['/'.join([self.increments[0],'materialpoint',m])].keys() self.mat_physics = list(set(self.mat_physics)) # make unique self.selection= {'increments': self.increments, @@ -563,7 +562,7 @@ class DADF5(): @staticmethod def _add_deviator(T): - if not np.all(np.array(T['data'].shape[1:]) == np.array([3,3])): + if not T['data'].shape[1:] == (3,3): raise ValueError return { @@ -833,7 +832,7 @@ class DADF5(): @staticmethod def _add_rotational_part(F): - if not np.all(np.array(F['data'].shape[1:]) == np.array([3,3])): + if not F['data'].shape[1:] == (3,3): raise ValueError return { 'data': mechanics.rotational_part(F['data']), @@ -854,13 +853,12 @@ class DADF5(): Label of deformation gradient dataset. 
""" - self.__add_generic_pointwise(self._add_rotational_part,{'F':F}) @staticmethod def _add_spherical(T): - if not np.all(np.array(T['data'].shape[1:]) == np.array([3,3])): + if not T['data'].shape[1:] == (3,3): raise ValueError return { @@ -887,8 +885,9 @@ class DADF5(): @staticmethod def _add_strain_tensor(F,t,m): - if not np.all(np.array(F['data'].shape[1:]) == np.array([3,3])): + if not F['data'].shape[1:] == (3,3): raise ValueError + return { 'data': mechanics.strain_tensor(F['data'],t,m), 'label': 'epsilon_{}^{}({})'.format(t,m,F['label']), @@ -920,7 +919,7 @@ class DADF5(): @staticmethod def _add_stretch_tensor(F,t): - if not np.all(np.array(F['data'].shape[1:]) == np.array([3,3])): + if not F['data'].shape[1:] == (3,3): raise ValueError return { diff --git a/python/damask/util.py b/python/damask/util.py index 0abf6050e..ca316b4c0 100644 --- a/python/damask/util.py +++ b/python/damask/util.py @@ -6,8 +6,6 @@ import shlex from fractions import Fraction from functools import reduce from optparse import Option -from queue import Queue -from threading import Thread import numpy as np From 0774e121694b2e1739c8cf082ba115d3e949909c Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Fri, 21 Feb 2020 21:27:08 +0100 Subject: [PATCH 29/51] report progress --- python/damask/dadf5.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/python/damask/dadf5.py b/python/damask/dadf5.py index ce469f57e..b049c7dec 100644 --- a/python/damask/dadf5.py +++ b/python/damask/dadf5.py @@ -743,7 +743,7 @@ class DADF5(): 'label': '|{}|_{}'.format(x['label'],o), 'meta': { 'Unit': x['meta']['Unit'], - 'Description': '{}-norm of {} {} ({})'.format(ord,t,x['label'],x['meta']['Description']), + 'Description': '{}-norm of {} {} ({})'.format(o,t,x['label'],x['meta']['Description']), 'Creator': 'dadf5.py:add_norm v{}'.format(version) } } @@ -980,7 +980,9 @@ class DADF5(): groups = self.groups_with_datasets(datasets.values()) default_arg = partial(self.job,func=func,datasets=datasets,args=args,lock=lock) - for result in pool.imap_unordered(default_arg,groups): + util.progressBar(iteration=0,total=len(groups)-1) + for i,result in enumerate(pool.imap_unordered(default_arg,groups)): + util.progressBar(iteration=i,total=len(groups)-1) if not result: continue lock.acquire() with h5py.File(self.fname, 'a') as f: From e9bf02a52c20b31124cd7945c7de4c7bc5c0174c Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Fri, 21 Feb 2020 21:37:02 +0100 Subject: [PATCH 30/51] one underscore commonly denotes internal functions --- python/damask/dadf5.py | 66 +++++++++++++++++++++--------------------- 1 file changed, 33 insertions(+), 33 deletions(-) diff --git a/python/damask/dadf5.py b/python/damask/dadf5.py index b049c7dec..c13cf527f 100644 --- a/python/damask/dadf5.py +++ b/python/damask/dadf5.py @@ -464,7 +464,7 @@ class DADF5(): Label of scalar, vector, or tensor dataset to take absolute value of. """ - self.__add_generic_pointwise(self._add_absolute,{'x':x}) + self._add_generic_pointwise(self._add_absolute,{'x':x}) @staticmethod @@ -505,7 +505,7 @@ class DADF5(): dataset_mapping = {d:d for d in set(re.findall(r'#(.*?)#',formula))} # datasets used in the formula args = {'formula':formula,'label':label,'unit':unit,'description':description} - self.__add_generic_pointwise(self._add_calculation,dataset_mapping,args) + self._add_generic_pointwise(self._add_calculation,dataset_mapping,args) @staticmethod @@ -533,7 +533,7 @@ class DADF5(): Label of the dataset containing the deformation gradient. Defaults to ‘F’. 
""" - self.__add_generic_pointwise(self._add_Cauchy,{'P':P,'F':F}) + self._add_generic_pointwise(self._add_Cauchy,{'P':P,'F':F}) @staticmethod @@ -557,7 +557,7 @@ class DADF5(): Label of tensor dataset. """ - self.__add_generic_pointwise(self._add_determinant,{'T':T}) + self._add_generic_pointwise(self._add_determinant,{'T':T}) @staticmethod @@ -584,7 +584,7 @@ class DADF5(): Label of tensor dataset. """ - self.__add_generic_pointwise(self._add_deviator,{'T':T}) + self._add_generic_pointwise(self._add_deviator,{'T':T}) @staticmethod @@ -608,7 +608,7 @@ class DADF5(): Label of symmetric tensor dataset. """ - self.__add_generic_pointwise(self._add_eigenvalue,{'S':S}) + self._add_generic_pointwise(self._add_eigenvalue,{'S':S}) @staticmethod @@ -632,7 +632,7 @@ class DADF5(): Label of symmetric tensor dataset. """ - self.__add_generic_pointwise(self._add_eigenvector,{'S':S}) + self._add_generic_pointwise(self._add_eigenvector,{'S':S}) @staticmethod @@ -670,7 +670,7 @@ class DADF5(): Lab frame direction for inverse pole figure. """ - self.__add_generic_pointwise(self._add_IPFcolor,{'q':q},{'l':l}) + self._add_generic_pointwise(self._add_IPFcolor,{'q':q},{'l':l}) @staticmethod @@ -694,7 +694,7 @@ class DADF5(): Label of symmetric tensor dataset. """ - self.__add_generic_pointwise(self._add_maximum_shear,{'S':S}) + self._add_generic_pointwise(self._add_maximum_shear,{'S':S}) @staticmethod @@ -721,7 +721,7 @@ class DADF5(): Label of symmetric tensorial stress or strain dataset. """ - self.__add_generic_pointwise(self._add_Mises,{'S':S}) + self._add_generic_pointwise(self._add_Mises,{'S':S}) @staticmethod @@ -759,7 +759,7 @@ class DADF5(): Order of the norm. inf means NumPy’s inf object. For details refer to numpy.linalg.norm. """ - self.__add_generic_pointwise(self._add_norm,{'x':x},{'ord':ord}) + self._add_generic_pointwise(self._add_norm,{'x':x},{'ord':ord}) @staticmethod @@ -787,7 +787,7 @@ class DADF5(): Label of deformation gradient dataset. Defaults to ‘F’. """ - self.__add_generic_pointwise(self._add_PK2,{'P':P,'F':F}) + self._add_generic_pointwise(self._add_PK2,{'P':P,'F':F}) @staticmethod @@ -827,7 +827,7 @@ class DADF5(): Give pole in polar coordinates. Defaults to False. """ - self.__add_generic_pointwise(self._add_pole,{'q':q},{'p':p,'polar':polar}) + self._add_generic_pointwise(self._add_pole,{'q':q},{'p':p,'polar':polar}) @staticmethod @@ -853,7 +853,7 @@ class DADF5(): Label of deformation gradient dataset. """ - self.__add_generic_pointwise(self._add_rotational_part,{'F':F}) + self._add_generic_pointwise(self._add_rotational_part,{'F':F}) @staticmethod @@ -880,7 +880,7 @@ class DADF5(): Label of tensor dataset. """ - self.__add_generic_pointwise(self._add_spherical,{'T':T}) + self._add_generic_pointwise(self._add_spherical,{'T':T}) @staticmethod @@ -914,7 +914,7 @@ class DADF5(): Order of the strain calculation. Defaults to ‘0.0’. """ - self.__add_generic_pointwise(self._add_strain_tensor,{'F':F},{'t':t,'m':m}) + self._add_generic_pointwise(self._add_strain_tensor,{'F':F},{'t':t,'m':m}) @staticmethod @@ -945,12 +945,24 @@ class DADF5(): Defaults to ‘V’. 
""" - self.__add_generic_pointwise(self._add_stretch_tensor,{'F':F},{'t':t}) + self._add_generic_pointwise(self._add_stretch_tensor,{'F':F},{'t':t}) - def job(self,group,func,datasets,args,lock): + def _job(self,group,func,datasets,args,lock): + def _read(group,datasets,lock): + datasets_in = {} + lock.acquire() + with h5py.File(self.fname,'r') as f: + for k,v in datasets.items(): + loc = f[group+'/'+v] + datasets_in[k]={'data':loc[()], + 'label':v, + 'meta':{k2:v2.decode() for k2,v2 in loc.attrs.items()}} + lock.release() + return datasets_in + try: - d = self._read(group,datasets,lock) + d = _read(group,datasets,lock) r = func(**d,**args) return [group,r] except Exception as err: @@ -958,19 +970,7 @@ class DADF5(): return None - def _read(self,group,datasets,lock): - datasets_in = {} - lock.acquire() - with h5py.File(self.fname,'r') as f: - for k,v in datasets.items(): - loc = f[group+'/'+v] - datasets_in[k]={'data':loc[()], - 'label':v, - 'meta':{k2:v2.decode() for k2,v2 in loc.attrs.items()}} - lock.release() - return datasets_in - - def __add_generic_pointwise(self,func,datasets,args={}): + def _add_generic_pointwise(self,func,datasets,args={}): env = Environment() N_threads = int(env.options['DAMASK_NUM_THREADS']) @@ -979,7 +979,7 @@ class DADF5(): lock = m.Lock() groups = self.groups_with_datasets(datasets.values()) - default_arg = partial(self.job,func=func,datasets=datasets,args=args,lock=lock) + default_arg = partial(self._job,func=func,datasets=datasets,args=args,lock=lock) util.progressBar(iteration=0,total=len(groups)-1) for i,result in enumerate(pool.imap_unordered(default_arg,groups)): util.progressBar(iteration=i,total=len(groups)-1) From f604314207d5a698cc386cc8f383a17b16c12029 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Fri, 21 Feb 2020 23:16:25 +0100 Subject: [PATCH 31/51] polishing --- python/damask/dadf5.py | 82 +++++++++++++++++++++++------------------- 1 file changed, 46 insertions(+), 36 deletions(-) diff --git a/python/damask/dadf5.py b/python/damask/dadf5.py index c13cf527f..f1476af61 100644 --- a/python/damask/dadf5.py +++ b/python/damask/dadf5.py @@ -949,21 +949,20 @@ class DADF5(): def _job(self,group,func,datasets,args,lock): - def _read(group,datasets,lock): - datasets_in = {} - lock.acquire() - with h5py.File(self.fname,'r') as f: - for k,v in datasets.items(): - loc = f[group+'/'+v] - datasets_in[k]={'data':loc[()], - 'label':v, - 'meta':{k2:v2.decode() for k2,v2 in loc.attrs.items()}} - lock.release() - return datasets_in - + """ + Execute job for _add_generic_pointwise + """ try: - d = _read(group,datasets,lock) - r = func(**d,**args) + datasets_in = {} + lock.acquire() + with h5py.File(self.fname,'r') as f: + for arg,label in datasets.items(): + loc = f[group+'/'+label] + datasets_in[arg]={'data' :loc[()], + 'label':label, + 'meta': {k:v.decode() for k,v in loc.attrs.items()}} + lock.release() + r = func(**datasets_in,**args) return [group,r] except Exception as err: print('Error during calculation: {}.'.format(err)) @@ -971,30 +970,41 @@ class DADF5(): def _add_generic_pointwise(self,func,datasets,args={}): + """ + General function to add pointwise data. - env = Environment() - N_threads = int(env.options['DAMASK_NUM_THREADS']) - pool = multiprocessing.Pool(N_threads) - m = multiprocessing.Manager() - lock = m.Lock() + Parameters + ---------- + func : function + Callback function that calculates a new dataset from one or more datasets per HDF5 group. 
+ datasets : dictionary + Details of the datasets to be used: label (in HDF5 file) and arg (argument to which the data is parsed in func). + args : dictionary, optional + Arguments parsed to func. + """ + N_threads = int(Environment().options['DAMASK_NUM_THREADS']) + pool = multiprocessing.Pool(N_threads) + lock = multiprocessing.Manager().Lock() - groups = self.groups_with_datasets(datasets.values()) - default_arg = partial(self._job,func=func,datasets=datasets,args=args,lock=lock) - util.progressBar(iteration=0,total=len(groups)-1) - for i,result in enumerate(pool.imap_unordered(default_arg,groups)): - util.progressBar(iteration=i,total=len(groups)-1) - if not result: continue - lock.acquire() - with h5py.File(self.fname, 'a') as f: - try: - dataset = f[result[0]].create_dataset(result[1]['label'],data=result[1]['data']) - for l,v in result[1]['meta'].items(): - dataset.attrs[l]=v.encode() - except OSError as err: - print('Could not add dataset: {}.'.format(err)) - lock.release() - pool.close() - pool.join() + groups = self.groups_with_datasets(datasets.values()) + default_arg = partial(self._job,func=func,datasets=datasets,args=args,lock=lock) + + util.progressBar(iteration=0,total=len(groups)) + for i,result in enumerate(pool.imap_unordered(default_arg,groups)): + util.progressBar(iteration=i+1,total=len(groups)) + if not result: continue + lock.acquire() + with h5py.File(self.fname, 'a') as f: + try: + dataset = f[result[0]].create_dataset(result[1]['label'],data=result[1]['data']) + for l,v in result[1]['meta'].items(): + dataset.attrs[l]=v.encode() + except OSError as err: + print('Could not add dataset: {}.'.format(err)) + lock.release() + + pool.close() + pool.join() def to_vtk(self,labels,mode='cell'): From f72b856a189d9af9ce50157ddf60c298f4eb9e00 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Fri, 21 Feb 2020 23:37:35 +0100 Subject: [PATCH 32/51] no reason for critical will always (and only) set to .true. --- src/homogenization.f90 | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/homogenization.f90 b/src/homogenization.f90 index 08e02290b..2faebfe1b 100644 --- a/src/homogenization.f90 +++ b/src/homogenization.f90 @@ -333,12 +333,10 @@ subroutine materialpoint_stressAndItsTangent(updateJaco,dt) !$OMP FLUSH(terminallyIll) if (.not. terminallyIll) then ! so first signals terminally ill... !$OMP CRITICAL (write2out) - write(6,*) 'Integration point ', i,' at element ', e, ' terminally ill' + write(6,*) 'Integration point ', i,' at element ', e, ' terminally ill' !$OMP END CRITICAL (write2out) endif - !$OMP CRITICAL (setTerminallyIll) - terminallyIll = .true. ! ...and kills all others - !$OMP END CRITICAL (setTerminallyIll) + terminallyIll = .true. ! ...and kills all others else ! cutback makes sense materialpoint_subStep(i,e) = subStepSizeHomog * materialpoint_subStep(i,e) ! crystallite had severe trouble, so do a significant cutback From 62898a70a026c5444b8fd5f0134acf89bdb088b3 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Fri, 21 Feb 2020 23:40:29 +0100 Subject: [PATCH 33/51] use automated LHS allocation --- src/homogenization.f90 | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/homogenization.f90 b/src/homogenization.f90 index 2faebfe1b..9d935866c 100644 --- a/src/homogenization.f90 +++ b/src/homogenization.f90 @@ -146,9 +146,7 @@ subroutine homogenization_init !-------------------------------------------------------------------------------------------------- ! 
allocate and initialize global variables allocate(materialpoint_dPdF(3,3,3,3,discretization_nIP,discretization_nElem), source=0.0_pReal) - allocate(materialpoint_F0(3,3,discretization_nIP,discretization_nElem), source=0.0_pReal) materialpoint_F0 = spread(spread(math_I3,3,discretization_nIP),4,discretization_nElem) ! initialize to identity - allocate(materialpoint_F(3,3,discretization_nIP,discretization_nElem), source=0.0_pReal) materialpoint_F = materialpoint_F0 ! initialize to identity allocate(materialpoint_subF0(3,3,discretization_nIP,discretization_nElem), source=0.0_pReal) allocate(materialpoint_subF(3,3,discretization_nIP,discretization_nElem), source=0.0_pReal) From f20a82ce6d844ed53bd3cb6aa8c61ab722d08edc Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Fri, 21 Feb 2020 23:56:18 +0100 Subject: [PATCH 34/51] migrate name: damask.Result better than damask.DADF5 --- python/damask/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/python/damask/__init__.py b/python/damask/__init__.py index 30fb37ced..2961ba6e3 100644 --- a/python/damask/__init__.py +++ b/python/damask/__init__.py @@ -15,6 +15,7 @@ from .config import Material # noqa from .colormaps import Colormap, Color # noqa from .orientation import Symmetry, Lattice, Rotation, Orientation # noqa from .dadf5 import DADF5 # noqa +from .dadf5 import DADF5 as Result # noqa from .geom import Geom # noqa from .solver import Solver # noqa From be45d62f533c07c063573d929b4686b6bf8c5165 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Fri, 21 Feb 2020 23:59:33 +0100 Subject: [PATCH 35/51] polishing --- python/damask/dadf5.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/python/damask/dadf5.py b/python/damask/dadf5.py index f1476af61..597784e6b 100644 --- a/python/damask/dadf5.py +++ b/python/damask/dadf5.py @@ -949,9 +949,7 @@ class DADF5(): def _job(self,group,func,datasets,args,lock): - """ - Execute job for _add_generic_pointwise - """ + """Execute job for _add_generic_pointwise.""" try: datasets_in = {} lock.acquire() @@ -981,6 +979,7 @@ class DADF5(): Details of the datasets to be used: label (in HDF5 file) and arg (argument to which the data is parsed in func). args : dictionary, optional Arguments parsed to func. + """ N_threads = int(Environment().options['DAMASK_NUM_THREADS']) pool = multiprocessing.Pool(N_threads) From d0b966f9e78f8f0c679d16e8c3e2903178f80283 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Sat, 22 Feb 2020 07:49:32 +0100 Subject: [PATCH 36/51] bugfix: test for nonlocal density conservation failed --- PRIVATE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/PRIVATE b/PRIVATE index ec615d249..6db5f4666 160000 --- a/PRIVATE +++ b/PRIVATE @@ -1 +1 @@ -Subproject commit ec615d249d39e5d01446b01ab9a5b7e7601340ad +Subproject commit 6db5f4666fc651b4de3b44ceaed3f2b848170ac9 From bedcd0ad8cc8e25f2d2a5df61e8d54101329a78c Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Sat, 22 Feb 2020 11:14:11 +0100 Subject: [PATCH 37/51] sanity check for homogenization number of (constituent) per microstructure need to match Nconstituents in homogenization. 
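As an illustration of what the new check (error 148 below) is meant to reject, consider a
hypothetical material.config fragment; the section layout follows the usual DAMASK 2 style,
and the homogenization keyword spelling is an assumption here, not taken from this patch:

    <homogenization>
    [twoConstituents]
    mech           isostrain
    Nconstituents  2                                    # homogenization expects two constituents per point (keyword name assumed)

    <microstructure>
    [oneConstituent]
    (constituent)  phase 1   texture 1   fraction 1.0   # only one (constituent) entry -> mismatch

Without the check, a homogenization requesting more constituents than the microstructure
provides would read past the end of the (constituent) list; it is now reported as
'Nconstituents mismatch between homogenization and microstructure'.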
--- src/IO.f90 | 2 ++ src/material.f90 | 28 +++++++++++++++++++--------- 2 files changed, 21 insertions(+), 9 deletions(-) diff --git a/src/IO.f90 b/src/IO.f90 index c31ebdfab..6ab87715c 100644 --- a/src/IO.f90 +++ b/src/IO.f90 @@ -400,6 +400,8 @@ subroutine IO_error(error_ID,el,ip,g,instance,ext_msg) msg = 'number of values does not match' case (147) msg = 'not supported anymore' + case (148) + msg = 'Nconstituents mismatch between homogenization and microstructure' !-------------------------------------------------------------------------------------------------- ! material error messages and related messages in mesh diff --git a/src/material.f90 b/src/material.f90 index 7e68049b8..05c027d55 100644 --- a/src/material.f90 +++ b/src/material.f90 @@ -257,7 +257,7 @@ subroutine material_init allocate(damage (material_Nhomogenization)) allocate(temperatureRate (material_Nhomogenization)) - + do m = 1,size(config_microstructure) if(minval(microstructure_phase(1:microstructure_Nconstituents(m),m)) < 1 .or. & maxval(microstructure_phase(1:microstructure_Nconstituents(m),m)) > size(config_phase)) & @@ -268,6 +268,7 @@ subroutine material_init if(microstructure_Nconstituents(m) < 1) & call IO_error(151,m) enddo + if(homogenization_maxNgrains > size(microstructure_phase,1)) call IO_error(148) debugOut: if (iand(myDebug,debug_levelExtensive) /= 0) then write(6,'(/,a,/)') ' MATERIAL configuration' @@ -290,6 +291,7 @@ subroutine material_init !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ! new mappings + allocate(material_phaseAt(homogenization_maxNgrains,discretization_nElem), source=0) allocate(material_texture(homogenization_maxNgrains,discretization_nIP,discretization_nElem),source=0) !this is only needed by plasticity nonlocal allocate(material_orientation0(homogenization_maxNgrains,discretization_nIP,discretization_nElem)) @@ -298,9 +300,17 @@ subroutine material_init do i = 1, discretization_nIP myMicro = discretization_microstructureAt(e) do c = 1, homogenization_Ngrains(discretization_homogenizationAt(e)) - material_phaseAt(c,e) = microstructure_phase(c,myMicro) - material_texture(c,i,e) = microstructure_texture(c,myMicro) - material_orientation0(c,i,e) = texture_orientation(material_texture(c,i,e)) + if(microstructure_phase(c,myMicro) > 0) then + material_phaseAt(c,e) = microstructure_phase(c,myMicro) + else + call IO_error(150,ext_msg='phase') + endif + if(microstructure_texture(c,myMicro) > 0) then + material_texture(c,i,e) = microstructure_texture(c,myMicro) + material_orientation0(c,i,e) = texture_orientation(material_texture(c,i,e)) + else + call IO_error(150,ext_msg='texture') + endif enddo enddo enddo @@ -464,7 +474,7 @@ subroutine material_parseMicrostructure real(pReal), dimension(:,:), allocatable :: & microstructure_fraction !< vol fraction of each constituent in microstructure integer :: & - microstructure_maxNconstituents !< max number of constituents in any phase + maxNconstituents !< max number of constituents in any phase allocate(microstructure_Nconstituents(size(config_microstructure)), source=0) @@ -475,10 +485,10 @@ subroutine material_parseMicrostructure microstructure_Nconstituents(m) = config_microstructure(m)%countKeys('(constituent)') enddo - microstructure_maxNconstituents = maxval(microstructure_Nconstituents) - allocate(microstructure_phase (microstructure_maxNconstituents,size(config_microstructure)),source=0) - allocate(microstructure_texture (microstructure_maxNconstituents,size(config_microstructure)),source=0) - 
allocate(microstructure_fraction(microstructure_maxNconstituents,size(config_microstructure)),source=0.0_pReal) + maxNconstituents = maxval(microstructure_Nconstituents) + allocate(microstructure_phase (maxNconstituents,size(config_microstructure)),source=0) + allocate(microstructure_texture (maxNconstituents,size(config_microstructure)),source=0) + allocate(microstructure_fraction(maxNconstituents,size(config_microstructure)),source=0.0_pReal) allocate(strings(1)) ! Intel 16.0 Bug do m=1, size(config_microstructure) From 2cf89b1fddbfcbfe7f0227d11b49d3870854713b Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Sat, 22 Feb 2020 11:26:12 +0100 Subject: [PATCH 38/51] bugfix: output dataset was too large in case of Ngrains>1 material_phaseAt is defined per constituent/grain/component => no need to multiply with homogenization_maxNgrains --- src/crystallite.f90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/crystallite.f90 b/src/crystallite.f90 index c01f9fa67..a1610b254 100644 --- a/src/crystallite.f90 +++ b/src/crystallite.f90 @@ -734,7 +734,7 @@ subroutine crystallite_results real(pReal), allocatable, dimension(:,:,:) :: select_tensors integer :: e,i,c,j - allocate(select_tensors(3,3,count(material_phaseAt==instance)*homogenization_maxNgrains*discretization_nIP)) + allocate(select_tensors(3,3,count(material_phaseAt==instance)*discretization_nIP)) j=0 do e = 1, size(material_phaseAt,2) From 7d6c0dc5f4e148fd9859beecb2c25aa9c264a0f2 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Sat, 22 Feb 2020 11:42:06 +0100 Subject: [PATCH 39/51] is only needed during initialization --- src/material.f90 | 1 + 1 file changed, 1 insertion(+) diff --git a/src/material.f90 b/src/material.f90 index 05c027d55..9c5009484 100644 --- a/src/material.f90 +++ b/src/material.f90 @@ -317,6 +317,7 @@ subroutine material_init deallocate(microstructure_phase) deallocate(microstructure_texture) + deallocate(texture_orientation) allocate(material_homogenizationAt,source=discretization_homogenizationAt) From 605e9769150da5323a3f4a34e4cf9f9280818ec6 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Sat, 22 Feb 2020 15:48:40 +0100 Subject: [PATCH 40/51] I don't like loops use language features and helper functions for shorter code --- src/grid/spectral_utilities.f90 | 228 +++++++++++++++----------------- 1 file changed, 104 insertions(+), 124 deletions(-) diff --git a/src/grid/spectral_utilities.f90 b/src/grid/spectral_utilities.f90 index cd064b1f8..87e906727 100644 --- a/src/grid/spectral_utilities.f90 +++ b/src/grid/spectral_utilities.f90 @@ -18,12 +18,12 @@ module spectral_utilities use config use discretization use homogenization - + implicit none private - + include 'fftw3-mpi.f03' - + !-------------------------------------------------------------------------------------------------- ! field labels information enum, bind(c) @@ -109,8 +109,8 @@ module spectral_utilities real(pReal) :: timeinc real(pReal) :: timeincOld end type tSolutionParams - - type, private :: tNumerics + + type, private :: tNumerics real(pReal) :: & FFTW_timelimit !< timelimit for FFTW plan creation, see www.fftw.org integer :: & @@ -122,7 +122,7 @@ module spectral_utilities FFTW_plan_mode, & !< FFTW plan mode, see www.fftw.org PETSc_options end type tNumerics - + type(tNumerics) :: num ! numerics parameters. Better name? 
enum, bind(c) @@ -189,18 +189,18 @@ subroutine utilities_init scalarSize = 1_C_INTPTR_T, & vecSize = 3_C_INTPTR_T, & tensorSize = 9_C_INTPTR_T - + write(6,'(/,a)') ' <<<+- spectral_utilities init -+>>>' - + write(6,'(/,a)') ' Diehl, Diploma Thesis TU München, 2010' write(6,'(a)') ' https://doi.org/10.13140/2.1.3234.3840' - + write(6,'(/,a)') ' Eisenlohr et al., International Journal of Plasticity 46:37–53, 2013' write(6,'(a)') ' https://doi.org/10.1016/j.ijplas.2012.09.012' - + write(6,'(/,a)') ' Shanthraj et al., International Journal of Plasticity 66:31–45, 2015' write(6,'(a)') ' https://doi.org/10.1016/j.ijplas.2014.02.006' - + write(6,'(/,a)') ' Shanthraj et al., Handbook of Mechanics of Materials, 2019' write(6,'(a)') ' https://doi.org/10.1007/978-981-10-6855-3_80' @@ -209,34 +209,34 @@ subroutine utilities_init debugGeneral = iand(debug_level(debug_SPECTRAL),debug_LEVELBASIC) /= 0 debugRotation = iand(debug_level(debug_SPECTRAL),debug_SPECTRALROTATION) /= 0 debugPETSc = iand(debug_level(debug_SPECTRAL),debug_SPECTRALPETSC) /= 0 - + if(debugPETSc) write(6,'(3(/,a),/)') & ' Initializing PETSc with debug options: ', & trim(PETScDebug), & ' add more using the PETSc_Options keyword in numerics.config '; flush(6) - + call PETScOptionsClear(PETSC_NULL_OPTIONS,ierr) CHKERRQ(ierr) if(debugPETSc) call PETScOptionsInsertString(PETSC_NULL_OPTIONS,trim(PETSCDEBUG),ierr) CHKERRQ(ierr) call PETScOptionsInsertString(PETSC_NULL_OPTIONS,trim(petsc_options),ierr) CHKERRQ(ierr) - + grid1Red = grid(1)/2 + 1 wgt = 1.0/real(product(grid),pReal) - + write(6,'(/,a,3(i12 ))') ' grid a b c: ', grid write(6,'(a,3(es12.5))') ' size x y z: ', geomSize - + num%memory_efficient = config_numerics%getInt ('memory_efficient', defaultVal=1) > 0 num%FFTW_timelimit = config_numerics%getFloat ('fftw_timelimit', defaultVal=-1.0_pReal) num%divergence_correction = config_numerics%getInt ('divergence_correction', defaultVal=2) num%spectral_derivative = config_numerics%getString('spectral_derivative', defaultVal='continuous') num%FFTW_plan_mode = config_numerics%getString('fftw_plan_mode', defaultVal='FFTW_MEASURE') - + if (num%divergence_correction < 0 .or. num%divergence_correction > 2) & call IO_error(301,ext_msg='divergence_correction') - + select case (num%spectral_derivative) case ('continuous') spectral_derivative_ID = DERIVATIVE_CONTINUOUS_ID @@ -265,8 +265,8 @@ subroutine utilities_init else scaledGeomSize = geomSize endif - - + + select case(IO_lc(num%FFTW_plan_mode)) ! setting parameters for the plan creation of FFTW. Basically a translation from fftw3.f case('fftw_estimate') ! ordered from slow execution (but fast plan creation) to fast execution FFTW_planner_flag = FFTW_ESTIMATE @@ -285,7 +285,7 @@ subroutine utilities_init ! general initialization of FFTW (see manual on fftw.org for more details) if (pReal /= C_DOUBLE .or. kind(1) /= C_INT) call IO_error(0,ext_msg='Fortran to C') ! check for correct precision in C call fftw_set_timelimit(num%FFTW_timelimit) ! set timelimit for plan creation - + if (debugGeneral) write(6,'(/,a)') ' FFTW initialized'; flush(6) !-------------------------------------------------------------------------------------------------- @@ -295,19 +295,19 @@ subroutine utilities_init PETSC_COMM_WORLD, local_K, local_K_offset) allocate (xi1st (3,grid1Red,grid(2),grid3),source = cmplx(0.0_pReal,0.0_pReal,pReal)) ! frequencies for first derivatives, only half the size for first dimension allocate (xi2nd (3,grid1Red,grid(2),grid3),source = cmplx(0.0_pReal,0.0_pReal,pReal)) ! 
frequencies for second derivatives, only half the size for first dimension - + tensorField = fftw_alloc_complex(tensorSize*alloc_local) call c_f_pointer(tensorField, tensorField_real, [3_C_INTPTR_T,3_C_INTPTR_T, & 2_C_INTPTR_T*(gridFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T),gridFFTW(2),local_K]) ! place a pointer for a real tensor representation call c_f_pointer(tensorField, tensorField_fourier, [3_C_INTPTR_T,3_C_INTPTR_T, & gridFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T , gridFFTW(2),local_K]) ! place a pointer for a fourier tensor representation - + vectorField = fftw_alloc_complex(vecSize*alloc_local) call c_f_pointer(vectorField, vectorField_real, [3_C_INTPTR_T,& 2_C_INTPTR_T*(gridFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T),gridFFTW(2),local_K]) ! place a pointer for a real vector representation call c_f_pointer(vectorField, vectorField_fourier,[3_C_INTPTR_T,& gridFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T, gridFFTW(2),local_K]) ! place a pointer for a fourier vector representation - + scalarField = fftw_alloc_complex(scalarSize*alloc_local) ! allocate data for real representation (no in place transform) call c_f_pointer(scalarField, scalarField_real, & [2_C_INTPTR_T*(gridFFTW(1)/2_C_INTPTR_T + 1),gridFFTW(2),local_K]) ! place a pointer for a real scalar representation @@ -371,7 +371,7 @@ subroutine utilities_init xi1st(1:3,i,j,k-grid3Offset) = xi2nd(1:3,i,j,k-grid3Offset) endwhere enddo; enddo; enddo - + if(num%memory_efficient) then ! allocate just single fourth order tensor allocate (gamma_hat(3,3,3,3,1,1,1), source = cmplx(0.0_pReal,0.0_pReal,pReal)) else ! precalculation of gamma_hat field @@ -388,7 +388,7 @@ end subroutine utilities_init !> In case of an on-the-fly calculation, only the reference stiffness is updated. !--------------------------------------------------------------------------------------------------- subroutine utilities_updateGamma(C) - + real(pReal), intent(in), dimension(3,3,3,3) :: C !< input stiffness to store as reference stiffness complex(pReal), dimension(3,3) :: temp33_complex, xiDyad_cmplx real(pReal), dimension(6,6) :: A, A_inv @@ -396,9 +396,9 @@ subroutine utilities_updateGamma(C) i, j, k, & l, m, n, o logical :: err - + C_ref = C - + if(.not. num%memory_efficient) then gamma_hat = cmplx(0.0_pReal,0.0_pReal,pReal) ! for the singular point and any non invertible A do k = grid3Offset+1, grid3Offset+grid3; do j = 1, grid(2); do i = 1, grid1Red @@ -419,7 +419,7 @@ subroutine utilities_updateGamma(C) endif enddo; enddo; enddo endif - + end subroutine utilities_updateGamma @@ -501,17 +501,17 @@ end subroutine utilities_FFTvectorBackward !> @brief doing convolution gamma_hat * field_real, ensuring that average value = fieldAim !-------------------------------------------------------------------------------------------------- subroutine utilities_fourierGammaConvolution(fieldAim) - + real(pReal), intent(in), dimension(3,3) :: fieldAim !< desired average value of the field after convolution complex(pReal), dimension(3,3) :: temp33_complex, xiDyad_cmplx real(pReal), dimension(6,6) :: A, A_inv - + integer :: & i, j, k, & l, m, n, o logical :: err - - + + write(6,'(/,a)') ' ... doing gamma convolution ...............................................' 
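 ! Editorial note (not part of the patch): gamma_hat is built from the reference stiffness C_ref
 ! and the frequencies xi1st (see utilities_updateGamma above); this convolution is the
 ! Green-operator (gamma) step of the spectral algorithm, and the zero-frequency entry is
 ! overwritten with fieldAim/wgt at the end of this routine so that the average of the
 ! transformed-back field equals fieldAim.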
flush(6) @@ -531,7 +531,7 @@ subroutine utilities_fourierGammaConvolution(fieldAim) temp33_complex = cmplx(A_inv(1:3,1:3),A_inv(1:3,4:6),pReal) forall(l=1:3, m=1:3, n=1:3, o=1:3) & gamma_hat(l,m,n,o,1,1,1) = temp33_complex(l,n)*conjg(-xi1st(o,i,j,k))*xi1st(m,i,j,k) - else + else gamma_hat(1:3,1:3,1:3,1:3,1,1,1) = cmplx(0.0_pReal,0.0_pReal,pReal) endif forall(l = 1:3, m = 1:3) & @@ -546,7 +546,7 @@ subroutine utilities_fourierGammaConvolution(fieldAim) tensorField_fourier(1:3,1:3,i,j,k) = temp33_Complex enddo; enddo; enddo endif memoryEfficient - + if (grid3Offset == 0) tensorField_fourier(1:3,1:3,1,1,1) = cmplx(fieldAim/wgt,0.0_pReal,pReal) end subroutine utilities_fourierGammaConvolution @@ -561,7 +561,7 @@ subroutine utilities_fourierGreenConvolution(D_ref, mobility_ref, deltaT) real(pReal), intent(in) :: mobility_ref, deltaT complex(pReal) :: GreenOp_hat integer :: i, j, k - + !-------------------------------------------------------------------------------------------------- ! do the actual spectral method calculation do k = 1, grid3; do j = 1, grid(2) ;do i = 1, grid1Red @@ -625,16 +625,16 @@ real(pReal) function utilities_curlRMS() integer :: i, j, k, l, ierr complex(pReal), dimension(3,3) :: curl_fourier complex(pReal), dimension(3) :: rescaledGeom - + write(6,'(/,a)') ' ... calculating curl ......................................................' flush(6) - + rescaledGeom = cmplx(geomSize/scaledGeomSize,0.0_pReal) - + !-------------------------------------------------------------------------------------------------- ! calculating max curl criterion in Fourier space utilities_curlRMS = 0.0_pReal - + do k = 1, grid3; do j = 1, grid(2); do i = 2, grid1Red - 1 do l = 1, 3 @@ -669,7 +669,7 @@ real(pReal) function utilities_curlRMS() utilities_curlRMS = utilities_curlRMS & + sum(real(curl_fourier)**2.0_pReal + aimag(curl_fourier)**2.0_pReal) ! this layer (Nyquist) does not have a conjugate complex counterpart (if grid(1) /= 1) enddo; enddo - + call MPI_Allreduce(MPI_IN_PLACE,utilities_curlRMS,1,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr) if(ierr /=0) call IO_error(894, ext_msg='utilities_curlRMS') utilities_curlRMS = sqrt(utilities_curlRMS) * wgt @@ -688,9 +688,10 @@ function utilities_maskedCompliance(rot_BC,mask_stress,C) type(rotation), intent(in) :: rot_BC !< rotation of load frame logical, intent(in), dimension(3,3) :: mask_stress !< mask of stress BC - integer :: j, k, m, n - logical, dimension(9) :: mask_stressVector - real(pReal), dimension(9,9) :: temp99_Real + integer :: i, j + logical, dimension(9) :: mask_stressVector + logical, dimension(9,9) :: mask + real(pReal), dimension(9,9) :: temp99_real integer :: size_reduced = 0 real(pReal), dimension(:,:), allocatable :: & s_reduced, & !< reduced compliance matrix (depending on number of stress BC) @@ -698,57 +699,33 @@ function utilities_maskedCompliance(rot_BC,mask_stress,C) sTimesC !< temp variable to check inversion logical :: errmatinv character(len=pStringLen):: formatString - + mask_stressVector = reshape(transpose(mask_stress), [9]) size_reduced = count(mask_stressVector) - if(size_reduced > 0 )then - allocate (c_reduced(size_reduced,size_reduced), source =0.0_pReal) - allocate (s_reduced(size_reduced,size_reduced), source =0.0_pReal) - allocate (sTimesC(size_reduced,size_reduced), source =0.0_pReal) - temp99_Real = math_3333to99(rot_BC%rotTensor4(C)) - + if(size_reduced > 0) then + temp99_real = math_3333to99(rot_BC%rotate(C)) + if(debugGeneral) then write(6,'(/,a)') ' ... 
updating masked compliance ............................................' write(6,'(/,a,/,9(9(2x,f12.7,1x)/))',advance='no') ' Stiffness C (load) / GPa =',& transpose(temp99_Real)*1.0e-9_pReal flush(6) endif - k = 0 ! calculate reduced stiffness - do n = 1,9 - if(mask_stressVector(n)) then - k = k + 1 - j = 0 - do m = 1,9 - if(mask_stressVector(m)) then - j = j + 1 - c_reduced(k,j) = temp99_Real(n,m) - endif; enddo; endif; enddo - + + do i = 1,9; do j = 1,9 + mask(i,j) = mask_stressVector(i) .and. mask_stressVector(j) + enddo; enddo + c_reduced = reshape(pack(temp99_Real,mask),[size_reduced,size_reduced]) + + allocate(s_reduced,mold = c_reduced) call math_invert(s_reduced, errmatinv, c_reduced) ! invert reduced stiffness if (any(IEEE_is_NaN(s_reduced))) errmatinv = .true. if (errmatinv) call IO_error(error_ID=400,ext_msg='utilities_maskedCompliance') - temp99_Real = 0.0_pReal ! fill up compliance with zeros - k = 0 - do n = 1,9 - if(mask_stressVector(n)) then - k = k + 1 - j = 0 - do m = 1,9 - if(mask_stressVector(m)) then - j = j + 1 - temp99_Real(n,m) = s_reduced(k,j) - endif; enddo; endif; enddo !-------------------------------------------------------------------------------------------------- ! check if inversion was successful sTimesC = matmul(c_reduced,s_reduced) - do m=1, size_reduced - do n=1, size_reduced - errmatinv = errmatinv & - .or. (m==n .and. abs(sTimesC(m,n)-1.0_pReal) > 1.0e-12_pReal) & ! diagonal elements of S*C should be 1 - .or. (m/=n .and. abs(sTimesC(m,n)) > 1.0e-12_pReal) ! off-diagonal elements of S*C should be 0 - enddo - enddo + errmatinv = errmatinv .or. any(dNeq(sTimesC,math_identity2nd(size_reduced),1.0e-12_pReal)) if (debugGeneral .or. errmatinv) then write(formatString, '(i2)') size_reduced formatString = '(/,a,/,'//trim(formatString)//'('//trim(formatString)//'(2x,es9.2,1x)/))' @@ -757,15 +734,18 @@ function utilities_maskedCompliance(rot_BC,mask_stress,C) write(6,trim(formatString),advance='no') ' S (load) ', transpose(s_reduced) if(errmatinv) call IO_error(error_ID=400,ext_msg='utilities_maskedCompliance') endif + temp99_real = reshape(unpack(reshape(s_reduced,[size_reduced**2]),reshape(mask,[81]),0.0_pReal),[9,9]) else temp99_real = 0.0_pReal endif + + utilities_maskedCompliance = math_99to3333(temp99_Real) + if(debugGeneral) then write(6,'(/,a,/,9(9(2x,f10.5,1x)/),/)',advance='no') & ' Masked Compliance (load) * GPa =', transpose(temp99_Real)*1.0e9_pReal flush(6) endif - utilities_maskedCompliance = math_99to3333(temp99_Real) end function utilities_maskedCompliance @@ -774,9 +754,9 @@ end function utilities_maskedCompliance !> @brief calculate scalar gradient in fourier field !-------------------------------------------------------------------------------------------------- subroutine utilities_fourierScalarGradient() - + integer :: i, j, k - + do k = 1, grid3; do j = 1, grid(2); do i = 1,grid1Red vectorField_fourier(1:3,i,j,k) = scalarField_fourier(i,j,k)*xi1st(1:3,i,j,k) ! ToDo: no -conjg? 
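  ! Editorial note (not part of the patch): for the continuous and central-difference derivative
  ! variants xi1st is purely imaginary (see utilities_getFreqDerivative below), so xi1st and
  ! conjg(-xi1st) coincide and the ToDo above makes no difference there; the two expressions only
  ! differ for the forward/backward difference operator, which in general carries a real part.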
enddo; enddo; enddo @@ -788,9 +768,9 @@ end subroutine utilities_fourierScalarGradient !> @brief calculate vector divergence in fourier field !-------------------------------------------------------------------------------------------------- subroutine utilities_fourierVectorDivergence() - + integer :: i, j, k - + do k = 1, grid3; do j = 1, grid(2); do i = 1,grid1Red scalarField_fourier(i,j,k) = sum(vectorField_fourier(1:3,i,j,k)*conjg(-xi1st(1:3,i,j,k))) enddo; enddo; enddo @@ -802,9 +782,9 @@ end subroutine utilities_fourierVectorDivergence !> @brief calculate vector gradient in fourier field !-------------------------------------------------------------------------------------------------- subroutine utilities_fourierVectorGradient() - + integer :: i, j, k, m, n - + do k = 1, grid3; do j = 1, grid(2); do i = 1,grid1Red do m = 1, 3; do n = 1, 3 tensorField_fourier(m,n,i,j,k) = vectorField_fourier(m,i,j,k)*xi1st(n,i,j,k) @@ -820,7 +800,7 @@ end subroutine utilities_fourierVectorGradient subroutine utilities_fourierTensorDivergence() integer :: i, j, k - + do k = 1, grid3; do j = 1, grid(2); do i = 1,grid1Red vectorField_fourier(:,i,j,k) = matmul(tensorField_fourier(:,:,i,j,k),conjg(-xi1st(:,i,j,k))) enddo; enddo; enddo @@ -833,28 +813,28 @@ end subroutine utilities_fourierTensorDivergence !-------------------------------------------------------------------------------------------------- subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,& F,timeinc,rotation_BC) - + real(pReal), intent(out), dimension(3,3,3,3) :: C_volAvg, C_minmaxAvg !< average stiffness real(pReal), intent(out), dimension(3,3) :: P_av !< average PK stress real(pReal), intent(out), dimension(3,3,grid(1),grid(2),grid3) :: P !< PK stress real(pReal), intent(in), dimension(3,3,grid(1),grid(2),grid3) :: F !< deformation gradient target real(pReal), intent(in) :: timeinc !< loading time type(rotation), intent(in), optional :: rotation_BC !< rotation of load frame - - + + integer :: & i,ierr real(pReal), dimension(3,3,3,3) :: dPdF_max, dPdF_min real(pReal) :: dPdF_norm_max, dPdF_norm_min real(pReal), dimension(2) :: valueAndRank !< pair of min/max norm of dPdF to synchronize min/max of dPdF - + write(6,'(/,a)') ' ... evaluating constitutive response ......................................' flush(6) - + materialpoint_F = reshape(F,[3,3,1,product(grid(1:2))*grid3]) ! set materialpoint target F to estimated field - + call materialpoint_stressAndItsTangent(.true.,timeinc) ! calculate P field - + P = reshape(materialpoint_P, [3,3,grid(1),grid(2),grid3]) P_av = sum(sum(sum(P,dim=5),dim=4),dim=3) * wgt ! 
average of P call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr) @@ -862,11 +842,11 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,& write(6,'(/,a,/,3(3(2x,f12.4,1x)/))',advance='no') ' Piola--Kirchhoff stress (lab) / MPa =',& transpose(P_av)*1.e-6_pReal if(present(rotation_BC)) & - P_av = rotation_BC%rotTensor2(P_av) + P_av = rotation_BC%rotate(P_av) write(6,'(/,a,/,3(3(2x,f12.4,1x)/))',advance='no') ' Piola--Kirchhoff stress / MPa =',& transpose(P_av)*1.e-6_pReal flush(6) - + dPdF_max = 0.0_pReal dPdF_norm_max = 0.0_pReal dPdF_min = huge(1.0_pReal) @@ -881,21 +861,21 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,& dPdF_norm_min = sum(materialpoint_dPdF(1:3,1:3,1:3,1:3,1,i)**2.0_pReal) endif end do - + valueAndRank = [dPdF_norm_max,real(worldrank,pReal)] call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MAXLOC, PETSC_COMM_WORLD, ierr) if (ierr /= 0) call IO_error(894, ext_msg='MPI_Allreduce max') call MPI_Bcast(dPdF_max,81,MPI_DOUBLE,int(valueAndRank(2)),PETSC_COMM_WORLD, ierr) if (ierr /= 0) call IO_error(894, ext_msg='MPI_Bcast max') - + valueAndRank = [dPdF_norm_min,real(worldrank,pReal)] call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MINLOC, PETSC_COMM_WORLD, ierr) if (ierr /= 0) call IO_error(894, ext_msg='MPI_Allreduce min') call MPI_Bcast(dPdF_min,81,MPI_DOUBLE,int(valueAndRank(2)),PETSC_COMM_WORLD, ierr) if (ierr /= 0) call IO_error(894, ext_msg='MPI_Bcast min') - + C_minmaxAvg = 0.5_pReal*(dPdF_max + dPdF_min) - + C_volAvg = sum(sum(materialpoint_dPdF,dim=6),dim=5) call MPI_Allreduce(MPI_IN_PLACE,C_volAvg,81,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr) C_volAvg = C_volAvg * wgt @@ -908,7 +888,7 @@ end subroutine utilities_constitutiveResponse !> @brief calculates forward rate, either guessing or just add delta/timeinc !-------------------------------------------------------------------------------------------------- pure function utilities_calculateRate(heterogeneous,field0,field,dt,avRate) - + real(pReal), intent(in), dimension(3,3) :: & avRate !< homogeneous addon real(pReal), intent(in) :: & @@ -920,7 +900,7 @@ pure function utilities_calculateRate(heterogeneous,field0,field,dt,avRate) field !< data of current step real(pReal), dimension(3,3,grid(1),grid(2),grid3) :: & utilities_calculateRate - + if (heterogeneous) then utilities_calculateRate = (field-field0) / dt else @@ -971,14 +951,14 @@ pure function utilities_getFreqDerivative(k_s) complex(pReal), dimension(3) :: utilities_getFreqDerivative select case (spectral_derivative_ID) - case (DERIVATIVE_CONTINUOUS_ID) + case (DERIVATIVE_CONTINUOUS_ID) utilities_getFreqDerivative = cmplx(0.0_pReal, 2.0_pReal*PI*real(k_s,pReal)/geomSize,pReal) - case (DERIVATIVE_CENTRAL_DIFF_ID) + case (DERIVATIVE_CENTRAL_DIFF_ID) utilities_getFreqDerivative = cmplx(0.0_pReal, sin(2.0_pReal*PI*real(k_s,pReal)/real(grid,pReal)), pReal)/ & - cmplx(2.0_pReal*geomSize/real(grid,pReal), 0.0_pReal, pReal) - - case (DERIVATIVE_FWBW_DIFF_ID) + cmplx(2.0_pReal*geomSize/real(grid,pReal), 0.0_pReal, pReal) + + case (DERIVATIVE_FWBW_DIFF_ID) utilities_getFreqDerivative(1) = & cmplx(cos(2.0_pReal*PI*real(k_s(1),pReal)/real(grid(1),pReal)) - 1.0_pReal, & sin(2.0_pReal*PI*real(k_s(1),pReal)/real(grid(1),pReal)), pReal)* & @@ -986,7 +966,7 @@ pure function utilities_getFreqDerivative(k_s) sin(2.0_pReal*PI*real(k_s(2),pReal)/real(grid(2),pReal)), pReal)* & cmplx(cos(2.0_pReal*PI*real(k_s(3),pReal)/real(grid(3),pReal)) + 1.0_pReal, & 
sin(2.0_pReal*PI*real(k_s(3),pReal)/real(grid(3),pReal)), pReal)/ & - cmplx(4.0_pReal*geomSize(1)/real(grid(1),pReal), 0.0_pReal, pReal) + cmplx(4.0_pReal*geomSize(1)/real(grid(1),pReal), 0.0_pReal, pReal) utilities_getFreqDerivative(2) = & cmplx(cos(2.0_pReal*PI*real(k_s(1),pReal)/real(grid(1),pReal)) + 1.0_pReal, & sin(2.0_pReal*PI*real(k_s(1),pReal)/real(grid(1),pReal)), pReal)* & @@ -994,7 +974,7 @@ pure function utilities_getFreqDerivative(k_s) sin(2.0_pReal*PI*real(k_s(2),pReal)/real(grid(2),pReal)), pReal)* & cmplx(cos(2.0_pReal*PI*real(k_s(3),pReal)/real(grid(3),pReal)) + 1.0_pReal, & sin(2.0_pReal*PI*real(k_s(3),pReal)/real(grid(3),pReal)), pReal)/ & - cmplx(4.0_pReal*geomSize(2)/real(grid(2),pReal), 0.0_pReal, pReal) + cmplx(4.0_pReal*geomSize(2)/real(grid(2),pReal), 0.0_pReal, pReal) utilities_getFreqDerivative(3) = & cmplx(cos(2.0_pReal*PI*real(k_s(1),pReal)/real(grid(1),pReal)) + 1.0_pReal, & sin(2.0_pReal*PI*real(k_s(1),pReal)/real(grid(1),pReal)), pReal)* & @@ -1002,7 +982,7 @@ pure function utilities_getFreqDerivative(k_s) sin(2.0_pReal*PI*real(k_s(2),pReal)/real(grid(2),pReal)), pReal)* & cmplx(cos(2.0_pReal*PI*real(k_s(3),pReal)/real(grid(3),pReal)) - 1.0_pReal, & sin(2.0_pReal*PI*real(k_s(3),pReal)/real(grid(3),pReal)), pReal)/ & - cmplx(4.0_pReal*geomSize(3)/real(grid(3),pReal), 0.0_pReal, pReal) + cmplx(4.0_pReal*geomSize(3)/real(grid(3),pReal), 0.0_pReal, pReal) end select end function utilities_getFreqDerivative @@ -1014,7 +994,7 @@ end function utilities_getFreqDerivative ! convolution !-------------------------------------------------------------------------------------------------- subroutine utilities_updateCoords(F) - + real(pReal), dimension(3,3,grid(1),grid(2),grid3), intent(in) :: F real(pReal), dimension(3, grid(1),grid(2),grid3) :: IPcoords real(pReal), dimension(3, grid(1),grid(2),grid3+2) :: IPfluct_padded ! Fluctuations of cell center displacement (padded along z for MPI) @@ -1040,7 +1020,7 @@ subroutine utilities_updateCoords(F) 1, 0, 1, & 1, 1, 1, & 0, 1, 1 ], [3,8]) - + step = geomSize/real(grid, pReal) !-------------------------------------------------------------------------------------------------- ! integration in Fourier space to get fluctuations of cell center discplacements @@ -1057,27 +1037,27 @@ subroutine utilities_updateCoords(F) enddo; enddo; enddo call fftw_mpi_execute_dft_c2r(planVectorBack,vectorField_fourier,vectorField_real) - + !-------------------------------------------------------------------------------------------------- ! average F if (grid3Offset == 0) Favg = real(tensorField_fourier(1:3,1:3,1,1,1),pReal)*wgt call MPI_Bcast(Favg,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr) if(ierr /=0) call IO_error(894, ext_msg='update_IPcoords/MPI_Bcast') - + !-------------------------------------------------------------------------------------------------- ! pad cell center fluctuations along z-direction (needed when running MPI simulation) IPfluct_padded(1:3,1:grid(1),1:grid(2),2:grid3+1) = vectorField_real(1:3,1:grid(1),1:grid(2),1:grid3) c = product(shape(IPfluct_padded(:,:,:,1))) !< amount of data to transfer rank_t = modulo(worldrank+1,worldsize) rank_b = modulo(worldrank-1,worldsize) - + ! 
send bottom layer to process below call MPI_Isend(IPfluct_padded(:,:,:,2), c,MPI_DOUBLE,rank_b,0,PETSC_COMM_WORLD,r,ierr) if(ierr /=0) call IO_error(894, ext_msg='update_IPcoords/MPI_Isend') call MPI_Irecv(IPfluct_padded(:,:,:,grid3+2),c,MPI_DOUBLE,rank_t,0,PETSC_COMM_WORLD,r,ierr) if(ierr /=0) call IO_error(894, ext_msg='update_IPcoords/MPI_Irecv') call MPI_Wait(r,s,ierr) - + ! send top layer to process above if(ierr /=0) call IO_error(894, ext_msg='update_IPcoords/MPI_Wait') call MPI_Isend(IPfluct_padded(:,:,:,grid3+1),c,MPI_DOUBLE,rank_t,0,PETSC_COMM_WORLD,r,ierr) @@ -1085,9 +1065,9 @@ subroutine utilities_updateCoords(F) call MPI_Irecv(IPfluct_padded(:,:,:,1), c,MPI_DOUBLE,rank_b,0,PETSC_COMM_WORLD,r,ierr) if(ierr /=0) call IO_error(894, ext_msg='update_IPcoords/MPI_Irecv') call MPI_Wait(r,s,ierr) - + !-------------------------------------------------------------------------------------------------- - ! calculate nodal displacements + ! calculate nodal displacements nodeCoords = 0.0_pReal do k = 0,grid3; do j = 0,grid(2); do i = 0,grid(1) nodeCoords(1:3,i+1,j+1,k+1) = matmul(Favg,step*(real([i,j,k+grid3Offset],pReal))) @@ -1097,17 +1077,17 @@ subroutine utilities_updateCoords(F) + IPfluct_padded(1:3,modulo(me(1)-1,grid(1))+1,modulo(me(2)-1,grid(2))+1,me(3)+1)*0.125_pReal enddo averageFluct enddo; enddo; enddo - + !-------------------------------------------------------------------------------------------------- ! calculate cell center displacements do k = 1,grid3; do j = 1,grid(2); do i = 1,grid(1) IPcoords(1:3,i,j,k) = vectorField_real(1:3,i,j,k) & + matmul(Favg,step*real([i,j,k+grid3Offset]-0.5_pReal,pReal)) enddo; enddo; enddo - + call discretization_setNodeCoords(reshape(NodeCoords,[3,(grid(1)+1)*(grid(2)+1)*(grid3+1)])) call discretization_setIPcoords (reshape(IPcoords, [3,grid(1)*grid(2)*grid3])) - + end subroutine utilities_updateCoords @@ -1115,7 +1095,7 @@ end subroutine utilities_updateCoords !> @brief Write out the current reference stiffness for restart. 
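
The coordinate update above decomposes positions into an affine part from the average deformation gradient and a periodic fluctuation obtained by inverse FFT. The following NumPy sketch mirrors only the cell-centre branch; grid, spacing, Favg and the fluctuation field are placeholder values, not taken from the code above.

import numpy as np

# Cell-centre positions: affine map of the (i+0.5)*step lattice by the average
# deformation gradient plus the fluctuation field (here simply zero).
grid = (4, 4, 4)                                  # assumed example grid
size = np.array([1.0, 1.0, 1.0])                  # assumed physical size
step = size / np.array(grid)
Favg = np.eye(3) * 1.01                           # example average deformation gradient
fluct = np.zeros((3,) + grid)                     # stands in for the inverse-FFT result

x = np.zeros((3,) + grid)
for k in range(grid[2]):
    for j in range(grid[1]):
        for i in range(grid[0]):
            x[:, i, j, k] = fluct[:, i, j, k] + Favg @ (step * (np.array([i, j, k]) + 0.5))
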
!--------------------------------------------------------------------------------------------------- subroutine utilities_saveReferenceStiffness - + integer :: & fileUnit @@ -1125,7 +1105,7 @@ subroutine utilities_saveReferenceStiffness write(fileUnit) C_ref close(fileUnit) endif - + end subroutine utilities_saveReferenceStiffness end module spectral_utilities From 1683e18c1fff2b7227b0a06fc8d03f538e413551 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Tue, 25 Feb 2020 11:53:02 +0100 Subject: [PATCH 41/51] keep order mainly relevant for increments --- python/damask/dadf5.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/python/damask/dadf5.py b/python/damask/dadf5.py index 191b2203b..ccbd3be03 100644 --- a/python/damask/dadf5.py +++ b/python/damask/dadf5.py @@ -111,10 +111,13 @@ class DADF5(): if action == 'set': self.selection[what] = valid elif action == 'add': - self.selection[what] = list(existing.union(valid)) + add=existing.union(valid) + add_sorted=sorted(add, key=lambda x: int("".join([i for i in x if i.isdigit()]))) + self.selection[what] = add_sorted elif action == 'del': - self.selection[what] = list(existing.difference_update(valid)) - + diff=existing.difference(valid) + diff_sorted=sorted(diff, key=lambda x: int("".join([i for i in x if i.isdigit()]))) + self.selection[what] = diff_sorted def __time_to_inc(self,start,end): selected = [] From 4889e75e52f0cae33e0238ff34d78feef66f6673 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Tue, 25 Feb 2020 17:32:49 +0100 Subject: [PATCH 42/51] clearer structure --- src/lattice.f90 | 657 +++++++++++++++++++++++------------------------- 1 file changed, 320 insertions(+), 337 deletions(-) diff --git a/src/lattice.f90 b/src/lattice.f90 index 3d624ba48..b31cf98b3 100644 --- a/src/lattice.f90 +++ b/src/lattice.f90 @@ -15,14 +15,14 @@ module lattice implicit none private - + ! BEGIN DEPRECATED integer, parameter, public :: & LATTICE_maxNcleavageFamily = 3 !< max # of transformation system families over lattice structures - + integer, allocatable, dimension(:,:), protected, public :: & lattice_NcleavageSystem !< total # of transformation systems in each family - + real(pReal), allocatable, dimension(:,:,:,:,:), protected, public :: & lattice_Scleavage !< Schmid matrices for cleavage systems ! END DEPRECATED @@ -32,16 +32,16 @@ module lattice ! face centered cubic integer, dimension(2), parameter :: & LATTICE_FCC_NSLIPSYSTEM = [12, 6] !< # of slip systems per family for fcc - + integer, dimension(1), parameter :: & LATTICE_FCC_NTWINSYSTEM = [12] !< # of twin systems per family for fcc - + integer, dimension(1), parameter :: & LATTICE_FCC_NTRANSSYSTEM = [12] !< # of transformation systems per family for fcc - + integer, dimension(2), parameter :: & LATTICE_FCC_NCLEAVAGESYSTEM = [3, 4] !< # of cleavage systems per family for fcc - + integer, parameter :: & #ifndef __PGI LATTICE_FCC_NSLIP = sum(LATTICE_FCC_NSLIPSYSTEM), & !< total # of slip systems for fcc @@ -78,7 +78,7 @@ module lattice 0, 1, 1, 0, 1,-1, & 0, 1,-1, 0, 1, 1 & ],pReal),shape(LATTICE_FCC_SYSTEMSLIP)) !< Slip system <110>{111} directions. Sorted according to Eisenlohr & Hantcherli - + real(pReal), dimension(3+3,LATTICE_FCC_NTWIN), parameter :: & LATTICE_FCC_SYSTEMTWIN = reshape(real( [& -2, 1, 1, 1, 1, 1, & @@ -94,7 +94,7 @@ module lattice -1,-2,-1, -1, 1,-1, & -1, 1, 2, -1, 1,-1 & ],pReal),shape(LATTICE_FCC_SYSTEMTWIN)) !< Twin system <112>{111} directions. 
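
The digit-based sort key in the dadf5.py hunk above keeps increment labels in numeric rather than lexicographic order after a set union or difference. A minimal stand-alone sketch, assuming labels of the form 'inc<number>':

# Sort labels by the integer formed from their digits, so 'inc9' precedes 'inc10'.
def natural_key(label):
    return int("".join(c for c in label if c.isdigit()))

labels = {'inc10', 'inc2', 'inc9'}
print(sorted(labels, key=natural_key))            # ['inc2', 'inc9', 'inc10']
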
Sorted according to Eisenlohr & Hantcherli - + integer, dimension(2,LATTICE_FCC_NTWIN), parameter, public :: & LATTICE_FCC_TWINNUCLEATIONSLIPPAIR = reshape( [& 2,3, & @@ -110,7 +110,7 @@ module lattice 10,12, & 10,11 & ],shape(LATTICE_FCC_TWINNUCLEATIONSLIPPAIR)) - + real(pReal), dimension(3+3,LATTICE_FCC_NCLEAVAGE), parameter :: & LATTICE_FCC_SYSTEMCLEAVAGE = reshape(real([& ! Cleavage direction Plane normal @@ -122,18 +122,18 @@ module lattice -1, 0,-1, 1,-1,-1, & 0, 1, 1, -1, 1,-1 & ],pReal),shape(LATTICE_FCC_SYSTEMCLEAVAGE)) - + !-------------------------------------------------------------------------------------------------- ! body centered cubic integer, dimension(2), parameter :: & LATTICE_BCC_NSLIPSYSTEM = [12, 12] !< # of slip systems per family for bcc - + integer, dimension(1), parameter :: & LATTICE_BCC_NTWINSYSTEM = [12] !< # of twin systems per family for bcc - + integer, dimension(2), parameter :: & LATTICE_BCC_NCLEAVAGESYSTEM = [3, 6] !< # of cleavage systems per family for bcc - + integer, parameter :: & #ifndef __PGI LATTICE_BCC_NSLIP = sum(LATTICE_BCC_NSLIPSYSTEM), & !< total # of slip systems for bcc @@ -175,7 +175,7 @@ module lattice -1, 1, 1, 1,-1, 2, & 1, 1, 1, 1, 1,-2 & ],pReal),shape(LATTICE_BCC_SYSTEMSLIP)) - + real(pReal), dimension(3+3,LATTICE_BCC_NTWIN), parameter :: & LATTICE_BCC_SYSTEMTWIN = reshape(real([& ! Twin system <111>{112} @@ -191,8 +191,8 @@ module lattice 1,-1, 1, -1, 1, 2, & -1, 1, 1, 1,-1, 2, & 1, 1, 1, 1, 1,-2 & - ],pReal),shape(LATTICE_BCC_SYSTEMTWIN)) - + ],pReal),shape(LATTICE_BCC_SYSTEMTWIN)) + real(pReal), dimension(3+3,LATTICE_BCC_NCLEAVAGE), parameter :: & LATTICE_BCC_SYSTEMCLEAVAGE = reshape(real([& ! Cleavage direction Plane normal @@ -206,18 +206,18 @@ module lattice -1, 1, 1, 1, 1, 0, & 1, 1, 1, -1, 1, 0 & ],pReal),shape(LATTICE_BCC_SYSTEMCLEAVAGE)) - + !-------------------------------------------------------------------------------------------------- ! hexagonal integer, dimension(6), parameter :: & LATTICE_HEX_NSLIPSYSTEM = [3, 3, 3, 6, 12, 6] !< # of slip systems per family for hex - + integer, dimension(4), parameter :: & LATTICE_HEX_NTWINSYSTEM = [6, 6, 6, 6] !< # of slip systems per family for hex - + integer, dimension(1), parameter :: & LATTICE_HEX_NCLEAVAGESYSTEM = [3] !< # of cleavage systems per family for hex - + integer, parameter :: & #ifndef __PGI LATTICE_HEX_NSLIP = sum(LATTICE_HEX_NSLIPSYSTEM), & !< total # of slip systems for hex @@ -265,14 +265,14 @@ module lattice -1, 2, -1, 3, 1, -1, 0, 1, & -2, 1, 1, 3, 1, -1, 0, 1, & ! pyramidal system: c+a slip <11.3>{-1-1.2} -- as for hexagonal ice (Castelnau et al. 1996, similar to twin system found below) - -1, -1, 2, 3, 1, 1, -2, 2, & ! <11.3>{-1-1.2} shear = 2((c/a)^2-2)/(3 c/a) + -1, -1, 2, 3, 1, 1, -2, 2, & ! <11.3>{-1-1.2} shear = 2((c/a)^2-2)/(3 c/a) 1, -2, 1, 3, -1, 2, -1, 2, & 2, -1, -1, 3, -2, 1, 1, 2, & 1, 1, -2, 3, -1, -1, 2, 2, & -1, 2, -1, 3, 1, -2, 1, 2, & -2, 1, 1, 3, 2, -1, -1, 2 & ],pReal),shape(LATTICE_HEX_SYSTEMSLIP)) !< slip systems for hex, sorted by P. Eisenlohr CCW around starting next to a_1 axis - + real(pReal), dimension(4+4,LATTICE_HEX_NTWIN), parameter :: & LATTICE_HEX_SYSTEMTWIN = reshape(real([& ! Compression or Tension = f(twinning shear=f(c/a)) for each metal ! (according to Yoo 1981) @@ -304,7 +304,7 @@ module lattice 1, -2, 1, -3, 1, -2, 1, 2, & 2, -1, -1, -3, 2, -1, -1, 2 & ],pReal),shape(LATTICE_HEX_SYSTEMTWIN)) !< twin systems for hex, sorted by P. 
Eisenlohr CCW around starting next to a_1 axis - + real(pReal), dimension(4+4,LATTICE_HEX_NCLEAVAGE), parameter :: & LATTICE_HEX_SYSTEMCLEAVAGE = reshape(real([& ! Cleavage direction Plane normal @@ -312,20 +312,20 @@ module lattice 0, 0, 0, 1, 2,-1,-1, 0, & 0, 0, 0, 1, 0, 1,-1, 0 & ],pReal),shape(LATTICE_HEX_SYSTEMCLEAVAGE)) - - + + !-------------------------------------------------------------------------------------------------- ! body centered tetragonal integer, dimension(13), parameter :: & LATTICE_BCT_NSLIPSYSTEM = [2, 2, 2, 4, 2, 4, 2, 2, 4, 8, 4, 8, 8 ] !< # of slip systems per family for bct (Sn) Bieler J. Electr Mater 2009 - + integer, parameter :: & #ifndef __PGI LATTICE_BCT_NSLIP = sum(LATTICE_BCT_NSLIPSYSTEM) !< total # of slip systems for bct #else LATTICE_BCT_NSLIP = 52 #endif - + real(pReal), dimension(3+3,LATTICE_BCT_NSLIP), parameter :: & LATTICE_BCT_SYSTEMSLIP = reshape(real([& ! Slip direction Plane normal @@ -395,12 +395,12 @@ module lattice -1, 1, 1, -1,-2, 1, & 1, 1, 1, 1,-2, 1 & ],pReal),shape(LATTICE_BCT_SYSTEMSLIP)) !< slip systems for bct sorted by Bieler - + !-------------------------------------------------------------------------------------------------- ! isotropic integer, dimension(1), parameter :: & LATTICE_ISO_NCLEAVAGESYSTEM = [3] !< # of cleavage systems per family for iso - + integer, parameter :: & #ifndef __PGI LATTICE_ISO_NCLEAVAGE = sum(LATTICE_ISO_NCLEAVAGESYSTEM) !< total # of cleavage systems for iso @@ -415,13 +415,13 @@ module lattice 0, 0, 1, 0, 1, 0, & 1, 0, 0, 0, 0, 1 & ],pReal),shape(LATTICE_ISO_SYSTEMCLEAVAGE)) - - + + !-------------------------------------------------------------------------------------------------- ! orthorhombic integer, dimension(3), parameter :: & LATTICE_ORT_NCLEAVAGESYSTEM = [1, 1, 1] !< # of cleavage systems per family for ortho - + integer, parameter :: & #ifndef __PGI LATTICE_ORT_NCLEAVAGE = sum(LATTICE_ORT_NCLEAVAGESYSTEM) !< total # of cleavage systems for ortho @@ -436,23 +436,23 @@ module lattice 0, 0, 1, 0, 1, 0, & 1, 0, 0, 0, 0, 1 & ],pReal),shape(LATTICE_ORT_SYSTEMCLEAVAGE)) - - - + + + ! BEGIN DEPRECATED integer, parameter, public :: & LATTICE_maxNcleavage = max(LATTICE_fcc_Ncleavage,LATTICE_bcc_Ncleavage, & LATTICE_hex_Ncleavage, & LATTICE_iso_Ncleavage,LATTICE_ort_Ncleavage) ! END DEPRECATED - + real(pReal), dimension(:,:,:), allocatable, public, protected :: & lattice_C66 real(pReal), dimension(:,:,:,:,:), allocatable, public, protected :: & lattice_C3333 real(pReal), dimension(:), allocatable, public, protected :: & lattice_mu, lattice_nu - + ! SHOULD NOT BE PART OF LATTICE BEGIN real(pReal), dimension(:,:,:,:), allocatable, public, protected :: & ! with higher-order parameters (e.g. temperature-dependent) lattice_thermalExpansion33 @@ -465,7 +465,7 @@ module lattice lattice_specificHeat, & lattice_referenceTemperature ! 
SHOULD NOT BE PART OF LATTICE END - + enum, bind(c) enumerator :: LATTICE_undefined_ID, & LATTICE_iso_ID, & @@ -475,18 +475,18 @@ module lattice LATTICE_bct_ID, & LATTICE_ort_ID end enum - + integer(kind(LATTICE_undefined_ID)), dimension(:), allocatable, public, protected :: & lattice_structure - + interface lattice_forestProjection_edge module procedure slipProjection_transverse end interface lattice_forestProjection_edge - + interface lattice_forestProjection_screw module procedure slipProjection_direction end interface lattice_forestProjection_screw - + public :: & lattice_init, & LATTICE_iso_ID, & @@ -516,29 +516,29 @@ module lattice lattice_slip_transverse, & lattice_labels_slip, & lattice_labels_twin - + contains !-------------------------------------------------------------------------------------------------- !> @brief Module initialization !-------------------------------------------------------------------------------------------------- subroutine lattice_init - + integer :: Nphases, p character(len=pStringLen) :: & tag = '' real(pReal) :: CoverA real(pReal), dimension(:), allocatable :: & temp - + write(6,'(/,a)') ' <<<+- lattice init -+>>>' - + Nphases = size(config_phase) - + allocate(lattice_structure(Nphases),source = LATTICE_undefined_ID) allocate(lattice_C66(6,6,Nphases), source=0.0_pReal) allocate(lattice_C3333(3,3,3,3,Nphases), source=0.0_pReal) - + allocate(lattice_thermalExpansion33 (3,3,3,Nphases), source=0.0_pReal) ! constant, linear, quadratic coefficients allocate(lattice_thermalConductivity33 (3,3,Nphases), source=0.0_pReal) allocate(lattice_damageDiffusion33 (3,3,Nphases), source=0.0_pReal) @@ -546,17 +546,16 @@ subroutine lattice_init allocate(lattice_massDensity ( Nphases), source=0.0_pReal) allocate(lattice_specificHeat ( Nphases), source=0.0_pReal) allocate(lattice_referenceTemperature ( Nphases), source=300.0_pReal) - + allocate(lattice_mu(Nphases), source=0.0_pReal) allocate(lattice_nu(Nphases), source=0.0_pReal) - - + allocate(lattice_Scleavage(3,3,3,lattice_maxNcleavage,Nphases),source=0.0_pReal) allocate(lattice_NcleavageSystem(lattice_maxNcleavageFamily,Nphases),source=0) - + do p = 1, size(config_phase) tag = config_phase(p)%getString('lattice_structure') - select case(trim(tag(1:3))) + select case(tag(1:3)) case('iso') lattice_structure(p) = LATTICE_iso_ID case('fcc') @@ -569,9 +568,11 @@ subroutine lattice_init lattice_structure(p) = LATTICE_bct_ID case('ort') lattice_structure(p) = LATTICE_ort_ID + case default + call IO_error(130,ext_msg='lattice_init') end select - - + + lattice_C66(1,1,p) = config_phase(p)%getFloat('c11',defaultVal=0.0_pReal) lattice_C66(1,2,p) = config_phase(p)%getFloat('c12',defaultVal=0.0_pReal) lattice_C66(1,3,p) = config_phase(p)%getFloat('c13',defaultVal=0.0_pReal) @@ -581,21 +582,21 @@ subroutine lattice_init lattice_C66(4,4,p) = config_phase(p)%getFloat('c44',defaultVal=0.0_pReal) lattice_C66(5,5,p) = config_phase(p)%getFloat('c55',defaultVal=0.0_pReal) lattice_C66(6,6,p) = config_phase(p)%getFloat('c66',defaultVal=0.0_pReal) - - + + CoverA = config_phase(p)%getFloat('c/a',defaultVal=0.0_pReal) - + lattice_thermalConductivity33(1,1,p) = config_phase(p)%getFloat('thermal_conductivity11',defaultVal=0.0_pReal) lattice_thermalConductivity33(2,2,p) = config_phase(p)%getFloat('thermal_conductivity22',defaultVal=0.0_pReal) lattice_thermalConductivity33(3,3,p) = config_phase(p)%getFloat('thermal_conductivity33',defaultVal=0.0_pReal) - + temp = config_phase(p)%getFloats('thermal_expansion11',defaultVal=[0.0_pReal]) ! 
read up to three parameters (constant, linear, quadratic with T) lattice_thermalExpansion33(1,1,1:size(temp),p) = temp temp = config_phase(p)%getFloats('thermal_expansion22',defaultVal=[0.0_pReal]) ! read up to three parameters (constant, linear, quadratic with T) lattice_thermalExpansion33(2,2,1:size(temp),p) = temp temp = config_phase(p)%getFloats('thermal_expansion33',defaultVal=[0.0_pReal]) ! read up to three parameters (constant, linear, quadratic with T) lattice_thermalExpansion33(3,3,1:size(temp),p) = temp - + lattice_specificHeat(p) = config_phase(p)%getFloat( 'specific_heat',defaultVal=0.0_pReal) lattice_massDensity(p) = config_phase(p)%getFloat( 'mass_density',defaultVal=0.0_pReal) lattice_referenceTemperature(p) = config_phase(p)%getFloat( 'reference_temperature',defaultVal=0.0_pReal) @@ -603,17 +604,17 @@ subroutine lattice_init lattice_DamageDiffusion33(2,2,p) = config_phase(p)%getFloat( 'damage_diffusion22',defaultVal=0.0_pReal) lattice_DamageDiffusion33(3,3,p) = config_phase(p)%getFloat( 'damage_diffusion33',defaultVal=0.0_pReal) lattice_DamageMobility(p) = config_phase(p)%getFloat( 'damage_mobility',defaultVal=0.0_pReal) - + if ((CoverA < 1.0_pReal .or. CoverA > 2.0_pReal) & .and. lattice_structure(p) == LATTICE_hex_ID) call IO_error(131,el=p) ! checking physical significance of c/a if ((CoverA > 2.0_pReal) & .and. lattice_structure(p) == LATTICE_bct_ID) call IO_error(131,el=p) ! checking physical significance of c/a call lattice_initializeStructure(p, CoverA) enddo - + end subroutine lattice_init - - + + !-------------------------------------------------------------------------------------------------- !> @brief !!!!!!!DEPRECTATED!!!!!! !-------------------------------------------------------------------------------------------------- @@ -622,14 +623,13 @@ subroutine lattice_initializeStructure(myPhase,CoverA) integer, intent(in) :: myPhase real(pReal), intent(in) :: & CoverA - + integer :: & - i, & - myNcleavage - + i + lattice_C66(1:6,1:6,myPhase) = lattice_symmetrizeC66(lattice_structure(myPhase),& lattice_C66(1:6,1:6,myPhase)) - + lattice_mu(myPhase) = 0.2_pReal *( lattice_C66(1,1,myPhase) & - lattice_C66(1,2,myPhase) & + 3.0_pReal*lattice_C66(4,4,myPhase)) ! (C11iso-C12iso)/2 with C11iso=(3*C11+2*C12+4*C44)/5 and C12iso=(C11+4*C12-2*C44)/5 @@ -645,79 +645,62 @@ subroutine lattice_initializeStructure(myPhase,CoverA) if (abs(lattice_C66(i,i,myPhase)) @brief Symmetrizes stiffness matrix according to lattice type !> @details J. A. Rayne and B. S. Chandrasekhar Phys. Rev. 120, 1658 Erratum Phys. Rev. 
122, 1962 !-------------------------------------------------------------------------------------------------- pure function lattice_symmetrizeC66(struct,C66) - + integer(kind(LATTICE_undefined_ID)), intent(in) :: struct real(pReal), dimension(6,6), intent(in) :: C66 real(pReal), dimension(6,6) :: lattice_symmetrizeC66 integer :: j,k - + lattice_symmetrizeC66 = 0.0_pReal - + select case(struct) case (LATTICE_iso_ID) do k=1,3 @@ -777,22 +760,22 @@ pure function lattice_symmetrizeC66(struct,C66) case default lattice_symmetrizeC66 = C66 end select - + end function lattice_symmetrizeC66 - - + + !-------------------------------------------------------------------------------------------------- !> @brief Symmetrizes 2nd order tensor according to lattice type !-------------------------------------------------------------------------------------------------- pure function lattice_symmetrize33(struct,T33) - + integer(kind(LATTICE_undefined_ID)), intent(in) :: struct real(pReal), dimension(3,3), intent(in) :: T33 real(pReal), dimension(3,3) :: lattice_symmetrize33 integer :: k - + lattice_symmetrize33 = 0.0_pReal - + select case(struct) case (LATTICE_iso_ID,LATTICE_fcc_ID,LATTICE_bcc_ID) do k=1,3 @@ -809,26 +792,26 @@ pure function lattice_symmetrize33(struct,T33) case default lattice_symmetrize33 = T33 end select - + end function lattice_symmetrize33 - - + + !-------------------------------------------------------------------------------------------------- !> @brief Characteristic shear for twinning !-------------------------------------------------------------------------------------------------- function lattice_characteristicShear_Twin(Ntwin,structure,CoverA) result(characteristicShear) - + integer, dimension(:), intent(in) :: Ntwin !< number of active twin systems per family character(len=*), intent(in) :: structure !< lattice structure real(pReal), intent(in) :: cOverA !< c/a ratio real(pReal), dimension(sum(Ntwin)) :: characteristicShear - + integer :: & a, & !< index of active system p, & !< index in potential system list f, & !< index of my family s !< index of my system in current family - + integer, dimension(LATTICE_HEX_NTWIN), parameter :: & HEX_SHEARTWIN = reshape( [& 1, & ! <-10.1>{10.2} @@ -856,10 +839,10 @@ function lattice_characteristicShear_Twin(Ntwin,structure,CoverA) result(charact 4, & 4 & ],[LATTICE_HEX_NTWIN]) ! 
indicator to formulas below - + if (len_trim(structure) /= 3) & call IO_error(137,ext_msg='lattice_characteristicShear_Twin: '//trim(structure)) - + a = 0 myFamilies: do f = 1,size(Ntwin,1) mySystems: do s = 1,Ntwin(f) @@ -886,28 +869,28 @@ function lattice_characteristicShear_Twin(Ntwin,structure,CoverA) result(charact end select enddo mySystems enddo myFamilies - + end function lattice_characteristicShear_Twin - - + + !-------------------------------------------------------------------------------------------------- !> @brief Rotated elasticity matrices for twinning in 66-vector notation !-------------------------------------------------------------------------------------------------- function lattice_C66_twin(Ntwin,C66,structure,CoverA) - + integer, dimension(:), intent(in) :: Ntwin !< number of active twin systems per family character(len=*), intent(in) :: structure !< lattice structure real(pReal), dimension(6,6), intent(in) :: C66 !< unrotated parent stiffness matrix real(pReal), intent(in) :: cOverA !< c/a ratio real(pReal), dimension(6,6,sum(Ntwin)) :: lattice_C66_twin - + real(pReal), dimension(3,3,sum(Ntwin)):: coordinateSystem type(rotation) :: R integer :: i - + if (len_trim(structure) /= 3) & call IO_error(137,ext_msg='lattice_C66_twin: '//trim(structure)) - + select case(structure(1:3)) case('fcc') coordinateSystem = buildCoordinateSystem(Ntwin,LATTICE_FCC_NSLIPSYSTEM,LATTICE_FCC_SYSTEMTWIN,& @@ -921,35 +904,35 @@ function lattice_C66_twin(Ntwin,C66,structure,CoverA) case default call IO_error(137,ext_msg='lattice_C66_twin: '//trim(structure)) end select - + do i = 1, sum(Ntwin) call R%fromAxisAngle([coordinateSystem(1:3,2,i),PI],P=1) ! ToDo: Why always 180 deg? lattice_C66_twin(1:6,1:6,i) = R%rotTensor4sym(C66) enddo end function lattice_C66_twin - - + + !-------------------------------------------------------------------------------------------------- !> @brief Rotated elasticity matrices for transformation in 66-vector notation !-------------------------------------------------------------------------------------------------- function lattice_C66_trans(Ntrans,C_parent66,structure_target, & cOverA_trans,a_bcc,a_fcc) - + integer, dimension(:), intent(in) :: Ntrans !< number of active twin systems per family character(len=*), intent(in) :: structure_target !< lattice structure real(pReal), dimension(6,6), intent(in) :: C_parent66 real(pReal), dimension(6,6,sum(Ntrans)) :: lattice_C66_trans - + real(pReal), dimension(6,6) :: C_bar66, C_target_unrotated66 real(pReal), dimension(3,3,sum(Ntrans)) :: Q,S type(rotation) :: R real(pReal) :: a_bcc, a_fcc, cOverA_trans integer :: i - + if (len_trim(structure_target) /= 3) & call IO_error(137,ext_msg='lattice_C66_trans (target): '//trim(structure_target)) - + !-------------------------------------------------------------------------------------------------- ! 
elasticity matrix of the target phase in cube orientation if (structure_target(1:3) == 'hex') then @@ -961,7 +944,7 @@ function lattice_C66_trans(Ntrans,C_parent66,structure_target, & C_bar66(1,3) = (C_parent66(1,1) + 2.0_pReal*C_parent66(1,2) - 2.0_pReal*C_parent66(4,4))/3.0_pReal C_bar66(4,4) = (C_parent66(1,1) - C_parent66(1,2) + C_parent66(4,4))/3.0_pReal C_bar66(1,4) = (C_parent66(1,1) - C_parent66(1,2) - 2.0_pReal*C_parent66(4,4)) /(3.0_pReal*sqrt(2.0_pReal)) - + C_target_unrotated66 = 0.0_pReal C_target_unrotated66(1,1) = C_bar66(1,1) - C_bar66(1,4)**2.0_pReal/C_bar66(4,4) C_target_unrotated66(1,2) = C_bar66(1,2) + C_bar66(1,4)**2.0_pReal/C_bar66(4,4) @@ -976,22 +959,22 @@ function lattice_C66_trans(Ntrans,C_parent66,structure_target, & else call IO_error(137,ext_msg='lattice_C66_trans : '//trim(structure_target)) endif - + do i = 1, 6 if (abs(C_target_unrotated66(i,i)) @brief Non-schmid projections for bcc with up to 6 coefficients ! Koester et al. 2012, Acta Materialia 60 (2012) 3894–3901, eq. (17) @@ -1003,19 +986,19 @@ function lattice_nonSchmidMatrix(Nslip,nonSchmidCoefficients,sense) result(nonSc real(pReal), dimension(:), intent(in) :: nonSchmidCoefficients !< non-Schmid coefficients for projections integer, intent(in) :: sense !< sense (-1,+1) real(pReal), dimension(1:3,1:3,sum(Nslip)) :: nonSchmidMatrix - + real(pReal), dimension(1:3,1:3,sum(Nslip)) :: coordinateSystem !< coordinate system of slip system real(pReal), dimension(3) :: direction, normal, np type(rotation) :: R integer :: i - + if (abs(sense) /= 1) call IO_error(0,ext_msg='lattice_nonSchmidMatrix') - + coordinateSystem = buildCoordinateSystem(Nslip,LATTICE_BCC_NSLIPSYSTEM,LATTICE_BCC_SYSTEMSLIP,& 'bcc',0.0_pReal) coordinateSystem(1:3,1,1:sum(Nslip)) = coordinateSystem(1:3,1,1:sum(Nslip)) *real(sense,pReal) ! convert unidirectional coordinate system nonSchmidMatrix = lattice_SchmidMatrix_slip(Nslip,'bcc',0.0_pReal) ! Schmid contribution - + do i = 1,sum(Nslip) direction = coordinateSystem(1:3,1,i) normal = coordinateSystem(1:3,2,i) @@ -1038,8 +1021,8 @@ function lattice_nonSchmidMatrix(Nslip,nonSchmidCoefficients,sense) result(nonSc enddo end function lattice_nonSchmidMatrix - - + + !-------------------------------------------------------------------------------------------------- !> @brief Slip-slip interaction matrix !> details only active slip systems are considered @@ -1050,10 +1033,10 @@ function lattice_interaction_SlipBySlip(Nslip,interactionValues,structure) resul real(pReal), dimension(:), intent(in) :: interactionValues !< values for slip-slip interaction character(len=*), intent(in) :: structure !< lattice structure real(pReal), dimension(sum(Nslip),sum(Nslip)) :: interactionMatrix - + integer, dimension(:), allocatable :: NslipMax integer, dimension(:,:), allocatable :: interactionTypes - + integer, dimension(LATTICE_FCC_NSLIP,LATTICE_FCC_NSLIP), parameter :: & FCC_INTERACTIONSLIPSLIP = reshape( [& 1, 2, 2, 4, 6, 5, 3, 5, 5, 4, 5, 6, 9,10, 9,10,11,12, & ! 
-----> acting @@ -1068,7 +1051,7 @@ function lattice_interaction_SlipBySlip(Nslip,interactionValues,structure) resul 4, 5, 6, 3, 5, 5, 4, 6, 5, 1, 2, 2, 10, 9, 9,10,12,11, & 5, 3, 5, 5, 4, 6, 6, 4, 5, 2, 1, 2, 10, 9,11,12,10, 9, & 6, 5, 4, 5, 6, 4, 5, 5, 3, 2, 2, 1, 12,11, 9,10,10, 9, & - + 9, 9,11, 9, 9,11,10,10,12,10,10,12, 1, 7, 8, 8, 8, 8, & 10,10,12,10,10,12, 9, 9,11, 9, 9,11, 7, 1, 8, 8, 8, 8, & 9,11, 9,10,12,10,10,12,10, 9,11, 9, 8, 8, 1, 7, 8, 8, & @@ -1088,7 +1071,7 @@ function lattice_interaction_SlipBySlip(Nslip,interactionValues,structure) resul !<10: similar to glissile junctions in <110>{111} btw one {110} and one {111} plane !<11: crossing btw one {110} and one {111} plane !<12: collinear btw one {110} and one {111} plane - + integer, dimension(LATTICE_BCC_NSLIP,LATTICE_BCC_NSLIP), parameter :: & BCC_INTERACTIONSLIPSLIP = reshape( [& 1,2,6,6,5,4,4,3,4,3,5,4, 6,6,4,3,3,4,6,6,4,3,6,6, & ! -----> acting @@ -1123,7 +1106,7 @@ function lattice_interaction_SlipBySlip(Nslip,interactionValues,structure) resul !< 4: mixed-asymmetrical junction !< 5: mixed-symmetrical junction !< 6: edge junction - + integer, dimension(LATTICE_HEX_NSLIP,LATTICE_HEX_NSLIP), parameter :: & HEX_INTERACTIONSLIPSLIP = reshape( [& 1, 2, 2, 3, 3, 3, 7, 7, 7, 13,13,13,13,13,13, 21,21,21,21,21,21,21,21,21,21,21,21, 31,31,31,31,31,31, & ! -----> acting @@ -1165,7 +1148,7 @@ function lattice_interaction_SlipBySlip(Nslip,interactionValues,structure) resul 42,42,42, 41,41,41, 40,40,40, 39,39,39,39,39,39, 38,38,38,38,38,38,38,38,38,38,38,38, 37,37,37,37,36,37, & 42,42,42, 41,41,41, 40,40,40, 39,39,39,39,39,39, 38,38,38,38,38,38,38,38,38,38,38,38, 37,37,37,37,37,36 & ],shape(HEX_INTERACTIONSLIPSLIP)) !< Slip--slip interaction types for hex (onion peel naming scheme) - + integer, dimension(LATTICE_BCT_NSLIP,LATTICE_BCT_NSLIP), parameter :: & BCT_INTERACTIONSLIPSLIP = reshape( [& 1, 2, 3, 3, 7, 7, 13, 13, 13, 13, 21, 21, 31, 31, 31, 31, 43, 43, 57, 57, 73, 73, 73, 73, 91, 91, 91, 91, 91, 91, 91, 91, 111, 111, 111, 111, 133,133,133,133,133,133,133,133, 157,157,157,157,157,157,157,157, & ! 
-----> acting @@ -1233,11 +1216,11 @@ function lattice_interaction_SlipBySlip(Nslip,interactionValues,structure) resul 182,182, 181,181, 180,180, 179,179,179,179, 178,178, 177,177,177,177, 176,176, 175,175, 174,174,174,174, 173,173,173,173,173,173,173,173, 172, 172, 172, 172, 171,171,171,171,171,171,171,171, 169,170,170,170,170,170,169,170, & 182,182, 181,181, 180,180, 179,179,179,179, 178,178, 177,177,177,177, 176,176, 175,175, 174,174,174,174, 173,173,173,173,173,173,173,173, 172, 172, 172, 172, 171,171,171,171,171,171,171,171, 169,170,170,170,170,170,170,169 & ],shape(BCT_INTERACTIONSLIPSLIP)) - - + + if (len_trim(structure) /= 3) & call IO_error(137,ext_msg='lattice_interaction_SlipBySlip: '//trim(structure)) - + select case(structure(1:3)) case('fcc') interactionTypes = FCC_INTERACTIONSLIPSLIP @@ -1254,26 +1237,26 @@ function lattice_interaction_SlipBySlip(Nslip,interactionValues,structure) resul case default call IO_error(137,ext_msg='lattice_interaction_SlipBySlip: '//trim(structure)) end select - + interactionMatrix = buildInteraction(Nslip,Nslip,NslipMax,NslipMax,interactionValues,interactionTypes) - + end function lattice_interaction_SlipBySlip - - + + !-------------------------------------------------------------------------------------------------- !> @brief Twin-twin interaction matrix !> details only active twin systems are considered !-------------------------------------------------------------------------------------------------- function lattice_interaction_TwinByTwin(Ntwin,interactionValues,structure) result(interactionMatrix) - + integer, dimension(:), intent(in) :: Ntwin !< number of active twin systems per family real(pReal), dimension(:), intent(in) :: interactionValues !< values for twin-twin interaction character(len=*), intent(in) :: structure !< lattice structure real(pReal), dimension(sum(Ntwin),sum(Ntwin)) :: interactionMatrix - + integer, dimension(:), allocatable :: NtwinMax integer, dimension(:,:), allocatable :: interactionTypes - + integer, dimension(LATTICE_FCC_NTWIN,LATTICE_FCC_NTWIN), parameter :: & FCC_INTERACTIONTWINTWIN = reshape( [& 1,1,1,2,2,2,2,2,2,2,2,2, & ! -----> acting @@ -1289,7 +1272,7 @@ function lattice_interaction_TwinByTwin(Ntwin,interactionValues,structure) resul 2,2,2,2,2,2,2,2,2,1,1,1, & 2,2,2,2,2,2,2,2,2,1,1,1 & ],shape(FCC_INTERACTIONTWINTWIN)) !< Twin-twin interaction types for fcc - + integer, dimension(LATTICE_BCC_NTWIN,LATTICE_BCC_NTWIN), parameter :: & BCC_INTERACTIONTWINTWIN = reshape( [& 1,3,3,3,3,3,3,2,3,3,2,3, & ! 
-----> acting @@ -1338,10 +1321,10 @@ function lattice_interaction_TwinByTwin(Ntwin,interactionValues,structure) resul 20,20,20,20,20,20, 19,19,19,19,19,19, 18,18,18,18,18,18, 17,17,17,17,16,17, & 20,20,20,20,20,20, 19,19,19,19,19,19, 18,18,18,18,18,18, 17,17,17,17,17,16 & ],shape(HEX_INTERACTIONTWINTWIN)) !< Twin-twin interaction types for hex - + if (len_trim(structure) /= 3) & call IO_error(137,ext_msg='lattice_interaction_TwinByTwin: '//trim(structure)) - + select case(structure(1:3)) case('fcc') interactionTypes = FCC_INTERACTIONTWINTWIN @@ -1355,26 +1338,26 @@ function lattice_interaction_TwinByTwin(Ntwin,interactionValues,structure) resul case default call IO_error(137,ext_msg='lattice_interaction_TwinByTwin: '//trim(structure)) end select - + interactionMatrix = buildInteraction(Ntwin,Ntwin,NtwinMax,NtwinMax,interactionValues,interactionTypes) - + end function lattice_interaction_TwinByTwin - - + + !-------------------------------------------------------------------------------------------------- !> @brief Trans-trans interaction matrix !> details only active trans systems are considered !-------------------------------------------------------------------------------------------------- function lattice_interaction_TransByTrans(Ntrans,interactionValues,structure) result(interactionMatrix) - + integer, dimension(:), intent(in) :: Ntrans !< number of active trans systems per family real(pReal), dimension(:), intent(in) :: interactionValues !< values for trans-trans interaction character(len=*), intent(in) :: structure !< lattice structure (parent crystal) real(pReal), dimension(sum(Ntrans),sum(Ntrans)) :: interactionMatrix - + integer, dimension(:), allocatable :: NtransMax integer, dimension(:,:), allocatable :: interactionTypes - + integer, dimension(LATTICE_FCC_NTRANS,LATTICE_FCC_NTRANS), parameter :: & FCC_INTERACTIONTRANSTRANS = reshape( [& 1,1,1,2,2,2,2,2,2,2,2,2, & ! 
-----> acting @@ -1390,44 +1373,44 @@ function lattice_interaction_TransByTrans(Ntrans,interactionValues,structure) re 2,2,2,2,2,2,2,2,2,1,1,1, & 2,2,2,2,2,2,2,2,2,1,1,1 & ],shape(FCC_INTERACTIONTRANSTRANS)) !< Trans-trans interaction types for fcc - + if (len_trim(structure) /= 3) & call IO_error(137,ext_msg='lattice_interaction_TransByTrans: '//trim(structure)) - + if(structure(1:3) == 'fcc') then interactionTypes = FCC_INTERACTIONTRANSTRANS NtransMax = LATTICE_FCC_NTRANSSYSTEM else call IO_error(137,ext_msg='lattice_interaction_TransByTrans: '//trim(structure)) end if - + interactionMatrix = buildInteraction(Ntrans,Ntrans,NtransMax,NtransMax,interactionValues,interactionTypes) - + end function lattice_interaction_TransByTrans - - + + !-------------------------------------------------------------------------------------------------- !> @brief Slip-twin interaction matrix !> details only active slip and twin systems are considered !-------------------------------------------------------------------------------------------------- function lattice_interaction_SlipByTwin(Nslip,Ntwin,interactionValues,structure) result(interactionMatrix) - + integer, dimension(:), intent(in) :: Nslip, & !< number of active slip systems per family Ntwin !< number of active twin systems per family real(pReal), dimension(:), intent(in) :: interactionValues !< values for slip-twin interaction character(len=*), intent(in) :: structure !< lattice structure real(pReal), dimension(sum(Nslip),sum(Ntwin)) :: interactionMatrix - + integer, dimension(:), allocatable :: NslipMax, & NtwinMax integer, dimension(:,:), allocatable :: interactionTypes - + integer, dimension(LATTICE_FCC_NTWIN,LATTICE_FCC_NSLIP), parameter :: & FCC_INTERACTIONSLIPTWIN = reshape( [& 1,1,1,3,3,3,2,2,2,3,3,3, & ! -----> twin (acting) 1,1,1,3,3,3,3,3,3,2,2,2, & ! | 1,1,1,2,2,2,3,3,3,3,3,3, & ! | - 3,3,3,1,1,1,3,3,3,2,2,2, & ! v + 3,3,3,1,1,1,3,3,3,2,2,2, & ! v 3,3,3,1,1,1,2,2,2,3,3,3, & ! slip (reacting) 2,2,2,1,1,1,3,3,3,3,3,3, & 2,2,2,3,3,3,1,1,1,3,3,3, & @@ -1436,7 +1419,7 @@ function lattice_interaction_SlipByTwin(Nslip,Ntwin,interactionValues,structure) 3,3,3,2,2,2,3,3,3,1,1,1, & 2,2,2,3,3,3,3,3,3,1,1,1, & 3,3,3,3,3,3,2,2,2,1,1,1, & - + 4,4,4,4,4,4,4,4,4,4,4,4, & 4,4,4,4,4,4,4,4,4,4,4,4, & 4,4,4,4,4,4,4,4,4,4,4,4, & @@ -1520,10 +1503,10 @@ function lattice_interaction_SlipByTwin(Nslip,Ntwin,interactionValues,structure) 21,21,21,21,21,21, 22,22,22,22,22,22, 23,23,23,23,23,23, 24,24,24,24,24,24 & ! 
],shape(HEX_INTERACTIONSLIPTWIN)) !< Slip-twin interaction types for hex - + if (len_trim(structure) /= 3) & call IO_error(137,ext_msg='lattice_interaction_SlipByTwin: '//trim(structure)) - + select case(structure(1:3)) case('fcc') interactionTypes = FCC_INTERACTIONSLIPTWIN @@ -1540,28 +1523,28 @@ function lattice_interaction_SlipByTwin(Nslip,Ntwin,interactionValues,structure) case default call IO_error(137,ext_msg='lattice_interaction_SlipByTwin: '//trim(structure)) end select - + interactionMatrix = buildInteraction(Nslip,Ntwin,NslipMax,NtwinMax,interactionValues,interactionTypes) - + end function lattice_interaction_SlipByTwin - - + + !-------------------------------------------------------------------------------------------------- !> @brief Slip-trans interaction matrix !> details only active slip and trans systems are considered !-------------------------------------------------------------------------------------------------- function lattice_interaction_SlipByTrans(Nslip,Ntrans,interactionValues,structure) result(interactionMatrix) - + integer, dimension(:), intent(in) :: Nslip, & !< number of active slip systems per family Ntrans !< number of active trans systems per family real(pReal), dimension(:), intent(in) :: interactionValues !< values for slip-trans interaction character(len=*), intent(in) :: structure !< lattice structure (parent crystal) real(pReal), dimension(sum(Nslip),sum(Ntrans)) :: interactionMatrix - + integer, dimension(:), allocatable :: NslipMax, & NtransMax integer, dimension(:,:), allocatable :: interactionTypes - + integer, dimension(LATTICE_FCC_NTRANS,LATTICE_FCC_NSLIP), parameter :: & FCC_INTERACTIONSLIPTRANS = reshape( [& 1,1,1,3,3,3,2,2,2,3,3,3, & ! -----> trans (acting) @@ -1576,7 +1559,7 @@ function lattice_interaction_SlipByTrans(Nslip,Ntrans,interactionValues,structur 3,3,3,2,2,2,3,3,3,1,1,1, & 2,2,2,3,3,3,3,3,3,1,1,1, & 3,3,3,3,3,3,2,2,2,1,1,1, & - + 4,4,4,4,4,4,4,4,4,4,4,4, & 4,4,4,4,4,4,4,4,4,4,4,4, & 4,4,4,4,4,4,4,4,4,4,4,4, & @@ -1584,10 +1567,10 @@ function lattice_interaction_SlipByTrans(Nslip,Ntrans,interactionValues,structur 4,4,4,4,4,4,4,4,4,4,4,4, & 4,4,4,4,4,4,4,4,4,4,4,4 & ],shape(FCC_INTERACTIONSLIPTRANS)) !< Slip-trans interaction types for fcc - + if (len_trim(structure) /= 3) & call IO_error(137,ext_msg='lattice_interaction_SlipByTrans: '//trim(structure)) - + select case(structure(1:3)) case('fcc') interactionTypes = FCC_INTERACTIONSLIPTRANS @@ -1596,34 +1579,34 @@ function lattice_interaction_SlipByTrans(Nslip,Ntrans,interactionValues,structur case default call IO_error(137,ext_msg='lattice_interaction_SlipByTrans: '//trim(structure)) end select - + interactionMatrix = buildInteraction(Nslip,Ntrans,NslipMax,NtransMax,interactionValues,interactionTypes) - + end function lattice_interaction_SlipByTrans - - + + !-------------------------------------------------------------------------------------------------- !> @brief Twin-slip interaction matrix !> details only active twin and slip systems are considered !-------------------------------------------------------------------------------------------------- function lattice_interaction_TwinBySlip(Ntwin,Nslip,interactionValues,structure) result(interactionMatrix) - + integer, dimension(:), intent(in) :: Ntwin, & !< number of active twin systems per family Nslip !< number of active slip systems per family real(pReal), dimension(:), intent(in) :: interactionValues !< values for twin-twin interaction character(len=*), intent(in) :: structure !< lattice structure real(pReal), 
dimension(sum(Ntwin),sum(Nslip)) :: interactionMatrix - + integer, dimension(:), allocatable :: NtwinMax, & NslipMax integer, dimension(:,:), allocatable :: interactionTypes - + integer, dimension(LATTICE_FCC_NSLIP,LATTICE_FCC_NTWIN), parameter :: & FCC_INTERACTIONTWINSLIP = 1 !< Twin-slip interaction types for fcc - + integer, dimension(LATTICE_BCC_NSLIP,LATTICE_BCC_NTWIN), parameter :: & BCC_INTERACTIONTWINSLIP = 1 !< Twin-slip interaction types for bcc - + integer, dimension(LATTICE_HEX_NSLIP,LATTICE_HEX_NTWIN), parameter :: & HEX_INTERACTIONTWINSLIP = reshape( [& 1, 1, 1, 5, 5, 5, 9, 9, 9, 13,13,13,13,13,13, 17,17,17,17,17,17,17,17,17,17,17,17, 21,21,21,21,21,21, & ! ----> slip (acting) @@ -1654,10 +1637,10 @@ function lattice_interaction_TwinBySlip(Ntwin,Nslip,interactionValues,structure) 4, 4, 4, 8, 8, 8, 12,12,12, 16,16,16,16,16,16, 20,20,20,20,20,20,20,20,20,20,20,20, 24,24,24,24,24,24, & 4, 4, 4, 8, 8, 8, 12,12,12, 16,16,16,16,16,16, 20,20,20,20,20,20,20,20,20,20,20,20, 24,24,24,24,24,24 & ],shape(HEX_INTERACTIONTWINSLIP)) !< Twin-slip interaction types for hex - + if (len_trim(structure) /= 3) & call IO_error(137,ext_msg='lattice_interaction_TwinBySlip: '//trim(structure)) - + select case(structure(1:3)) case('fcc') interactionTypes = FCC_INTERACTIONTWINSLIP @@ -1674,31 +1657,31 @@ function lattice_interaction_TwinBySlip(Ntwin,Nslip,interactionValues,structure) case default call IO_error(137,ext_msg='lattice_interaction_TwinBySlip: '//trim(structure)) end select - + interactionMatrix = buildInteraction(Ntwin,Nslip,NtwinMax,NslipMax,interactionValues,interactionTypes) - + end function lattice_interaction_TwinBySlip - - + + !-------------------------------------------------------------------------------------------------- !> @brief Schmid matrix for slip !> details only active slip systems are considered !-------------------------------------------------------------------------------------------------- function lattice_SchmidMatrix_slip(Nslip,structure,cOverA) result(SchmidMatrix) - + integer, dimension(:), intent(in) :: Nslip !< number of active slip systems per family character(len=*), intent(in) :: structure !< lattice structure real(pReal), intent(in) :: cOverA real(pReal), dimension(3,3,sum(Nslip)) :: SchmidMatrix - + real(pReal), dimension(3,3,sum(Nslip)) :: coordinateSystem real(pReal), dimension(:,:), allocatable :: slipSystems integer, dimension(:), allocatable :: NslipMax integer :: i - + if (len_trim(structure) /= 3) & call IO_error(137,ext_msg='lattice_SchmidMatrix_slip: '//trim(structure)) - + select case(structure(1:3)) case('fcc') NslipMax = LATTICE_FCC_NSLIPSYSTEM @@ -1715,42 +1698,42 @@ function lattice_SchmidMatrix_slip(Nslip,structure,cOverA) result(SchmidMatrix) case default call IO_error(137,ext_msg='lattice_SchmidMatrix_slip: '//trim(structure)) end select - + if (any(NslipMax(1:size(Nslip)) - Nslip < 0)) & call IO_error(145,ext_msg='Nslip '//trim(structure)) if (any(Nslip < 0)) & call IO_error(144,ext_msg='Nslip '//trim(structure)) - + coordinateSystem = buildCoordinateSystem(Nslip,NslipMax,slipSystems,structure,cOverA) - + do i = 1, sum(Nslip) SchmidMatrix(1:3,1:3,i) = math_outer(coordinateSystem(1:3,1,i),coordinateSystem(1:3,2,i)) if (abs(math_trace33(SchmidMatrix(1:3,1:3,i))) > tol_math_check) & call IO_error(0,i,ext_msg = 'dilatational Schmid matrix for slip') enddo - + end function lattice_SchmidMatrix_slip - - + + !-------------------------------------------------------------------------------------------------- !> @brief Schmid matrix for twinning !> 
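
The Schmid matrix assembled in lattice_SchmidMatrix_slip is the outer product of the unit slip direction and the unit plane normal, and the subsequent check requires it to be traceless (no dilatational contribution). A NumPy sketch for the first fcc <110>{111} system listed above:

import numpy as np

d = np.array([0., 1., -1.]) / np.sqrt(2.)         # slip direction [0 1 -1]
n = np.array([1., 1.,  1.]) / np.sqrt(3.)         # plane normal  (1 1 1)
P = np.outer(d, n)                                # Schmid matrix of this system
print(np.trace(P))                                # 0.0, since d lies in the plane
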
details only active twin systems are considered !-------------------------------------------------------------------------------------------------- function lattice_SchmidMatrix_twin(Ntwin,structure,cOverA) result(SchmidMatrix) - + integer, dimension(:), intent(in) :: Ntwin !< number of active twin systems per family character(len=*), intent(in) :: structure !< lattice structure real(pReal), intent(in) :: cOverA !< c/a ratio real(pReal), dimension(3,3,sum(Ntwin)) :: SchmidMatrix - + real(pReal), dimension(3,3,sum(Ntwin)) :: coordinateSystem real(pReal), dimension(:,:), allocatable :: twinSystems integer, dimension(:), allocatable :: NtwinMax integer :: i - + if (len_trim(structure) /= 3) & call IO_error(137,ext_msg='lattice_SchmidMatrix_twin: '//trim(structure)) - + select case(structure(1:3)) case('fcc') NtwinMax = LATTICE_FCC_NTWINSYSTEM @@ -1764,37 +1747,37 @@ function lattice_SchmidMatrix_twin(Ntwin,structure,cOverA) result(SchmidMatrix) case default call IO_error(137,ext_msg='lattice_SchmidMatrix_twin: '//trim(structure)) end select - + if (any(NtwinMax(1:size(Ntwin)) - Ntwin < 0)) & call IO_error(145,ext_msg='Ntwin '//trim(structure)) if (any(Ntwin < 0)) & call IO_error(144,ext_msg='Ntwin '//trim(structure)) - + coordinateSystem = buildCoordinateSystem(Ntwin,NtwinMax,twinSystems,structure,cOverA) - + do i = 1, sum(Ntwin) SchmidMatrix(1:3,1:3,i) = math_outer(coordinateSystem(1:3,1,i),coordinateSystem(1:3,2,i)) if (abs(math_trace33(SchmidMatrix(1:3,1:3,i))) > tol_math_check) & call IO_error(0,i,ext_msg = 'dilatational Schmid matrix for twin') enddo - + end function lattice_SchmidMatrix_twin - - + + !-------------------------------------------------------------------------------------------------- !> @brief Schmid matrix for twinning !> details only active twin systems are considered !-------------------------------------------------------------------------------------------------- function lattice_SchmidMatrix_trans(Ntrans,structure_target,cOverA,a_bcc,a_fcc) result(SchmidMatrix) - + integer, dimension(:), intent(in) :: Ntrans !< number of active twin systems per family character(len=*), intent(in) :: structure_target !< lattice structure real(pReal), intent(in) :: cOverA !< c/a ratio real(pReal), dimension(3,3,sum(Ntrans)) :: SchmidMatrix - + real(pReal), dimension(3,3,sum(Ntrans)) :: devNull real(pReal) :: a_bcc, a_fcc - + if (len_trim(structure_target) /= 3) & call IO_error(137,ext_msg='lattice_SchmidMatrix_trans: '//trim(structure_target)) if (structure_target(1:3) /= 'bcc' .and. structure_target(1:3) /= 'hex') & @@ -1802,15 +1785,15 @@ function lattice_SchmidMatrix_trans(Ntrans,structure_target,cOverA,a_bcc,a_fcc) if (structure_target(1:3) == 'hex' .and. (cOverA < 1.0_pReal .or. cOverA > 2.0_pReal)) & call IO_error(131,ext_msg='lattice_SchmidMatrix_trans: '//trim(structure_target)) - + if (structure_target(1:3) == 'bcc' .and. (a_bcc <= 0.0_pReal .or. 
a_fcc <= 0.0_pReal)) & call IO_error(134,ext_msg='lattice_SchmidMatrix_trans: '//trim(structure_target)) - + call buildTransformationSystem(devNull,SchmidMatrix,Ntrans,cOverA,a_fcc,a_bcc) - + end function lattice_SchmidMatrix_trans - - + + !-------------------------------------------------------------------------------------------------- !> @brief Schmid matrix for cleavage !> details only active cleavage systems are considered @@ -1821,15 +1804,15 @@ function lattice_SchmidMatrix_cleavage(Ncleavage,structure,cOverA) result(Schmid character(len=*), intent(in) :: structure !< lattice structure real(pReal), intent(in) :: cOverA !< c/a ratio real(pReal), dimension(3,3,3,sum(Ncleavage)) :: SchmidMatrix - + real(pReal), dimension(3,3,sum(Ncleavage)) :: coordinateSystem real(pReal), dimension(:,:), allocatable :: cleavageSystems integer, dimension(:), allocatable :: NcleavageMax integer :: i - + if (len_trim(structure) /= 3) & call IO_error(137,ext_msg='lattice_SchmidMatrix_cleavage: '//trim(structure)) - + select case(structure(1:3)) case('iso') NcleavageMax = LATTICE_ISO_NCLEAVAGESYSTEM @@ -1849,56 +1832,56 @@ function lattice_SchmidMatrix_cleavage(Ncleavage,structure,cOverA) result(Schmid case default call IO_error(137,ext_msg='lattice_SchmidMatrix_cleavage: '//trim(structure)) end select - + if (any(NcleavageMax(1:size(Ncleavage)) - Ncleavage < 0)) & call IO_error(145,ext_msg='Ncleavage '//trim(structure)) if (any(Ncleavage < 0)) & call IO_error(144,ext_msg='Ncleavage '//trim(structure)) - + coordinateSystem = buildCoordinateSystem(Ncleavage,NcleavageMax,cleavageSystems,structure,cOverA) - + do i = 1, sum(Ncleavage) SchmidMatrix(1:3,1:3,1,i) = math_outer(coordinateSystem(1:3,1,i),coordinateSystem(1:3,2,i)) SchmidMatrix(1:3,1:3,2,i) = math_outer(coordinateSystem(1:3,3,i),coordinateSystem(1:3,2,i)) SchmidMatrix(1:3,1:3,3,i) = math_outer(coordinateSystem(1:3,2,i),coordinateSystem(1:3,2,i)) enddo - + end function lattice_SchmidMatrix_cleavage - - + + !-------------------------------------------------------------------------------------------------- !> @brief Slip direction of slip systems (|| b) !-------------------------------------------------------------------------------------------------- function lattice_slip_direction(Nslip,structure,cOverA) result(d) - + integer, dimension(:), intent(in) :: Nslip !< number of active slip systems per family character(len=*), intent(in) :: structure !< lattice structure real(pReal), intent(in) :: cOverA !< c/a ratio real(pReal), dimension(3,sum(Nslip)) :: d - + real(pReal), dimension(3,3,sum(Nslip)) :: coordinateSystem - + coordinateSystem = coordinateSystem_slip(Nslip,structure,cOverA) d = coordinateSystem(1:3,1,1:sum(Nslip)) - + end function lattice_slip_direction - - + + !-------------------------------------------------------------------------------------------------- !> @brief Normal direction of slip systems (|| n) !-------------------------------------------------------------------------------------------------- function lattice_slip_normal(Nslip,structure,cOverA) result(n) - + integer, dimension(:), intent(in) :: Nslip !< number of active slip systems per family character(len=*), intent(in) :: structure !< lattice structure real(pReal), intent(in) :: cOverA !< c/a ratio real(pReal), dimension(3,sum(Nslip)) :: n - + real(pReal), dimension(3,3,sum(Nslip)) :: coordinateSystem - + coordinateSystem = coordinateSystem_slip(Nslip,structure,cOverA) n = coordinateSystem(1:3,2,1:sum(Nslip)) - + end function lattice_slip_normal @@ -1906,41 +1889,41 @@ end 
function lattice_slip_normal !> @brief Transverse direction of slip systems ( || t = b x n) !-------------------------------------------------------------------------------------------------- function lattice_slip_transverse(Nslip,structure,cOverA) result(t) - + integer, dimension(:), intent(in) :: Nslip !< number of active slip systems per family character(len=*), intent(in) :: structure !< lattice structure real(pReal), intent(in) :: cOverA !< c/a ratio real(pReal), dimension(3,sum(Nslip)) :: t - + real(pReal), dimension(3,3,sum(Nslip)) :: coordinateSystem - + coordinateSystem = coordinateSystem_slip(Nslip,structure,cOverA) t = coordinateSystem(1:3,3,1:sum(Nslip)) - + end function lattice_slip_transverse - - + + !-------------------------------------------------------------------------------------------------- !> @brief Projection of the transverse direction onto the slip plane !> @details: This projection is used to calculate forest hardening for edge dislocations !-------------------------------------------------------------------------------------------------- function slipProjection_transverse(Nslip,structure,cOverA) result(projection) - + integer, dimension(:), intent(in) :: Nslip !< number of active slip systems per family character(len=*), intent(in) :: structure !< lattice structure real(pReal), intent(in) :: cOverA !< c/a ratio real(pReal), dimension(sum(Nslip),sum(Nslip)) :: projection - + real(pReal), dimension(3,sum(Nslip)) :: n, t integer :: i, j - + n = lattice_slip_normal (Nslip,structure,cOverA) t = lattice_slip_transverse(Nslip,structure,cOverA) - + do i=1, sum(Nslip); do j=1, sum(Nslip) projection(i,j) = abs(math_inner(n(:,i),t(:,j))) enddo; enddo - + end function slipProjection_transverse @@ -1952,15 +1935,15 @@ function lattice_labels_slip(Nslip,structure) result(labels) integer, dimension(:), intent(in) :: Nslip !< number of active slip systems per family character(len=*), intent(in) :: structure !< lattice structure - + character(len=:), dimension(:), allocatable :: labels real(pReal), dimension(:,:), allocatable :: slipSystems integer, dimension(:), allocatable :: NslipMax - + if (len_trim(structure) /= 3) & call IO_error(137,ext_msg='lattice_labels_slip: '//trim(structure)) - + select case(structure(1:3)) case('fcc') NslipMax = LATTICE_FCC_NSLIPSYSTEM @@ -1977,14 +1960,14 @@ function lattice_labels_slip(Nslip,structure) result(labels) case default call IO_error(137,ext_msg='lattice_labels_slip: '//trim(structure)) end select - + if (any(NslipMax(1:size(Nslip)) - Nslip < 0)) & call IO_error(145,ext_msg='Nslip '//trim(structure)) if (any(Nslip < 0)) & call IO_error(144,ext_msg='Nslip '//trim(structure)) - + labels = getLabels(Nslip,NslipMax,slipSystems) - + end function lattice_labels_slip @@ -1996,15 +1979,15 @@ function lattice_labels_twin(Ntwin,structure) result(labels) integer, dimension(:), intent(in) :: Ntwin !< number of active slip systems per family character(len=*), intent(in) :: structure !< lattice structure - + character(len=:), dimension(:), allocatable :: labels real(pReal), dimension(:,:), allocatable :: twinSystems integer, dimension(:), allocatable :: NtwinMax - + if (len_trim(structure) /= 3) & call IO_error(137,ext_msg='lattice_labels_twin: '//trim(structure)) - + select case(structure(1:3)) case('fcc') NtwinMax = LATTICE_FCC_NTWINSYSTEM @@ -2018,14 +2001,14 @@ function lattice_labels_twin(Ntwin,structure) result(labels) case default call IO_error(137,ext_msg='lattice_labels_twin: '//trim(structure)) end select - + if (any(NtwinMax(1:size(Ntwin)) 
- Ntwin < 0)) & call IO_error(145,ext_msg='Ntwin '//trim(structure)) if (any(Ntwin < 0)) & call IO_error(144,ext_msg='Ntwin '//trim(structure)) - + labels = getLabels(Ntwin,NtwinMax,twinSystems) - + end function lattice_labels_twin @@ -2034,25 +2017,25 @@ end function lattice_labels_twin !> @details: This projection is used to calculate forest hardening for screw dislocations !-------------------------------------------------------------------------------------------------- function slipProjection_direction(Nslip,structure,cOverA) result(projection) - + integer, dimension(:), intent(in) :: Nslip !< number of active slip systems per family character(len=*), intent(in) :: structure !< lattice structure real(pReal), intent(in) :: cOverA !< c/a ratio real(pReal), dimension(sum(Nslip),sum(Nslip)) :: projection - + real(pReal), dimension(3,sum(Nslip)) :: n, d integer :: i, j - + n = lattice_slip_normal (Nslip,structure,cOverA) d = lattice_slip_direction(Nslip,structure,cOverA) - + do i=1, sum(Nslip); do j=1, sum(Nslip) projection(i,j) = abs(math_inner(n(:,i),d(:,j))) enddo; enddo - + end function slipProjection_direction - - + + !-------------------------------------------------------------------------------------------------- !> @brief build a local coordinate system on slip systems !> @details Order: Direction, plane (normal), and common perpendicular @@ -2063,13 +2046,13 @@ function coordinateSystem_slip(Nslip,structure,cOverA) result(coordinateSystem) character(len=*), intent(in) :: structure !< lattice structure real(pReal), intent(in) :: cOverA !< c/a ratio real(pReal), dimension(3,3,sum(Nslip)) :: coordinateSystem - + real(pReal), dimension(:,:), allocatable :: slipSystems integer, dimension(:), allocatable :: NslipMax - + if (len_trim(structure) /= 3) & call IO_error(137,ext_msg='coordinateSystem_slip: '//trim(structure)) - + select case(structure(1:3)) case('fcc') NslipMax = LATTICE_FCC_NSLIPSYSTEM @@ -2086,22 +2069,22 @@ function coordinateSystem_slip(Nslip,structure,cOverA) result(coordinateSystem) case default call IO_error(137,ext_msg='coordinateSystem_slip: '//trim(structure)) end select - + if (any(NslipMax(1:size(Nslip)) - Nslip < 0)) & call IO_error(145,ext_msg='Nslip '//trim(structure)) if (any(Nslip < 0)) & call IO_error(144,ext_msg='Nslip '//trim(structure)) - + coordinateSystem = buildCoordinateSystem(Nslip,NslipMax,slipSystems,structure,cOverA) - + end function coordinateSystem_slip - - + + !-------------------------------------------------------------------------------------------------- !> @brief Populates reduced interaction matrix !-------------------------------------------------------------------------------------------------- function buildInteraction(reacting_used,acting_used,reacting_max,acting_max,values,matrix) - + integer, dimension(:), intent(in) :: & reacting_used, & !< # of reacting systems per family as specified in material.config acting_used, & !< # of acting systems per family as specified in material.config @@ -2110,42 +2093,42 @@ function buildInteraction(reacting_used,acting_used,reacting_max,acting_max,valu real(pReal), dimension(:), intent(in) :: values !< interaction values integer, dimension(:,:), intent(in) :: matrix !< interaction types real(pReal), dimension(sum(reacting_used),sum(acting_used)) :: buildInteraction - + integer :: & acting_family_index, acting_family, acting_system, & reacting_family_index, reacting_family, reacting_system, & i,j,k,l - + do acting_family = 1,size(acting_used,1) acting_family_index = 
sum(acting_used(1:acting_family-1)) do acting_system = 1,acting_used(acting_family) - + do reacting_family = 1,size(reacting_used,1) reacting_family_index = sum(reacting_used(1:reacting_family-1)) do reacting_system = 1,reacting_used(reacting_family) - + i = sum( acting_max(1: acting_family-1)) + acting_system j = sum(reacting_max(1:reacting_family-1)) + reacting_system - + k = acting_family_index + acting_system l = reacting_family_index + reacting_system - + if (matrix(i,j) > size(values)) call IO_error(138,ext_msg='buildInteraction') - + buildInteraction(l,k) = values(matrix(i,j)) - + enddo; enddo enddo; enddo - + end function buildInteraction - - + + !-------------------------------------------------------------------------------------------------- !> @brief build a local coordinate system on slip, twin, trans, cleavage systems !> @details Order: Direction, plane (normal), and common perpendicular !-------------------------------------------------------------------------------------------------- function buildCoordinateSystem(active,potential,system,structure,cOverA) - + integer, dimension(:), intent(in) :: & active, & !< # of active systems per family potential !< # of potential systems per family @@ -2157,7 +2140,7 @@ function buildCoordinateSystem(active,potential,system,structure,cOverA) cOverA real(pReal), dimension(3,3,sum(active)) :: & buildCoordinateSystem - + real(pReal), dimension(3) :: & direction, normal integer :: & @@ -2165,26 +2148,26 @@ function buildCoordinateSystem(active,potential,system,structure,cOverA) p, & !< index in potential system matrix f, & !< index of my family s !< index of my system in current family - + if (len_trim(structure) /= 3) & call IO_error(137,ext_msg='buildCoordinateSystem: '//trim(structure)) if (trim(structure(1:3)) == 'bct' .and. cOverA > 2.0_pReal) & call IO_error(131,ext_msg='buildCoordinateSystem:'//trim(structure)) if (trim(structure(1:3)) == 'hex' .and. (cOverA < 1.0_pReal .or. cOverA > 2.0_pReal)) & call IO_error(131,ext_msg='buildCoordinateSystem:'//trim(structure)) - + a = 0 activeFamilies: do f = 1,size(active,1) activeSystems: do s = 1,active(f) a = a + 1 p = sum(potential(1:f-1))+s - + select case(trim(structure(1:3))) - + case ('fcc','bcc','iso','ort','bct') direction = system(1:3,p) normal = system(4:6,p) - + case ('hex') direction = [ system(1,p)*1.5_pReal, & (system(1,p)+2.0_pReal*system(2,p))*sqrt(0.75_pReal), & @@ -2192,23 +2175,23 @@ function buildCoordinateSystem(active,potential,system,structure,cOverA) normal = [ system(5,p), & (system(5,p)+2.0_pReal*system(6,p))/sqrt(3.0_pReal), & system(8,p)/cOverA ] ! plane (hkil)->(h (h+2k)/sqrt(3) l/(p/a)) - + case default call IO_error(137,ext_msg='buildCoordinateSystem: '//trim(structure)) - + end select - + buildCoordinateSystem(1:3,1,a) = direction/norm2(direction) buildCoordinateSystem(1:3,2,a) = normal /norm2(normal) buildCoordinateSystem(1:3,3,a) = math_cross(direction/norm2(direction),& normal /norm2(normal)) - + enddo activeSystems enddo activeFamilies - + end function buildCoordinateSystem - - + + !-------------------------------------------------------------------------------------------------- !> @brief Helper function to define transformation systems ! Needed to calculate Schmid matrix and rotated stiffness matrices. @@ -2216,7 +2199,7 @@ end function buildCoordinateSystem ! 
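
The hexagonal branch of buildCoordinateSystem converts four-index Miller-Bravais indices (with the redundant third index dropped) into Cartesian vectors. A NumPy sketch of the same formulas; c/a = 1.587 is assumed purely for illustration:

import numpy as np

c_over_a = 1.587                                  # assumed c/a ratio

def hex_direction(u, v, w):                       # [u v t w], t = -(u+v) omitted
    return np.array([u*1.5, (u + 2.0*v)*np.sqrt(0.75), w*c_over_a])

def hex_normal(h, k, l):                          # (h k i l), i = -(h+k) omitted
    return np.array([h, (h + 2.0*k)/np.sqrt(3.0), l/c_over_a])

d = hex_direction(2., -1., 0.)                    # basal slip direction <2 -1 -1 0>
n = hex_normal(0., 0., 1.)                        # basal plane (0 0 0 1)
print(np.dot(d, n))                               # 0.0: direction lies in the plane
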
set a_Xcc = 0.0 for fcc -> hex transformation !-------------------------------------------------------------------------------------------------- subroutine buildTransformationSystem(Q,S,Ntrans,cOverA,a_fcc,a_bcc) - + integer, dimension(:), intent(in) :: & Ntrans real(pReal), dimension(3,3,sum(Ntrans)), intent(out) :: & @@ -2226,10 +2209,10 @@ subroutine buildTransformationSystem(Q,S,Ntrans,cOverA,a_fcc,a_bcc) cOverA, & !< c/a for target hex structure a_bcc, & !< lattice parameter a for target bcc structure a_fcc !< lattice parameter a for parent fcc structure - + type(rotation) :: & R, & !< Pitsch rotation - B !< Rotation of fcc to Bain coordinate system + B !< Rotation of fcc to Bain coordinate system real(pReal), dimension(3,3) :: & U, & !< Bain deformation ss, sd @@ -2267,7 +2250,7 @@ subroutine buildTransformationSystem(Q,S,Ntrans,cOverA,a_fcc,a_bcc) 0.0, 1.0, 0.0, 10.26, & 0.0,-1.0, 0.0, 10.26 & ],shape(LATTICE_FCCTOBCC_SYSTEMTRANS)) - + integer, dimension(9,LATTICE_fcc_Ntrans), parameter :: & LATTICE_FCCTOBCC_BAINVARIANT = reshape( [& 1, 0, 0, 0, 1, 0, 0, 0, 1, & ! Pitsch OR (Ma & Hartmaier 2014, Table 3) @@ -2283,7 +2266,7 @@ subroutine buildTransformationSystem(Q,S,Ntrans,cOverA,a_fcc,a_bcc) 0, 0, 1, 1, 0, 0, 0, 1, 0, & 0, 0, 1, 1, 0, 0, 0, 1, 0 & ],shape(LATTICE_FCCTOBCC_BAINVARIANT)) - + real(pReal), dimension(4,LATTICE_fcc_Ntrans), parameter :: & LATTICE_FCCTOBCC_BAINROT = reshape([& 1.0, 0.0, 0.0, 45.0, & ! Rotate fcc austensite to bain variant @@ -2299,7 +2282,7 @@ subroutine buildTransformationSystem(Q,S,Ntrans,cOverA,a_fcc,a_bcc) 0.0, 0.0, 1.0, 45.0, & 0.0, 0.0, 1.0, 45.0 & ],shape(LATTICE_FCCTOBCC_BAINROT)) - + if (a_bcc > 0.0_pReal .and. a_fcc > 0.0_pReal .and. dEq0(cOverA)) then ! fcc -> bcc transformation do i = 1,sum(Ntrans) call R%fromAxisAngle(LATTICE_FCCTOBCC_SYSTEMTRANS(:,i),degrees=.true.,P=1) @@ -2307,7 +2290,7 @@ subroutine buildTransformationSystem(Q,S,Ntrans,cOverA,a_fcc,a_bcc) x = real(LATTICE_FCCTOBCC_BAINVARIANT(1:3,i),pReal) y = real(LATTICE_FCCTOBCC_BAINVARIANT(4:6,i),pReal) z = real(LATTICE_FCCTOBCC_BAINVARIANT(7:9,i),pReal) - + U = (a_bcc/a_fcc)*math_outer(x,x) & + (a_bcc/a_fcc)*math_outer(y,y) * sqrt(2.0_pReal) & + (a_bcc/a_fcc)*math_outer(z,z) * sqrt(2.0_pReal) @@ -2319,7 +2302,7 @@ subroutine buildTransformationSystem(Q,S,Ntrans,cOverA,a_fcc,a_bcc) sd = MATH_I3 ss(1,3) = sqrt(2.0_pReal)/4.0_pReal sd(3,3) = cOverA/sqrt(8.0_pReal/3.0_pReal) - + do i = 1,sum(Ntrans) x = LATTICE_FCCTOHEX_SYSTEMTRANS(1:3,i)/norm2(LATTICE_FCCTOHEX_SYSTEMTRANS(1:3,i)) z = LATTICE_FCCTOHEX_SYSTEMTRANS(4:6,i)/norm2(LATTICE_FCCTOHEX_SYSTEMTRANS(4:6,i)) @@ -2340,7 +2323,7 @@ end subroutine buildTransformationSystem !> @brief select active systems as strings !-------------------------------------------------------------------------------------------------- function getlabels(active,potential,system) result(labels) - + integer, dimension(:), intent(in) :: & active, & !< # of active systems per family potential !< # of potential systems per family @@ -2350,7 +2333,7 @@ function getlabels(active,potential,system) result(labels) character(len=:), dimension(:), allocatable :: labels character(len=:), allocatable :: label - integer :: i,j + integer :: i,j integer :: & a, & !< index of active system p, & !< index in potential system matrix @@ -2359,13 +2342,13 @@ function getlabels(active,potential,system) result(labels) i = 2*size(system,1) + (size(system,1) - 2) + 4 ! 
2 letters per index + spaces + brackets allocate(character(len=i) :: labels(sum(active)), label) - + a = 0 activeFamilies: do f = 1,size(active,1) activeSystems: do s = 1,active(f) a = a + 1 p = sum(potential(1:f-1))+s - + i = 1 label(i:i) = '[' direction: do j = 1, size(system,1)/2 @@ -2374,7 +2357,7 @@ function getlabels(active,potential,system) result(labels) i = i + 3 enddo direction label(i:i) = ']' - + i = i +1 label(i:i) = '(' normal: do j = size(system,1)/2+1, size(system,1) @@ -2383,12 +2366,12 @@ function getlabels(active,potential,system) result(labels) i = i + 3 enddo normal label(i:i) = ')' - + labels(s) = label enddo activeSystems enddo activeFamilies - + end function getlabels From 20a17f71fd9665d5a7d6f85e184ff10646aa6467 Mon Sep 17 00:00:00 2001 From: Test User Date: Tue, 25 Feb 2020 21:44:58 +0100 Subject: [PATCH 43/51] [skip ci] updated version information after successful test of v2.0.3-1778-g822e6b71 --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index ff156acbf..e0cf49f15 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -v2.0.3-1747-ga2e8e5b1 +v2.0.3-1778-g822e6b71 From 839443bc85e15b972417975d7f2df887b07c6974 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Tue, 25 Feb 2020 18:53:12 +0100 Subject: [PATCH 44/51] better (generic) name where possible --- src/grid/grid_mech_FEM.f90 | 114 ++++++++--------- src/grid/grid_mech_spectral_basic.f90 | 94 +++++++------- src/grid/grid_mech_spectral_polarisation.f90 | 122 +++++++++---------- 3 files changed, 165 insertions(+), 165 deletions(-) diff --git a/src/grid/grid_mech_FEM.f90 b/src/grid/grid_mech_FEM.f90 index b51ebb56a..57a27ebe8 100644 --- a/src/grid/grid_mech_FEM.f90 +++ b/src/grid/grid_mech_FEM.f90 @@ -21,10 +21,10 @@ module grid_mech_FEM use discretization use mesh_grid use debug - + implicit none private - + !-------------------------------------------------------------------------------------------------- ! derived types type(tSolutionParams), private :: params @@ -52,20 +52,20 @@ module grid_mech_FEM F_aim_lastIter = math_I3, & F_aim_lastInc = math_I3, & !< previous average deformation gradient P_av = 0.0_pReal !< average 1st Piola--Kirchhoff stress - + character(len=pStringLen), private :: incInfo !< time and increment information - + real(pReal), private, dimension(3,3,3,3) :: & - C_volAvg = 0.0_pReal, & !< current volume average stiffness + C_volAvg = 0.0_pReal, & !< current volume average stiffness C_volAvgLastInc = 0.0_pReal, & !< previous volume average stiffness S = 0.0_pReal !< current compliance (filled up with zeros) - + real(pReal), private :: & err_BC !< deviation from stress BC - + integer, private :: & totalIter = 0 !< total iteration in current increment - + public :: & grid_mech_FEM_init, & grid_mech_FEM_solution, & @@ -79,7 +79,7 @@ contains !> @brief allocates all necessary fields and fills them with data, potentially from restart info !-------------------------------------------------------------------------------------------------- subroutine grid_mech_FEM_init - + real(pReal) :: HGCoeff = 0.0e-2_pReal PetscInt, dimension(0:worldsize-1) :: localK real(pReal), dimension(3,3) :: & @@ -99,9 +99,9 @@ subroutine grid_mech_FEM_init real(pReal), dimension(3,3,3,3) :: devNull PetscScalar, pointer, dimension(:,:,:,:) :: & u_current,u_lastInc - + write(6,'(/,a)') ' <<<+- grid_mech_FEM init -+>>>'; flush(6) - + !-------------------------------------------------------------------------------------------------- ! 
set default and user defined options for PETSc call PETScOptionsInsertString(PETSC_NULL_OPTIONS,'-mech_snes_type newtonls -mech_ksp_type fgmres & @@ -115,11 +115,11 @@ subroutine grid_mech_FEM_init allocate(F (3,3,grid(1),grid(2),grid3),source = 0.0_pReal) allocate(P_current (3,3,grid(1),grid(2),grid3),source = 0.0_pReal) allocate(F_lastInc (3,3,grid(1),grid(2),grid3),source = 0.0_pReal) - + !-------------------------------------------------------------------------------------------------- ! initialize solver specific parts of PETSc call SNESCreate(PETSC_COMM_WORLD,mech_snes,ierr); CHKERRQ(ierr) - call SNESSetOptionsPrefix(mech_snes,'mech_',ierr);CHKERRQ(ierr) + call SNESSetOptionsPrefix(mech_snes,'mech_',ierr);CHKERRQ(ierr) localK = 0 localK(worldrank) = grid3 call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr) @@ -141,12 +141,12 @@ subroutine grid_mech_FEM_init call DMCreateGlobalVector(mech_grid,solution_lastInc,ierr); CHKERRQ(ierr) call DMCreateGlobalVector(mech_grid,solution_rate ,ierr); CHKERRQ(ierr) call DMSNESSetFunctionLocal(mech_grid,formResidual,PETSC_NULL_SNES,ierr) - CHKERRQ(ierr) + CHKERRQ(ierr) call DMSNESSetJacobianLocal(mech_grid,formJacobian,PETSC_NULL_SNES,ierr) CHKERRQ(ierr) call SNESSetConvergenceTest(mech_snes,converged,PETSC_NULL_SNES,PETSC_NULL_FUNCTION,ierr) ! specify custom convergence check function "_converged" CHKERRQ(ierr) - call SNESSetMaxLinearSolveFailures(mech_snes, huge(1), ierr); CHKERRQ(ierr) ! ignore linear solve failures + call SNESSetMaxLinearSolveFailures(mech_snes, huge(1), ierr); CHKERRQ(ierr) ! ignore linear solve failures call SNESSetFromOptions(mech_snes,ierr); CHKERRQ(ierr) ! pull it all together with additional cli arguments !-------------------------------------------------------------------------------------------------- @@ -156,15 +156,15 @@ subroutine grid_mech_FEM_init call VecSet(solution_rate ,0.0_pReal,ierr);CHKERRQ(ierr) call DMDAVecGetArrayF90(mech_grid,solution_current,u_current,ierr); CHKERRQ(ierr) call DMDAVecGetArrayF90(mech_grid,solution_lastInc,u_lastInc,ierr); CHKERRQ(ierr) - + call DMDAGetCorners(mech_grid,xstart,ystart,zstart,xend,yend,zend,ierr) ! local grid extent - CHKERRQ(ierr) + CHKERRQ(ierr) xend = xstart+xend-1 yend = ystart+yend-1 zend = zstart+zend-1 delta = geomSize/real(grid,pReal) ! grid spacing detJ = product(delta) ! cell volume - + BMat = reshape(real([-1.0_pReal/delta(1),-1.0_pReal/delta(2),-1.0_pReal/delta(3), & 1.0_pReal/delta(1),-1.0_pReal/delta(2),-1.0_pReal/delta(3), & -1.0_pReal/delta(1), 1.0_pReal/delta(2),-1.0_pReal/delta(3), & @@ -173,7 +173,7 @@ subroutine grid_mech_FEM_init 1.0_pReal/delta(1),-1.0_pReal/delta(2), 1.0_pReal/delta(3), & -1.0_pReal/delta(1), 1.0_pReal/delta(2), 1.0_pReal/delta(3), & 1.0_pReal/delta(1), 1.0_pReal/delta(2), 1.0_pReal/delta(3)],pReal), [3,8])/4.0_pReal ! shape function derivative matrix - + HGMat = matmul(transpose(HGcomp),HGcomp) & * HGCoeff*(delta(1)*delta(2) + delta(2)*delta(3) + delta(3)*delta(1))/16.0_pReal ! hourglass stabilization matrix @@ -181,11 +181,11 @@ subroutine grid_mech_FEM_init ! 
init fields restartRead: if (interface_restartInc > 0) then write(6,'(/,a,i0,a)') ' reading restart data of increment ', interface_restartInc, ' from file' - + write(fileName,'(a,a,i0,a)') trim(getSolverJobName()),'_',worldrank,'.hdf5' fileHandle = HDF5_openFile(fileName) groupHandle = HDF5_openGroup(fileHandle,'solver') - + call HDF5_read(groupHandle,F_aim, 'F_aim') call HDF5_read(groupHandle,F_aim_lastInc,'F_aim_lastInc') call HDF5_read(groupHandle,F_aimDot, 'F_aimDot') @@ -193,7 +193,7 @@ subroutine grid_mech_FEM_init call HDF5_read(groupHandle,F_lastInc, 'F_lastInc') call HDF5_read(groupHandle,u_current, 'u') call HDF5_read(groupHandle,u_lastInc, 'u_lastInc') - + elseif (interface_restartInc == 0) then restartRead F_lastInc = spread(spread(spread(math_I3,3,grid(1)),4,grid(2)),5,grid3) ! initialize to identity F = spread(spread(spread(math_I3,3,grid(1)),4,grid(2)),5,grid3) @@ -207,12 +207,12 @@ subroutine grid_mech_FEM_init CHKERRQ(ierr) call DMDAVecRestoreArrayF90(mech_grid,solution_lastInc,u_lastInc,ierr) CHKERRQ(ierr) - + restartRead2: if (interface_restartInc > 0) then write(6,'(/,a,i0,a)') ' reading more restart data of increment ', interface_restartInc, ' from file' call HDF5_read(groupHandle,C_volAvg, 'C_volAvg') call HDF5_read(groupHandle,C_volAvgLastInc,'C_volAvgLastInc') - + call HDF5_closeGroup(groupHandle) call HDF5_closeFile(fileHandle) @@ -243,14 +243,14 @@ function grid_mech_FEM_solution(incInfoIn,timeinc,timeinc_old,stress_BC,rotation ! PETSc Data PetscErrorCode :: ierr SNESConvergedReason :: reason - + incInfo = incInfoIn !-------------------------------------------------------------------------------------------------- ! update stiffness (and gamma operator) S = utilities_maskedCompliance(rotation_BC,stress_BC%maskLogical,C_volAvg) !-------------------------------------------------------------------------------------------------- -! set module wide available data +! set module wide available data params%stress_mask = stress_BC%maskFloat params%stress_BC = stress_BC%values params%rotation_BC = rotation_BC @@ -258,13 +258,13 @@ function grid_mech_FEM_solution(incInfoIn,timeinc,timeinc_old,stress_BC,rotation params%timeincOld = timeinc_old !-------------------------------------------------------------------------------------------------- -! solve BVP +! solve BVP call SNESsolve(mech_snes,PETSC_NULL_VEC,solution_current,ierr);CHKERRQ(ierr) !-------------------------------------------------------------------------------------------------- ! 
check convergence call SNESGetConvergedReason(mech_snes,reason,ierr);CHKERRQ(ierr) - + solution%converged = reason > 0 solution%iterationsNeeded = totalIter solution%termIll = terminallyIll @@ -296,15 +296,15 @@ subroutine grid_mech_FEM_forward(cutBack,guess,timeinc,timeinc_old,loadCaseTime, PetscErrorCode :: ierr PetscScalar, pointer, dimension(:,:,:,:) :: & u_current,u_lastInc - + call DMDAVecGetArrayF90(mech_grid,solution_current,u_current,ierr); CHKERRQ(ierr) call DMDAVecGetArrayF90(mech_grid,solution_lastInc,u_lastInc,ierr); CHKERRQ(ierr) - + if (cutBack) then C_volAvg = C_volAvgLastInc else C_volAvgLastInc = C_volAvg - + F_aimDot = merge(stress_BC%maskFloat*(F_aim-F_aim_lastInc)/timeinc_old, 0.0_pReal, guess) F_aim_lastInc = F_aim @@ -329,10 +329,10 @@ subroutine grid_mech_FEM_forward(cutBack,guess,timeinc,timeinc_old,loadCaseTime, call VecSet(solution_rate,0.0_pReal,ierr); CHKERRQ(ierr) endif call VecCopy(solution_current,solution_lastInc,ierr); CHKERRQ(ierr) - + F_lastInc = F - - materialpoint_F0 = reshape(F, [3,3,1,product(grid(1:2))*grid3]) + + materialpoint_F0 = reshape(F, [3,3,1,product(grid(1:2))*grid3]) endif !-------------------------------------------------------------------------------------------------- @@ -365,12 +365,12 @@ subroutine grid_mech_FEM_restartWrite integer(HID_T) :: fileHandle, groupHandle PetscScalar, dimension(:,:,:,:), pointer :: u_current,u_lastInc character(len=pStringLen) :: fileName - + call DMDAVecGetArrayF90(mech_grid,solution_current,u_current,ierr); CHKERRQ(ierr) call DMDAVecGetArrayF90(mech_grid,solution_lastInc,u_lastInc,ierr); CHKERRQ(ierr) write(6,'(a)') ' writing solver data required for restart to file'; flush(6) - + write(fileName,'(a,a,i0,a)') trim(getSolverJobName()),'_',worldrank,'.hdf5' fileHandle = HDF5_openFile(fileName,'w') groupHandle = HDF5_addGroup(fileHandle,'solver') @@ -388,7 +388,7 @@ subroutine grid_mech_FEM_restartWrite call HDF5_closeGroup(groupHandle) call HDF5_closeFile(fileHandle) - + call DMDAVecRestoreArrayF90(mech_grid,solution_current,u_current,ierr);CHKERRQ(ierr) call DMDAVecRestoreArrayF90(mech_grid,solution_lastInc,u_lastInc,ierr);CHKERRQ(ierr) @@ -405,7 +405,7 @@ subroutine converged(snes_local,PETScIter,devNull1,devNull2,fnorm,reason,dummy,i PetscReal, intent(in) :: & devNull1, & devNull2, & - fnorm + fnorm SNESConvergedReason :: reason PetscObject :: dummy PetscErrorCode :: ierr @@ -421,7 +421,7 @@ subroutine converged(snes_local,PETScIter,devNull1,devNull2,fnorm,reason,dummy,i if ((totalIter >= itmin .and. & all([ err_div/divTol, & err_BC /BCTol ] < 1.0_pReal)) & - .or. terminallyIll) then + .or. 
terminallyIll) then reason = 1 elseif (totalIter >= itmax) then reason = -1 @@ -435,10 +435,10 @@ subroutine converged(snes_local,PETScIter,devNull1,devNull2,fnorm,reason,dummy,i write(6,'(1/,a,f12.2,a,es8.2,a,es9.2,a)') ' error divergence = ', & err_div/divTol, ' (',err_div,' / m, tol = ',divTol,')' write(6,'(a,f12.2,a,es8.2,a,es9.2,a)') ' error stress BC = ', & - err_BC/BCTol, ' (',err_BC, ' Pa, tol = ',BCTol,')' + err_BC/BCTol, ' (',err_BC, ' Pa, tol = ',BCTol,')' write(6,'(/,a)') ' ===========================================================================' flush(6) - + end subroutine converged @@ -475,7 +475,7 @@ subroutine formResidual(da_local,x_local, & write(6,'(1x,a,3(a,i0))') trim(incInfo), ' @ Iteration ', itmin, '≤',totalIter+1, '≤', itmax if (iand(debug_level(debug_spectral),debug_spectralRotation) /= 0) & write(6,'(/,a,/,3(3(f12.7,1x)/))',advance='no') & - ' deformation gradient aim (lab) =', transpose(params%rotation_BC%rotTensor2(F_aim,active=.true.)) + ' deformation gradient aim (lab) =', transpose(params%rotation_BC%rotate(F_aim,active=.true.)) write(6,'(/,a,/,3(3(f12.7,1x)/))',advance='no') & ' deformation gradient aim =', transpose(F_aim) flush(6) @@ -491,7 +491,7 @@ subroutine formResidual(da_local,x_local, & x_elem(ctr,1:3) = x_scal(0:2,i+ii,j+jj,k+kk) enddo; enddo; enddo ii = i-xstart+1; jj = j-ystart+1; kk = k-zstart+1 - F(1:3,1:3,ii,jj,kk) = params%rotation_BC%rotTensor2(F_aim,active=.true.) + transpose(matmul(BMat,x_elem)) + F(1:3,1:3,ii,jj,kk) = params%rotation_BC%rotate(F_aim,active=.true.) + transpose(matmul(BMat,x_elem)) enddo; enddo; enddo call DMDAVecRestoreArrayF90(da_local,x_local,x_scal,ierr);CHKERRQ(ierr) @@ -501,7 +501,7 @@ subroutine formResidual(da_local,x_local, & P_av,C_volAvg,devNull, & F,params%timeinc,params%rotation_BC) call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr) - + !-------------------------------------------------------------------------------------------------- ! stress BC handling F_aim_lastIter = F_aim @@ -535,24 +535,24 @@ subroutine formResidual(da_local,x_local, & enddo; enddo; enddo call DMDAVecRestoreArrayF90(da_local,x_local,x_scal,ierr);CHKERRQ(ierr) call DMDAVecRestoreArrayF90(da_local,f_local,f_scal,ierr);CHKERRQ(ierr) - + !-------------------------------------------------------------------------------------------------- ! 
applying boundary conditions call DMDAVecGetArrayF90(da_local,f_local,f_scal,ierr);CHKERRQ(ierr) if (zstart == 0) then - f_scal(0:2,xstart,ystart,zstart) = 0.0 - f_scal(0:2,xend+1,ystart,zstart) = 0.0 - f_scal(0:2,xstart,yend+1,zstart) = 0.0 - f_scal(0:2,xend+1,yend+1,zstart) = 0.0 + f_scal(0:2,xstart,ystart,zstart) = 0.0 + f_scal(0:2,xend+1,ystart,zstart) = 0.0 + f_scal(0:2,xstart,yend+1,zstart) = 0.0 + f_scal(0:2,xend+1,yend+1,zstart) = 0.0 endif if (zend + 1 == grid(3)) then - f_scal(0:2,xstart,ystart,zend+1) = 0.0 - f_scal(0:2,xend+1,ystart,zend+1) = 0.0 - f_scal(0:2,xstart,yend+1,zend+1) = 0.0 - f_scal(0:2,xend+1,yend+1,zend+1) = 0.0 + f_scal(0:2,xstart,ystart,zend+1) = 0.0 + f_scal(0:2,xend+1,ystart,zend+1) = 0.0 + f_scal(0:2,xstart,yend+1,zend+1) = 0.0 + f_scal(0:2,xend+1,yend+1,zend+1) = 0.0 endif - call DMDAVecRestoreArrayF90(da_local,f_local,f_scal,ierr);CHKERRQ(ierr) - + call DMDAVecRestoreArrayF90(da_local,f_local,f_scal,ierr);CHKERRQ(ierr) + end subroutine formResidual @@ -574,7 +574,7 @@ subroutine formJacobian(da_local,x_local,Jac_pre,Jac,dummy,ierr) PetscObject :: dummy MatNullSpace :: matnull PetscErrorCode :: ierr - + BMatFull = 0.0 BMatFull(1:3,1 :8 ) = BMat BMatFull(4:6,9 :16) = BMat @@ -623,7 +623,7 @@ subroutine formJacobian(da_local,x_local,Jac_pre,Jac,dummy,ierr) call MatAssemblyEnd(Jac,MAT_FINAL_ASSEMBLY,ierr); CHKERRQ(ierr) call MatAssemblyBegin(Jac_pre,MAT_FINAL_ASSEMBLY,ierr); CHKERRQ(ierr) call MatAssemblyEnd(Jac_pre,MAT_FINAL_ASSEMBLY,ierr); CHKERRQ(ierr) - + !-------------------------------------------------------------------------------------------------- ! applying boundary conditions diag = (C_volAvg(1,1,1,1)/delta(1)**2.0_pReal + & diff --git a/src/grid/grid_mech_spectral_basic.f90 b/src/grid/grid_mech_spectral_basic.f90 index af8cbc377..25ac9cb63 100644 --- a/src/grid/grid_mech_spectral_basic.f90 +++ b/src/grid/grid_mech_spectral_basic.f90 @@ -28,11 +28,11 @@ module grid_mech_spectral_basic !-------------------------------------------------------------------------------------------------- ! derived types type(tSolutionParams), private :: params - + type, private :: tNumerics logical :: update_gamma !< update gamma operator with current stiffness end type tNumerics - + type(tNumerics) :: num ! numerics parameters. Better name? 
!-------------------------------------------------------------------------------------------------- @@ -55,21 +55,21 @@ module grid_mech_spectral_basic F_aim_lastInc = math_I3, & !< previous average deformation gradient P_av = 0.0_pReal !< average 1st Piola--Kirchhoff stress - character(len=pStringLen), private :: incInfo !< time and increment information + character(len=pStringLen), private :: incInfo !< time and increment information real(pReal), private, dimension(3,3,3,3) :: & - C_volAvg = 0.0_pReal, & !< current volume average stiffness + C_volAvg = 0.0_pReal, & !< current volume average stiffness C_volAvgLastInc = 0.0_pReal, & !< previous volume average stiffness C_minMaxAvg = 0.0_pReal, & !< current (min+max)/2 stiffness C_minMaxAvgLastInc = 0.0_pReal, & !< previous (min+max)/2 stiffness S = 0.0_pReal !< current compliance (filled up with zeros) - + real(pReal), private :: & err_BC, & !< deviation from stress BC err_div !< RMS of div of P - + integer, private :: & totalIter = 0 !< total iteration in current increment - + public :: & grid_mech_spectral_basic_init, & grid_mech_spectral_basic_solution, & @@ -83,27 +83,27 @@ contains !> @brief allocates all necessary fields and fills them with data, potentially from restart info !-------------------------------------------------------------------------------------------------- subroutine grid_mech_spectral_basic_init - + real(pReal), dimension(3,3,grid(1),grid(2),grid3) :: P real(pReal), dimension(3,3) :: & temp33_Real = 0.0_pReal - + PetscErrorCode :: ierr PetscScalar, pointer, dimension(:,:,:,:) :: & F ! pointer to solution data - PetscInt, dimension(worldsize) :: localK + PetscInt, dimension(worldsize) :: localK integer(HID_T) :: fileHandle, groupHandle integer :: fileUnit character(len=pStringLen) :: fileName - + write(6,'(/,a)') ' <<<+- grid_mech_spectral_basic init -+>>>'; flush(6) - + write(6,'(/,a)') ' Eisenlohr et al., International Journal of Plasticity 46:37–53, 2013' write(6,'(a)') ' https://doi.org/10.1016/j.ijplas.2012.09.012' - + write(6,'(/,a)') ' Shanthraj et al., International Journal of Plasticity 66:31–45, 2015' write(6,'(a)') ' https://doi.org/10.1016/j.ijplas.2014.02.006' - + num%update_gamma = config_numerics%getInt('update_gamma',defaultVal=0) > 0 !-------------------------------------------------------------------------------------------------- @@ -117,11 +117,11 @@ subroutine grid_mech_spectral_basic_init ! allocate global fields allocate (F_lastInc(3,3,grid(1),grid(2),grid3),source = 0.0_pReal) allocate (Fdot (3,3,grid(1),grid(2),grid3),source = 0.0_pReal) - + !-------------------------------------------------------------------------------------------------- ! initialize solver specific parts of PETSc call SNESCreate(PETSC_COMM_WORLD,snes,ierr); CHKERRQ(ierr) - call SNESSetOptionsPrefix(snes,'mech_',ierr);CHKERRQ(ierr) + call SNESSetOptionsPrefix(snes,'mech_',ierr);CHKERRQ(ierr) localK = 0 localK(worldrank+1) = grid3 call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr) @@ -139,45 +139,45 @@ subroutine grid_mech_spectral_basic_init call DMsetUp(da,ierr); CHKERRQ(ierr) call DMcreateGlobalVector(da,solution_vec,ierr); CHKERRQ(ierr) ! global solution vector (grid x 9, i.e. every def grad tensor) call DMDASNESsetFunctionLocal(da,INSERT_VALUES,formResidual,PETSC_NULL_SNES,ierr) ! residual vector of same shape as solution vector - CHKERRQ(ierr) + CHKERRQ(ierr) call SNESsetConvergenceTest(snes,converged,PETSC_NULL_SNES,PETSC_NULL_FUNCTION,ierr) ! 
specify custom convergence check function "converged" CHKERRQ(ierr) call SNESsetFromOptions(snes,ierr); CHKERRQ(ierr) ! pull it all together with additional CLI arguments !-------------------------------------------------------------------------------------------------- -! init fields +! init fields call DMDAVecGetArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr) ! places pointer on PETSc data - + restartRead: if (interface_restartInc > 0) then write(6,'(/,a,i0,a)') ' reading restart data of increment ', interface_restartInc, ' from file' write(fileName,'(a,a,i0,a)') trim(getSolverJobName()),'_',worldrank,'.hdf5' fileHandle = HDF5_openFile(fileName) groupHandle = HDF5_openGroup(fileHandle,'solver') - + call HDF5_read(groupHandle,F_aim, 'F_aim') call HDF5_read(groupHandle,F_aim_lastInc,'F_aim_lastInc') call HDF5_read(groupHandle,F_aimDot, 'F_aimDot') call HDF5_read(groupHandle,F, 'F') call HDF5_read(groupHandle,F_lastInc, 'F_lastInc') - + elseif (interface_restartInc == 0) then restartRead F_lastInc = spread(spread(spread(math_I3,3,grid(1)),4,grid(2)),5,grid3) ! initialize to identity F = reshape(F_lastInc,[9,grid(1),grid(2),grid3]) endif restartRead - + materialpoint_F0 = reshape(F_lastInc, [3,3,1,product(grid(1:2))*grid3]) ! set starting condition for materialpoint_stressAndItsTangent call Utilities_updateCoords(reshape(F,shape(F_lastInc))) call Utilities_constitutiveResponse(P,temp33_Real,C_volAvg,C_minMaxAvg, & ! stress field, stress avg, global average of stiffness and (min+max)/2 reshape(F,shape(F_lastInc)), & ! target F 0.0_pReal) ! time increment call DMDAVecRestoreArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr) ! deassociate pointer - + restartRead2: if (interface_restartInc > 0) then write(6,'(/,a,i0,a)') ' reading more restart data of increment ', interface_restartInc, ' from file' call HDF5_read(groupHandle,C_volAvg, 'C_volAvg') call HDF5_read(groupHandle,C_volAvgLastInc,'C_volAvgLastInc') - + call HDF5_closeGroup(groupHandle) call HDF5_closeFile(fileHandle) @@ -197,7 +197,7 @@ end subroutine grid_mech_spectral_basic_init !> @brief solution for the basic scheme with internal iterations !-------------------------------------------------------------------------------------------------- function grid_mech_spectral_basic_solution(incInfoIn,timeinc,timeinc_old,stress_BC,rotation_BC) result(solution) - + !-------------------------------------------------------------------------------------------------- ! input data for solution character(len=*), intent(in) :: & @@ -215,7 +215,7 @@ function grid_mech_spectral_basic_solution(incInfoIn,timeinc,timeinc_old,stress_ ! PETSc Data PetscErrorCode :: ierr SNESConvergedReason :: reason - + incInfo = incInfoIn !-------------------------------------------------------------------------------------------------- @@ -224,7 +224,7 @@ function grid_mech_spectral_basic_solution(incInfoIn,timeinc,timeinc_old,stress_ if(num%update_gamma) call utilities_updateGamma(C_minMaxAvg) !-------------------------------------------------------------------------------------------------- -! set module wide available data +! set module wide available data params%stress_mask = stress_BC%maskFloat params%stress_BC = stress_BC%values params%rotation_BC = rotation_BC @@ -232,13 +232,13 @@ function grid_mech_spectral_basic_solution(incInfoIn,timeinc,timeinc_old,stress_ params%timeincOld = timeinc_old !-------------------------------------------------------------------------------------------------- -! solve BVP +! 
solve BVP call SNESsolve(snes,PETSC_NULL_VEC,solution_vec,ierr); CHKERRQ(ierr) !-------------------------------------------------------------------------------------------------- ! check convergence call SNESGetConvergedReason(snes,reason,ierr); CHKERRQ(ierr) - + solution%converged = reason > 0 solution%iterationsNeeded = totalIter solution%termIll = terminallyIll @@ -271,14 +271,14 @@ subroutine grid_mech_spectral_basic_forward(cutBack,guess,timeinc,timeinc_old,lo PetscScalar, dimension(:,:,:,:), pointer :: F call DMDAVecGetArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr) - + if (cutBack) then C_volAvg = C_volAvgLastInc C_minMaxAvg = C_minMaxAvgLastInc else C_volAvgLastInc = C_volAvg C_minMaxAvgLastInc = C_minMaxAvg - + F_aimDot = merge(stress_BC%maskFloat*(F_aim-F_aim_lastInc)/timeinc_old, 0.0_pReal, guess) F_aim_lastInc = F_aim @@ -297,9 +297,9 @@ subroutine grid_mech_spectral_basic_forward(cutBack,guess,timeinc,timeinc_old,lo Fdot = utilities_calculateRate(guess, & F_lastInc,reshape(F,[3,3,grid(1),grid(2),grid3]),timeinc_old, & - rotation_BC%rotTensor2(F_aimDot,active=.true.)) + rotation_BC%rotate(F_aimDot,active=.true.)) F_lastInc = reshape(F,[3,3,grid(1),grid(2),grid3]) - + materialpoint_F0 = reshape(F, [3,3,1,product(grid(1:2))*grid3]) endif @@ -307,9 +307,9 @@ subroutine grid_mech_spectral_basic_forward(cutBack,guess,timeinc,timeinc_old,lo ! update average and local deformation gradients F_aim = F_aim_lastInc + F_aimDot * timeinc F = reshape(Utilities_forwardField(timeinc,F_lastInc,Fdot, & ! estimate of F at end of time+timeinc that matches rotated F_aim on average - rotation_BC%rotTensor2(F_aim,active=.true.)),[9,grid(1),grid(2),grid3]) + rotation_BC%rotate(F_aim,active=.true.)),[9,grid(1),grid(2),grid3]) call DMDAVecRestoreArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr) - + end subroutine grid_mech_spectral_basic_forward @@ -341,11 +341,11 @@ subroutine grid_mech_spectral_basic_restartWrite call DMDAVecGetArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr) write(6,'(a)') ' writing solver data required for restart to file'; flush(6) - + write(fileName,'(a,a,i0,a)') trim(getSolverJobName()),'_',worldrank,'.hdf5' fileHandle = HDF5_openFile(fileName,'w') groupHandle = HDF5_addGroup(fileHandle,'solver') - + call HDF5_write(groupHandle,F_aim, 'F_aim') call HDF5_write(groupHandle,F_aim_lastInc,'F_aim_lastInc') call HDF5_write(groupHandle,F_aimDot, 'F_aimDot') @@ -358,7 +358,7 @@ subroutine grid_mech_spectral_basic_restartWrite call HDF5_closeGroup(groupHandle) call HDF5_closeFile(fileHandle) - + if (num%update_gamma) call utilities_saveReferenceStiffness call DMDAVecRestoreArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr) @@ -376,7 +376,7 @@ subroutine converged(snes_local,PETScIter,devNull1,devNull2,devNull3,reason,dumm PetscReal, intent(in) :: & devNull1, & devNull2, & - devNull3 + devNull3 SNESConvergedReason :: reason PetscObject :: dummy PetscErrorCode :: ierr @@ -390,7 +390,7 @@ subroutine converged(snes_local,PETScIter,devNull1,devNull2,devNull3,reason,dumm if ((totalIter >= itmin .and. & all([ err_div/divTol, & err_BC /BCTol ] < 1.0_pReal)) & - .or. terminallyIll) then + .or. 
terminallyIll) then reason = 1 elseif (totalIter >= itmax) then reason = -1 @@ -404,10 +404,10 @@ subroutine converged(snes_local,PETScIter,devNull1,devNull2,devNull3,reason,dumm write(6,'(1/,a,f12.2,a,es8.2,a,es9.2,a)') ' error divergence = ', & err_div/divTol, ' (',err_div,' / m, tol = ',divTol,')' write(6,'(a,f12.2,a,es8.2,a,es9.2,a)') ' error stress BC = ', & - err_BC/BCTol, ' (',err_BC, ' Pa, tol = ',BCTol,')' + err_BC/BCTol, ' (',err_BC, ' Pa, tol = ',BCTol,')' write(6,'(/,a)') ' ===========================================================================' - flush(6) - + flush(6) + end subroutine converged @@ -441,7 +441,7 @@ subroutine formResidual(in, F, & write(6,'(1x,a,3(a,i0))') trim(incInfo), ' @ Iteration ', itmin, '≤',totalIter, '≤', itmax if (iand(debug_level(debug_spectral),debug_spectralRotation) /= 0) & write(6,'(/,a,/,3(3(f12.7,1x)/))',advance='no') & - ' deformation gradient aim (lab) =', transpose(params%rotation_BC%rotTensor2(F_aim,active=.true.)) + ' deformation gradient aim (lab) =', transpose(params%rotation_BC%rotate(F_aim,active=.true.)) write(6,'(/,a,/,3(3(f12.7,1x)/))',advance='no') & ' deformation gradient aim =', transpose(F_aim) flush(6) @@ -453,7 +453,7 @@ subroutine formResidual(in, F, & P_av,C_volAvg,C_minMaxAvg, & F,params%timeinc,params%rotation_BC) call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr) - + !-------------------------------------------------------------------------------------------------- ! stress BC handling deltaF_aim = math_mul3333xx33(S, P_av - params%stress_BC) @@ -466,9 +466,9 @@ subroutine formResidual(in, F, & tensorField_real(1:3,1:3,1:grid(1),1:grid(2),1:grid3) = residuum ! store fPK field for subsequent FFT forward transform call utilities_FFTtensorForward ! FFT forward of global "tensorField_real" err_div = Utilities_divergenceRMS() ! divRMS of tensorField_fourier for later use - call utilities_fourierGammaConvolution(params%rotation_BC%rotTensor2(deltaF_aim,active=.true.)) ! convolution of Gamma and tensorField_fourier + call utilities_fourierGammaConvolution(params%rotation_BC%rotate(deltaF_aim,active=.true.)) ! convolution of Gamma and tensorField_fourier call utilities_FFTtensorBackward ! FFT backward of global tensorField_fourier - + !-------------------------------------------------------------------------------------------------- ! constructing residual residuum = tensorField_real(1:3,1:3,1:grid(1),1:grid(2),1:grid3) ! Gamma*P gives correction towards div(P) = 0, so needs to be zero, too diff --git a/src/grid/grid_mech_spectral_polarisation.f90 b/src/grid/grid_mech_spectral_polarisation.f90 index bdc65a8c5..ab880e2a6 100644 --- a/src/grid/grid_mech_spectral_polarisation.f90 +++ b/src/grid/grid_mech_spectral_polarisation.f90 @@ -22,20 +22,20 @@ module grid_mech_spectral_polarisation use homogenization use mesh_grid use debug - + implicit none private - + !-------------------------------------------------------------------------------------------------- ! derived types type(tSolutionParams), private :: params - + type, private :: tNumerics logical :: update_gamma !< update gamma operator with current stiffness end type tNumerics - + type(tNumerics) :: num ! numerics parameters. Better name? - + !-------------------------------------------------------------------------------------------------- ! PETSc data DM, private :: da @@ -46,7 +46,7 @@ module grid_mech_spectral_polarisation ! 
common pointwise data real(pReal), private, dimension(:,:,:,:,:), allocatable :: & F_lastInc, & !< field of previous compatible deformation gradients - F_tau_lastInc, & !< field of previous incompatible deformation gradient + F_tau_lastInc, & !< field of previous incompatible deformation gradient Fdot, & !< field of assumed rate of compatible deformation gradient F_tauDot !< field of assumed rate of incopatible deformation gradient @@ -58,25 +58,25 @@ module grid_mech_spectral_polarisation F_aim_lastInc = math_I3, & !< previous average deformation gradient F_av = 0.0_pReal, & !< average incompatible def grad field P_av = 0.0_pReal !< average 1st Piola--Kirchhoff stress - + character(len=pStringLen), private :: incInfo !< time and increment information real(pReal), private, dimension(3,3,3,3) :: & - C_volAvg = 0.0_pReal, & !< current volume average stiffness + C_volAvg = 0.0_pReal, & !< current volume average stiffness C_volAvgLastInc = 0.0_pReal, & !< previous volume average stiffness C_minMaxAvg = 0.0_pReal, & !< current (min+max)/2 stiffness C_minMaxAvgLastInc = 0.0_pReal, & !< previous (min+max)/2 stiffness S = 0.0_pReal, & !< current compliance (filled up with zeros) C_scale = 0.0_pReal, & S_scale = 0.0_pReal - + real(pReal), private :: & err_BC, & !< deviation from stress BC err_curl, & !< RMS of curl of F err_div !< RMS of div of P - + integer, private :: & totalIter = 0 !< total iteration in current increment - + public :: & grid_mech_spectral_polarisation_init, & grid_mech_spectral_polarisation_solution, & @@ -90,26 +90,26 @@ contains !> @brief allocates all necessary fields and fills them with data, potentially from restart info !-------------------------------------------------------------------------------------------------- subroutine grid_mech_spectral_polarisation_init - + real(pReal), dimension(3,3,grid(1),grid(2),grid3) :: P real(pReal), dimension(3,3) :: & temp33_Real = 0.0_pReal - + PetscErrorCode :: ierr PetscScalar, pointer, dimension(:,:,:,:) :: & FandF_tau, & ! overall pointer to solution data F, & ! specific (sub)pointer F_tau ! specific (sub)pointer - PetscInt, dimension(0:worldsize-1) :: localK + PetscInt, dimension(0:worldsize-1) :: localK integer(HID_T) :: fileHandle, groupHandle integer :: fileUnit character(len=pStringLen) :: fileName - + write(6,'(/,a)') ' <<<+- grid_mech_spectral_polarisation init -+>>>'; flush(6) - + write(6,'(/,a)') ' Shanthraj et al., International Journal of Plasticity 66:31–45, 2015' write(6,'(a)') ' https://doi.org/10.1016/j.ijplas.2014.02.006' - + num%update_gamma = config_numerics%getInt('update_gamma',defaultVal=0) > 0 !-------------------------------------------------------------------------------------------------- @@ -125,11 +125,11 @@ subroutine grid_mech_spectral_polarisation_init allocate(Fdot (3,3,grid(1),grid(2),grid3),source = 0.0_pReal) allocate(F_tau_lastInc(3,3,grid(1),grid(2),grid3),source = 0.0_pReal) allocate(F_tauDot (3,3,grid(1),grid(2),grid3),source = 0.0_pReal) - + !-------------------------------------------------------------------------------------------------- ! 
initialize solver specific parts of PETSc call SNESCreate(PETSC_COMM_WORLD,snes,ierr); CHKERRQ(ierr) - call SNESSetOptionsPrefix(snes,'mech_',ierr);CHKERRQ(ierr) + call SNESSetOptionsPrefix(snes,'mech_',ierr);CHKERRQ(ierr) localK = 0 localK(worldrank) = grid3 call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr) @@ -147,24 +147,24 @@ subroutine grid_mech_spectral_polarisation_init call DMsetUp(da,ierr); CHKERRQ(ierr) call DMcreateGlobalVector(da,solution_vec,ierr); CHKERRQ(ierr) ! global solution vector (grid x 18, i.e. every def grad tensor) call DMDASNESsetFunctionLocal(da,INSERT_VALUES,formResidual,PETSC_NULL_SNES,ierr) ! residual vector of same shape as solution vector - CHKERRQ(ierr) + CHKERRQ(ierr) call SNESsetConvergenceTest(snes,converged,PETSC_NULL_SNES,PETSC_NULL_FUNCTION,ierr) ! specify custom convergence check function "converged" CHKERRQ(ierr) call SNESsetFromOptions(snes,ierr); CHKERRQ(ierr) ! pull it all together with additional CLI arguments !-------------------------------------------------------------------------------------------------- -! init fields +! init fields call DMDAVecGetArrayF90(da,solution_vec,FandF_tau,ierr); CHKERRQ(ierr) ! places pointer on PETSc data F => FandF_tau(0: 8,:,:,:) F_tau => FandF_tau(9:17,:,:,:) - + restartRead: if (interface_restartInc > 0) then write(6,'(/,a,i0,a)') ' reading restart data of increment ', interface_restartInc, ' from file' write(fileName,'(a,a,i0,a)') trim(getSolverJobName()),'_',worldrank,'.hdf5' fileHandle = HDF5_openFile(fileName) groupHandle = HDF5_openGroup(fileHandle,'solver') - + call HDF5_read(groupHandle,F_aim, 'F_aim') call HDF5_read(groupHandle,F_aim_lastInc,'F_aim_lastInc') call HDF5_read(groupHandle,F_aimDot, 'F_aimDot') @@ -172,21 +172,21 @@ subroutine grid_mech_spectral_polarisation_init call HDF5_read(groupHandle,F_lastInc, 'F_lastInc') call HDF5_read(groupHandle,F_tau, 'F_tau') call HDF5_read(groupHandle,F_tau_lastInc,'F_tau_lastInc') - + elseif (interface_restartInc == 0) then restartRead F_lastInc = spread(spread(spread(math_I3,3,grid(1)),4,grid(2)),5,grid3) ! initialize to identity F = reshape(F_lastInc,[9,grid(1),grid(2),grid3]) F_tau = 2.0_pReal*F F_tau_lastInc = 2.0_pReal*F_lastInc endif restartRead - + materialpoint_F0 = reshape(F_lastInc, [3,3,1,product(grid(1:2))*grid3]) ! set starting condition for materialpoint_stressAndItsTangent call Utilities_updateCoords(reshape(F,shape(F_lastInc))) call Utilities_constitutiveResponse(P,temp33_Real,C_volAvg,C_minMaxAvg, & ! stress field, stress avg, global average of stiffness and (min+max)/2 reshape(F,shape(F_lastInc)), & ! target F 0.0_pReal) ! time increment call DMDAVecRestoreArrayF90(da,solution_vec,FandF_tau,ierr); CHKERRQ(ierr) ! 
deassociate pointer - + restartRead2: if (interface_restartInc > 0) then write(6,'(/,a,i0,a)') ' reading more restart data of increment ', interface_restartInc, ' from file' call HDF5_read(groupHandle,C_volAvg, 'C_volAvg') @@ -200,12 +200,12 @@ subroutine grid_mech_spectral_polarisation_init call MPI_File_read(fileUnit,C_minMaxAvg,81,MPI_DOUBLE,MPI_STATUS_IGNORE,ierr) call MPI_File_close(fileUnit,ierr) endif restartRead2 - + call utilities_updateGamma(C_minMaxAvg) call utilities_saveReferenceStiffness C_scale = C_minMaxAvg S_scale = math_invSym3333(C_minMaxAvg) - + end subroutine grid_mech_spectral_polarisation_init @@ -229,9 +229,9 @@ function grid_mech_spectral_polarisation_solution(incInfoIn,timeinc,timeinc_old, solution !-------------------------------------------------------------------------------------------------- ! PETSc Data - PetscErrorCode :: ierr + PetscErrorCode :: ierr SNESConvergedReason :: reason - + incInfo = incInfoIn !-------------------------------------------------------------------------------------------------- @@ -241,10 +241,10 @@ function grid_mech_spectral_polarisation_solution(incInfoIn,timeinc,timeinc_old, call utilities_updateGamma(C_minMaxAvg) C_scale = C_minMaxAvg S_scale = math_invSym3333(C_minMaxAvg) - endif + endif !-------------------------------------------------------------------------------------------------- -! set module wide available data +! set module wide available data params%stress_mask = stress_BC%maskFloat params%stress_BC = stress_BC%values params%rotation_BC = rotation_BC @@ -252,13 +252,13 @@ function grid_mech_spectral_polarisation_solution(incInfoIn,timeinc,timeinc_old, params%timeincOld = timeinc_old !-------------------------------------------------------------------------------------------------- -! solve BVP +! solve BVP call SNESsolve(snes,PETSC_NULL_VEC,solution_vec,ierr); CHKERRQ(ierr) !-------------------------------------------------------------------------------------------------- ! check convergence call SNESGetConvergedReason(snes,reason,ierr); CHKERRQ(ierr) - + solution%converged = reason > 0 solution%iterationsNeeded = totalIter solution%termIll = terminallyIll @@ -299,7 +299,7 @@ subroutine grid_mech_spectral_polarisation_forward(cutBack,guess,timeinc,timeinc if (cutBack) then C_volAvg = C_volAvgLastInc C_minMaxAvg = C_minMaxAvgLastInc - else + else C_volAvgLastInc = C_volAvg C_minMaxAvgLastInc = C_minMaxAvg @@ -321,13 +321,13 @@ subroutine grid_mech_spectral_polarisation_forward(cutBack,guess,timeinc,timeinc Fdot = utilities_calculateRate(guess, & F_lastInc,reshape(F,[3,3,grid(1),grid(2),grid3]),timeinc_old, & - rotation_BC%rotTensor2(F_aimDot,active=.true.)) + rotation_BC%rotate(F_aimDot,active=.true.)) F_tauDot = utilities_calculateRate(guess, & F_tau_lastInc,reshape(F_tau,[3,3,grid(1),grid(2),grid3]), timeinc_old, & - rotation_BC%rotTensor2(F_aimDot,active=.true.)) + rotation_BC%rotate(F_aimDot,active=.true.)) F_lastInc = reshape(F, [3,3,grid(1),grid(2),grid3]) F_tau_lastInc = reshape(F_tau,[3,3,grid(1),grid(2),grid3]) - + materialpoint_F0 = reshape(F,[3,3,1,product(grid(1:2))*grid3]) endif @@ -335,7 +335,7 @@ subroutine grid_mech_spectral_polarisation_forward(cutBack,guess,timeinc,timeinc ! update average and local deformation gradients F_aim = F_aim_lastInc + F_aimDot * timeinc F = reshape(utilities_forwardField(timeinc,F_lastInc,Fdot, & ! 
estimate of F at end of time+timeinc that matches rotated F_aim on average - rotation_BC%rotTensor2(F_aim,active=.true.)),& + rotation_BC%rotate(F_aim,active=.true.)),& [9,grid(1),grid(2),grid3]) if (guess) then F_tau = reshape(Utilities_forwardField(timeinc,F_tau_lastInc,F_taudot), & @@ -351,7 +351,7 @@ subroutine grid_mech_spectral_polarisation_forward(cutBack,guess,timeinc,timeinc F_tau(1:9,i,j,k) = reshape(F_lambda33,[9])+F(1:9,i,j,k) enddo; enddo; enddo endif - + call DMDAVecRestoreArrayF90(da,solution_vec,FandF_tau,ierr); CHKERRQ(ierr) end subroutine grid_mech_spectral_polarisation_forward @@ -364,7 +364,7 @@ subroutine grid_mech_spectral_polarisation_updateCoords PetscErrorCode :: ierr PetscScalar, dimension(:,:,:,:), pointer :: FandF_tau - + call DMDAVecGetArrayF90(da,solution_vec,FandF_tau,ierr); CHKERRQ(ierr) call utilities_updateCoords(FandF_tau(0:8,:,:,:)) call DMDAVecRestoreArrayF90(da,solution_vec,FandF_tau,ierr); CHKERRQ(ierr) @@ -391,7 +391,7 @@ subroutine grid_mech_spectral_polarisation_restartWrite write(fileName,'(a,a,i0,a)') trim(getSolverJobName()),'_',worldrank,'.hdf5' fileHandle = HDF5_openFile(fileName,'w') groupHandle = HDF5_addGroup(fileHandle,'solver') - + call HDF5_write(groupHandle,F_aim, 'F_aim') call HDF5_write(groupHandle,F_aim_lastInc,'F_aim_lastInc') call HDF5_write(groupHandle,F_aimDot, 'F_aimDot') @@ -405,9 +405,9 @@ subroutine grid_mech_spectral_polarisation_restartWrite call HDF5_closeGroup(groupHandle) call HDF5_closeFile(fileHandle) - + if(num%update_gamma) call utilities_saveReferenceStiffness - + call DMDAVecRestoreArrayF90(da,solution_vec,FandF_tau,ierr); CHKERRQ(ierr) end subroutine grid_mech_spectral_polarisation_restartWrite @@ -417,13 +417,13 @@ end subroutine grid_mech_spectral_polarisation_restartWrite !> @brief convergence check !-------------------------------------------------------------------------------------------------- subroutine converged(snes_local,PETScIter,devNull1,devNull2,devNull3,reason,dummy,ierr) - + SNES :: snes_local PetscInt, intent(in) :: PETScIter PetscReal, intent(in) :: & devNull1, & devNull2, & - devNull3 + devNull3 SNESConvergedReason :: reason PetscObject :: dummy PetscErrorCode :: ierr @@ -431,11 +431,11 @@ subroutine converged(snes_local,PETScIter,devNull1,devNull2,devNull3,reason,dumm curlTol, & divTol, & BCTol - + curlTol = max(maxval(abs(F_aim-math_I3))*err_curl_tolRel ,err_curl_tolAbs) divTol = max(maxval(abs(P_av)) *err_div_tolRel ,err_div_tolAbs) BCTol = max(maxval(abs(P_av)) *err_stress_tolRel,err_stress_tolAbs) - + if ((totalIter >= itmin .and. 
& all([ err_div /divTol, & err_curl/curlTol, & @@ -456,9 +456,9 @@ subroutine converged(snes_local,PETScIter,devNull1,devNull2,devNull3,reason,dumm write(6, '(a,f12.2,a,es8.2,a,es9.2,a)') ' error curl = ', & err_curl/curlTol,' (',err_curl,' -, tol = ',curlTol,')' write(6, '(a,f12.2,a,es8.2,a,es9.2,a)') ' error BC = ', & - err_BC/BCTol, ' (',err_BC, ' Pa, tol = ',BCTol,')' + err_BC/BCTol, ' (',err_BC, ' Pa, tol = ',BCTol,')' write(6,'(/,a)') ' ===========================================================================' - flush(6) + flush(6) end subroutine converged @@ -498,7 +498,7 @@ subroutine formResidual(in, FandF_tau, & F_av = sum(sum(sum(F,dim=5),dim=4),dim=3) * wgt call MPI_Allreduce(MPI_IN_PLACE,F_av,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr) - + call SNESGetNumberFunctionEvals(snes,nfuncs,ierr); CHKERRQ(ierr) call SNESGetIterationNumber(snes,PETScIter,ierr); CHKERRQ(ierr) @@ -510,14 +510,14 @@ subroutine formResidual(in, FandF_tau, & write(6,'(1x,a,3(a,i0))') trim(incInfo), ' @ Iteration ', itmin, '≤',totalIter, '≤', itmax if (iand(debug_level(debug_spectral),debug_spectralRotation) /= 0) & write(6,'(/,a,/,3(3(f12.7,1x)/))',advance='no') & - ' deformation gradient aim (lab) =', transpose(params%rotation_BC%rotTensor2(F_aim,active=.true.)) + ' deformation gradient aim (lab) =', transpose(params%rotation_BC%rotate(F_aim,active=.true.)) write(6,'(/,a,/,3(3(f12.7,1x)/))',advance='no') & ' deformation gradient aim =', transpose(F_aim) flush(6) endif newIteration !-------------------------------------------------------------------------------------------------- -! +! tensorField_real = 0.0_pReal do k = 1, grid3; do j = 1, grid(2); do i = 1, grid(1) tensorField_real(1:3,1:3,i,j,k) = & @@ -525,15 +525,15 @@ subroutine formResidual(in, FandF_tau, & polarAlpha*matmul(F(1:3,1:3,i,j,k), & math_mul3333xx33(C_scale,F_tau(1:3,1:3,i,j,k) - F(1:3,1:3,i,j,k) - math_I3)) enddo; enddo; enddo - + !-------------------------------------------------------------------------------------------------- -! doing convolution in Fourier space +! doing convolution in Fourier space call utilities_FFTtensorForward - call utilities_fourierGammaConvolution(params%rotation_BC%rotTensor2(polarBeta*F_aim,active=.true.)) + call utilities_fourierGammaConvolution(params%rotation_BC%rotate(polarBeta*F_aim,active=.true.)) call utilities_FFTtensorBackward !-------------------------------------------------------------------------------------------------- -! constructing residual +! constructing residual residual_F_tau = polarBeta*F - tensorField_real(1:3,1:3,1:grid(1),1:grid(2),1:grid3) !-------------------------------------------------------------------------------------------------- @@ -541,13 +541,13 @@ subroutine formResidual(in, FandF_tau, & call utilities_constitutiveResponse(residual_F, & ! "residuum" gets field of first PK stress (to save memory) P_av,C_volAvg,C_minMaxAvg, & F - residual_F_tau/polarBeta,params%timeinc,params%rotation_BC) - call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr) - + call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr) + !-------------------------------------------------------------------------------------------------- ! stress BC handling F_aim = F_aim - math_mul3333xx33(S, ((P_av - params%stress_BC))) ! 
S = 0.0 for no bc err_BC = maxval(abs((1.0_pReal-params%stress_mask) * math_mul3333xx33(C_scale,F_aim & - -params%rotation_BC%rotTensor2(F_av)) + & + -params%rotation_BC%rotate(F_av)) + & params%stress_mask * (P_av-params%stress_BC))) ! mask = 0.0 for no bc ! calculate divergence tensorField_real = 0.0_pReal @@ -566,7 +566,7 @@ subroutine formResidual(in, FandF_tau, & math_mul3333xx33(C_scale,F_tau(1:3,1:3,i,j,k) - F(1:3,1:3,i,j,k) - math_I3))) & + residual_F_tau(1:3,1:3,i,j,k) enddo; enddo; enddo - + !-------------------------------------------------------------------------------------------------- ! calculating curl tensorField_real = 0.0_pReal From 1d77af66c0dd7301d3df48445120d425065fbcc9 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Wed, 26 Feb 2020 07:23:29 +0100 Subject: [PATCH 45/51] preparing removal of initializeStructure --- src/lattice.f90 | 81 +++++++++++++++++++++++-------------------------- 1 file changed, 38 insertions(+), 43 deletions(-) diff --git a/src/lattice.f90 b/src/lattice.f90 index b31cf98b3..ab2f43284 100644 --- a/src/lattice.f90 +++ b/src/lattice.f90 @@ -524,7 +524,7 @@ contains !-------------------------------------------------------------------------------------------------- subroutine lattice_init - integer :: Nphases, p + integer :: Nphases, p,i character(len=pStringLen) :: & tag = '' real(pReal) :: CoverA @@ -553,25 +553,8 @@ subroutine lattice_init allocate(lattice_Scleavage(3,3,3,lattice_maxNcleavage,Nphases),source=0.0_pReal) allocate(lattice_NcleavageSystem(lattice_maxNcleavageFamily,Nphases),source=0) - do p = 1, size(config_phase) - tag = config_phase(p)%getString('lattice_structure') - select case(tag(1:3)) - case('iso') - lattice_structure(p) = LATTICE_iso_ID - case('fcc') - lattice_structure(p) = LATTICE_fcc_ID - case('bcc') - lattice_structure(p) = LATTICE_bcc_ID - case('hex') - lattice_structure(p) = LATTICE_hex_ID - case('bct') - lattice_structure(p) = LATTICE_bct_ID - case('ort') - lattice_structure(p) = LATTICE_ort_ID - case default - call IO_error(130,ext_msg='lattice_init') - end select + do p = 1, size(config_phase) lattice_C66(1,1,p) = config_phase(p)%getFloat('c11',defaultVal=0.0_pReal) lattice_C66(1,2,p) = config_phase(p)%getFloat('c12',defaultVal=0.0_pReal) @@ -583,9 +566,44 @@ subroutine lattice_init lattice_C66(5,5,p) = config_phase(p)%getFloat('c55',defaultVal=0.0_pReal) lattice_C66(6,6,p) = config_phase(p)%getFloat('c66',defaultVal=0.0_pReal) - CoverA = config_phase(p)%getFloat('c/a',defaultVal=0.0_pReal) + tag = config_phase(p)%getString('lattice_structure') + select case(tag(1:3)) + case('iso') + lattice_structure(p) = LATTICE_iso_ID + case('fcc') + lattice_structure(p) = LATTICE_fcc_ID + case('bcc') + lattice_structure(p) = LATTICE_bcc_ID + case('hex') + if(CoverA < 1.0_pReal .or. CoverA > 2.0_pReal) call IO_error(131,el=p) + lattice_structure(p) = LATTICE_hex_ID + case('bct') + if(CoverA > 2.0_pReal) call IO_error(131,el=p) + lattice_structure(p) = LATTICE_bct_ID + case('ort') + lattice_structure(p) = LATTICE_ort_ID + case default + call IO_error(130,ext_msg='lattice_init') + end select + + lattice_C66(1:6,1:6,p) = lattice_symmetrizeC66(lattice_structure(p),lattice_C66(1:6,1:6,p)) + + lattice_mu(p) = 0.2_pReal *(lattice_C66(1,1,p) -lattice_C66(1,2,p) +3.0_pReal*lattice_C66(4,4,p)) ! 
(C11iso-C12iso)/2 with C11iso=(3*C11+2*C12+4*C44)/5 and C12iso=(C11+4*C12-2*C44)/5 + lattice_nu(p) = ( lattice_C66(1,1,p) +4.0_pReal*lattice_C66(1,2,p) -2.0_pReal*lattice_C66(4,4,p)) & + / (4.0_pReal*lattice_C66(1,1,p) +6.0_pReal*lattice_C66(1,2,p) +2.0_pReal*lattice_C66(4,4,p))! C12iso/(C11iso+C12iso) with C11iso=(3*C11+2*C12+4*C44)/5 and C12iso=(C11+4*C12-2*C44)/5 + + lattice_C3333(1:3,1:3,1:3,1:3,p) = math_Voigt66to3333(lattice_C66(1:6,1:6,p)) ! Literature data is Voigt + lattice_C66(1:6,1:6,p) = math_sym3333to66(lattice_C3333(1:3,1:3,1:3,1:3,p)) ! DAMASK uses Mandel-weighting + + do i = 1, 6 + if (abs(lattice_C66(i,i,p)) 2.0_pReal) & - .and. lattice_structure(p) == LATTICE_hex_ID) call IO_error(131,el=p) ! checking physical significance of c/a - if ((CoverA > 2.0_pReal) & - .and. lattice_structure(p) == LATTICE_bct_ID) call IO_error(131,el=p) ! checking physical significance of c/a call lattice_initializeStructure(p, CoverA) enddo @@ -627,25 +641,6 @@ subroutine lattice_initializeStructure(myPhase,CoverA) integer :: & i - lattice_C66(1:6,1:6,myPhase) = lattice_symmetrizeC66(lattice_structure(myPhase),& - lattice_C66(1:6,1:6,myPhase)) - - lattice_mu(myPhase) = 0.2_pReal *( lattice_C66(1,1,myPhase) & - - lattice_C66(1,2,myPhase) & - + 3.0_pReal*lattice_C66(4,4,myPhase)) ! (C11iso-C12iso)/2 with C11iso=(3*C11+2*C12+4*C44)/5 and C12iso=(C11+4*C12-2*C44)/5 - lattice_nu(myPhase) = ( lattice_C66(1,1,myPhase) & - + 4.0_pReal*lattice_C66(1,2,myPhase) & - - 2.0_pReal*lattice_C66(4,4,myPhase)) & - /( 4.0_pReal*lattice_C66(1,1,myPhase) & - + 6.0_pReal*lattice_C66(1,2,myPhase) & - + 2.0_pReal*lattice_C66(4,4,myPhase))! C12iso/(C11iso+C12iso) with C11iso=(3*C11+2*C12+4*C44)/5 and C12iso=(C11+4*C12-2*C44)/5 - lattice_C3333(1:3,1:3,1:3,1:3,myPhase) = math_Voigt66to3333(lattice_C66(1:6,1:6,myPhase)) ! Literature data is Voigt - lattice_C66(1:6,1:6,myPhase) = math_sym3333to66(lattice_C3333(1:3,1:3,1:3,1:3,myPhase)) ! 
DAMASK uses Mandel-weighting - do i = 1, 6 - if (abs(lattice_C66(i,i,myPhase)) Date: Wed, 26 Feb 2020 11:31:52 +0100 Subject: [PATCH 46/51] not supported anymore --- .../Phase_Phenopowerlaw_multiField.config | 49 ------------------- 1 file changed, 49 deletions(-) delete mode 100644 examples/ConfigFiles/Phase_Phenopowerlaw_multiField.config diff --git a/examples/ConfigFiles/Phase_Phenopowerlaw_multiField.config b/examples/ConfigFiles/Phase_Phenopowerlaw_multiField.config deleted file mode 100644 index adad3710e..000000000 --- a/examples/ConfigFiles/Phase_Phenopowerlaw_multiField.config +++ /dev/null @@ -1,49 +0,0 @@ -[Aluminum] -elasticity hooke -plasticity phenopowerlaw - -(output) resistance_slip -(output) shearrate_slip -(output) resolvedstress_slip -(output) accumulated_shear_slip -(output) resistance_twin -(output) shearrate_twin -(output) resolvedstress_twin -(output) accumulated_shear_twin - -lattice_structure fcc -Nslip 12 # per family -Ntwin 0 # per family - -c11 106.75e9 -c12 60.41e9 -c44 28.34e9 - -gdot0_slip 0.001 -n_slip 20 -tau0_slip 31e6 # per family -tausat_slip 63e6 # per family -a_slip 2.25 -gdot0_twin 0.001 -n_twin 20 -tau0_twin 31e6 # per family -h0_slipslip 75e6 -interaction_slipslip 1 1 1.4 1.4 1.4 1.4 -atol_resistance 1 - -(stiffness_degradation) damage -(stiffness_degradation) porosity -{./Phase_Damage.config} -{./Phase_Thermal.config} -{./Phase_Vacancy.config} -{./Phase_Porosity.config} -{./Phase_Hydrogen.config} -{./Source_Damage_IsoBrittle.config} -{./Source_Thermal_Dissipation.config} -{./Source_Vacancy_PhenoPlasticity.config} -{./Source_Vacancy_Irradiation.config} -{./Kinematics_Thermal_Expansion.config} -{./Kinematics_Vacancy_Strain.config} -{./Kinematics_Hydrogen_Strain.config} - - From 1c1445b50d799096c5303f5595de56322f10294e Mon Sep 17 00:00:00 2001 From: Test User Date: Wed, 26 Feb 2020 13:18:41 +0100 Subject: [PATCH 47/51] [skip ci] updated version information after successful test of v2.0.3-1789-gfdd356fa --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index e0cf49f15..67dee0b88 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -v2.0.3-1778-g822e6b71 +v2.0.3-1789-gfdd356fa From 302e78aff8f88b5357cf87b893cf3641ac0bbdb1 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Wed, 26 Feb 2020 13:19:01 +0100 Subject: [PATCH 48/51] test damage results use pytest/HDF5 instead of test class and ASCII table --- .gitlab-ci.yml | 11 ++++++++++- PRIVATE | 2 +- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index ce645dcf5..a56b108d5 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -104,7 +104,7 @@ checkout: - release ################################################################################################### -Pytest: +Pytest_python: stage: python script: - cd $DAMASKROOT/python @@ -385,6 +385,15 @@ Phenopowerlaw_singleSlip: - master - release +Pytest_grid: + stage: grid + script: + - cd pytest + - pytest + except: + - master + - release + ################################################################################################### Marc_compileIfort: stage: compileMarc diff --git a/PRIVATE b/PRIVATE index 6db5f4666..9573ce7bd 160000 --- a/PRIVATE +++ b/PRIVATE @@ -1 +1 @@ -Subproject commit 6db5f4666fc651b4de3b44ceaed3f2b848170ac9 +Subproject commit 9573ce7bd2c1a7188c1aac5b83aa76d480c2bdb0 From 42309a6d27b2f1e2a33b20979c40f1d31f34f6b9 Mon Sep 17 00:00:00 2001 From: Test User Date: Thu, 27 Feb 2020 12:41:19 +0100 Subject: [PATCH 49/51] [skip ci] updated version 
information after successful test of v2.0.3-1805-g303ab407 --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 67dee0b88..95c04de1e 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -v2.0.3-1789-gfdd356fa +v2.0.3-1805-g303ab407 From 334eb93421f7d46b4b7982f9517c8669a7a6899e Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Thu, 27 Feb 2020 21:19:56 +0100 Subject: [PATCH 50/51] infer data type automatically; needed for IPF color output --- python/damask/dadf5.py | 83 ++++++++++++++++++++---------------------- 1 file changed, 39 insertions(+), 44 deletions(-) diff --git a/python/damask/dadf5.py b/python/damask/dadf5.py index c2ca7c0ca..24d0c1708 100644 --- a/python/damask/dadf5.py +++ b/python/damask/dadf5.py @@ -1090,29 +1090,27 @@ class DADF5(): for label in (labels if isinstance(labels,list) else [labels]): for p in self.iter_visible('con_physics'): if p != 'generic': - for c in self.iter_visible('constituents'): - x = self.get_dataset_location(label) - if len(x) == 0: - continue - array = self.read_dataset(x,0) - shape = [array.shape[0],np.product(array.shape[1:])] - vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape), - deep=True,array_type= vtk.VTK_DOUBLE)) - vtk_data[-1].SetName('1_'+x[0].split('/',1)[1]) #ToDo: hard coded 1! - vtk_geom.GetCellData().AddArray(vtk_data[-1]) + for c in self.iter_visible('constituents'): + x = self.get_dataset_location(label) + if len(x) == 0: + continue + array = self.read_dataset(x,0) + shape = [array.shape[0],np.product(array.shape[1:])] + vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape),deep=True)) + vtk_data[-1].SetName('1_'+x[0].split('/',1)[1]) #ToDo: hard coded 1! + vtk_geom.GetCellData().AddArray(vtk_data[-1]) else: - x = self.get_dataset_location(label) - if len(x) == 0: - continue - array = self.read_dataset(x,0) - shape = [array.shape[0],np.product(array.shape[1:])] - vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape), - deep=True,array_type= vtk.VTK_DOUBLE)) - ph_name = re.compile(r'(?<=(constituent\/))(.*?)(?=(generic))') # identify phase name - dset_name = '1_' + re.sub(ph_name,r'',x[0].split('/',1)[1]) # removing phase name - vtk_data[-1].SetName(dset_name) - vtk_geom.GetCellData().AddArray(vtk_data[-1]) + x = self.get_dataset_location(label) + if len(x) == 0: + continue + array = self.read_dataset(x,0) + shape = [array.shape[0],np.product(array.shape[1:])] + vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape),deep=True)) + ph_name = re.compile(r'(?<=(constituent\/))(.*?)(?=(generic))') # identify phase name + dset_name = '1_' + re.sub(ph_name,r'',x[0].split('/',1)[1]) # removing phase name + vtk_data[-1].SetName(dset_name) + vtk_geom.GetCellData().AddArray(vtk_data[-1]) self.set_visible('materialpoints',materialpoints_backup) @@ -1121,38 +1119,35 @@ class DADF5(): for label in (labels if isinstance(labels,list) else [labels]): for p in self.iter_visible('mat_physics'): if p != 'generic': - for m in self.iter_visible('materialpoints'): + for m in self.iter_visible('materialpoints'): + x = self.get_dataset_location(label) + if len(x) == 0: + continue + array = self.read_dataset(x,0) + shape = [array.shape[0],np.product(array.shape[1:])] + vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape),deep=True)) + vtk_data[-1].SetName('1_'+x[0].split('/',1)[1]) #ToDo: why 1_?
+ vtk_geom.GetCellData().AddArray(vtk_data[-1]) + else: x = self.get_dataset_location(label) if len(x) == 0: - continue + continue array = self.read_dataset(x,0) shape = [array.shape[0],np.product(array.shape[1:])] - vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape), - deep=True,array_type= vtk.VTK_DOUBLE)) - vtk_data[-1].SetName('1_'+x[0].split('/',1)[1]) #ToDo: why 1_? + vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape),deep=True)) + vtk_data[-1].SetName('1_'+x[0].split('/',1)[1]) vtk_geom.GetCellData().AddArray(vtk_data[-1]) - else: - x = self.get_dataset_location(label) - if len(x) == 0: - continue - array = self.read_dataset(x,0) - shape = [array.shape[0],np.product(array.shape[1:])] - vtk_data.append(numpy_support.numpy_to_vtk(num_array=array.reshape(shape), - deep=True,array_type= vtk.VTK_DOUBLE)) - vtk_data[-1].SetName('1_'+x[0].split('/',1)[1]) - vtk_geom.GetCellData().AddArray(vtk_data[-1]) self.set_visible('constituents',constituents_backup) if mode.lower()=='cell': - writer = vtk.vtkXMLRectilinearGridWriter() if self.structured else \ - vtk.vtkXMLUnstructuredGridWriter() - x = self.get_dataset_location('u_n') - vtk_data.append(numpy_support.numpy_to_vtk(num_array=self.read_dataset(x,0), - deep=True,array_type=vtk.VTK_DOUBLE)) - vtk_data[-1].SetName('u') - vtk_geom.GetPointData().AddArray(vtk_data[-1]) + writer = vtk.vtkXMLRectilinearGridWriter() if self.structured else \ + vtk.vtkXMLUnstructuredGridWriter() + x = self.get_dataset_location('u_n') + vtk_data.append(numpy_support.numpy_to_vtk(num_array=self.read_dataset(x,0),deep=True)) + vtk_data[-1].SetName('u') + vtk_geom.GetPointData().AddArray(vtk_data[-1]) elif mode.lower()=='point': - writer = vtk.vtkXMLPolyDataWriter() + writer = vtk.vtkXMLPolyDataWriter() file_out = '{}_inc{}.{}'.format(os.path.splitext(os.path.basename(self.fname))[0], From be08af71d4048228b9c76f6acd7b2902e797e6e0 Mon Sep 17 00:00:00 2001 From: Test User Date: Fri, 28 Feb 2020 12:24:14 +0100 Subject: [PATCH 51/51] [skip ci] updated version information after successful test of v2.0.3-1808-g13c3ce00 --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 95c04de1e..433cf0451 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -v2.0.3-1805-g303ab407 +v2.0.3-1808-g13c3ce00
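Note on the elastic-constant averaging in the lattice.f90 patch above: lattice_mu and lattice_nu are the isotropic (Voigt-averaged) shear modulus and Poisson ratio obtained from the cubic constants C11, C12, C44, exactly as stated in the inline comments. The following standalone Python sketch only restates that arithmetic; the function name is illustrative (not DAMASK API) and the example constants are the aluminium values from the deleted Phase_Phenopowerlaw_multiField.config.

    def isotropic_moduli(C11, C12, C44):
        """Voigt-average isotropic elastic constants of a cubic material."""
        C11_iso = (3.0*C11 + 2.0*C12 + 4.0*C44)/5.0     # isotropic C11
        C12_iso = (    C11 + 4.0*C12 - 2.0*C44)/5.0     # isotropic C12
        mu = 0.5*(C11_iso - C12_iso)                    # = (C11-C12+3*C44)/5, cf. lattice_mu
        nu = C12_iso/(C11_iso + C12_iso)                # cf. lattice_nu
        return mu, nu

    mu, nu = isotropic_moduli(106.75e9, 60.41e9, 28.34e9)   # aluminium constants in Pa
    # gives mu ~ 26.3e9 Pa and nu ~ 0.34

Note on the dadf5.py change in PATCH 50/51: dropping the hard-coded array_type=vtk.VTK_DOUBLE lets vtk.util.numpy_support.numpy_to_vtk derive the VTK array type from the numpy dtype, which integer-valued data such as IPF colors (uint8 RGB triplets) require. A minimal sketch of that behaviour, independent of the DADF5 class; the random array is only a stand-in for real IPF color data.

    import numpy as np
    from vtk.util import numpy_support

    rgb = (np.random.rand(10,3)*255).astype(np.uint8)         # e.g. one RGB triplet per cell
    v = numpy_support.numpy_to_vtk(num_array=rgb, deep=True)  # no array_type given: inferred from rgb.dtype
    print(v.GetDataTypeAsString())                            # expected: 'unsigned char' rather than a forced 'double'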