Merge branch 'development' into docstring-sphinx-adjustments

commit 6f3b526811
@@ -203,7 +203,6 @@ Post_OrientationConversion:
   stage: postprocessing
   script:
     - OrientationConversion/test.py
-    - OrientationConversion/test2.py
   except:
     - master
    - release
PRIVATE
@@ -1 +1 @@
-Subproject commit 62bd5ede5260cd4e0e3d1c3930c474c1e045aeef
+Subproject commit c595994cd8880acadf50b5dedb79156d04d35b91
@@ -16,10 +16,6 @@ if not os.path.isdir(binDir):
 #define ToDo list
 processing_subDirs = ['pre',
                       'post',
-                      'misc',
-                      ]
-processing_extensions = ['.py',
-                         '.sh',
                       ]

 sys.stdout.write('\nsymbolic linking...\n')
@@ -31,7 +27,7 @@ for subDir in processing_subDirs:

   for theFile in os.listdir(theDir):
     theName,theExt = os.path.splitext(theFile)
-    if theExt in processing_extensions:                                                            # only consider files with proper extensions
+    if theExt in ['.py']:

       src      = os.path.abspath(os.path.join(theDir,theFile))
       sym_link = os.path.abspath(os.path.join(binDir,theName))
@@ -1,30 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import sys
-from io import StringIO
-from optparse import OptionParser
-
-import damask
-
-scriptName = os.path.splitext(os.path.basename(__file__))[0]
-scriptID   = ' '.join([scriptName,damask.version])
-
-
-#--------------------------------------------------------------------------------------------------
-# MAIN
-#--------------------------------------------------------------------------------------------------
-
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog [angfile[s]]', description = """
-Convert TSL/EDAX *.ang file to ASCIItable
-
-""", version = scriptID)
-
-(options, filenames) = parser.parse_args()
-if filenames == []: filenames = [None]
-
-for name in filenames:
-    damask.util.report(scriptName,name)
-
-    table = damask.Table.from_ang(StringIO(''.join(sys.stdin.read())) if name is None else name)
-    table.to_ASCII(sys.stdout if name is None else os.path.splitext(name)[0]+'.txt')
@@ -33,7 +33,7 @@ for filename in options.filenames:
     results = damask.Result(filename)

     if not results.structured: continue
-    coords = damask.grid_filters.cell_coord0(results.grid,results.size,results.origin)
+    coords = damask.grid_filters.cell_coord0(results.grid,results.size,results.origin).reshape(-1,3,order='F')

     N_digits = int(np.floor(np.log10(int(results.increments[-1][3:]))))+1
     N_digits = 5 # hack to keep test intact
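A recurring pattern in this merge is that flat point lists are now produced by flattening grid-shaped arrays in Fortran order (`order='F'`), so that x becomes the fastest-varying index of the table rows. A minimal NumPy sketch of that convention, using a made-up 2x3x4 grid (the shapes here are illustrative assumptions, not DAMASK internals):

```python
import numpy as np

# Hypothetical 2x3x4 cell grid; coords[i,j,k] = (i,j,k), stored with the x index first.
grid = (2, 3, 4)
coords = np.stack(np.meshgrid(np.arange(grid[0]),
                              np.arange(grid[1]),
                              np.arange(grid[2]), indexing='ij'), axis=-1).astype(float)

# Flatten to an (N,3) point list; order='F' makes x the fastest-varying index,
# which is the row ordering the ASCII tables in these scripts expect.
points = coords.reshape(-1, 3, order='F')
print(points[:4])   # [[0,0,0], [1,0,0], [0,1,0], [1,1,0]] -> x runs fastest
```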
@@ -1,49 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import sys
-from io import StringIO
-from optparse import OptionParser
-
-import damask
-
-
-scriptName = os.path.splitext(os.path.basename(__file__))[0]
-scriptID   = ' '.join([scriptName,damask.version])
-
-
-# --------------------------------------------------------------------
-# MAIN
-# --------------------------------------------------------------------
-
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
-Add column containing Cauchy stress based on deformation gradient and first Piola--Kirchhoff stress.
-
-""", version = scriptID)
-
-parser.add_option('-f','--defgrad',
-                  dest = 'defgrad',
-                  type = 'string', metavar = 'string',
-                  help = 'heading of columns containing deformation gradient [%default]')
-parser.add_option('-p','--stress',
-                  dest = 'stress',
-                  type = 'string', metavar = 'string',
-                  help = 'heading of columns containing first Piola--Kirchhoff stress [%default]')
-
-parser.set_defaults(defgrad = 'f',
-                    stress  = 'p',
-                   )
-
-(options,filenames) = parser.parse_args()
-if filenames == []: filenames = [None]
-
-for name in filenames:
-    damask.util.report(scriptName,name)
-
-    table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
-    table.add('Cauchy',
-              damask.mechanics.Cauchy(table.get(options.stress ).reshape(-1,3,3),
-                                      table.get(options.defgrad).reshape(-1,3,3)).reshape(-1,9),
-              scriptID+' '+' '.join(sys.argv[1:]))
-
-    table.to_ASCII(sys.stdout if name is None else name)
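The deleted script above delegated the actual conversion to `damask.mechanics.Cauchy`. For orientation only, here is a sketch of the standard continuum-mechanics relation sigma = det(F)^-1 * P * F^T that such a helper computes; the formula is textbook material, and the symmetrization step is my own assumption, not something shown in this diff:

```python
import numpy as np

def cauchy_stress(P, F):
    """Cauchy stress from first Piola-Kirchhoff stress P and deformation gradient F, both (N,3,3)."""
    sigma = np.einsum('i,ijk,ilk->ijl', 1.0/np.linalg.det(F), P, F)   # det(F)^-1 * P . F^T
    return (sigma + sigma.transpose(0, 2, 1))/2                       # symmetrize against round-off

P = np.broadcast_to(np.eye(3), (4, 3, 3))         # dummy data: 4 material points
F = np.broadcast_to(np.eye(3), (4, 3, 3))
print(cauchy_stress(P, F).reshape(-1, 9).shape)   # (4, 9), the column layout written to the table
```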
@@ -17,7 +17,7 @@ def volTetrahedron(coords):
     """
     Return the volume of the tetrahedron with given vertices or sides.

-    Ifvertices are given they must be in a NumPy array with shape (4,3): the
+    If vertices are given they must be in a NumPy array with shape (4,3): the
     position vectors of the 4 vertices in 3 dimensions; if the six sides are
     given, they must be an array of length 6. If both are given, the sides
     will be used in the calculation.
@@ -67,14 +67,13 @@ def volumeMismatch(size,F,nodes):
     (compatible) cube and determinant of deformation gradient at Fourier point.
     """
     coords = np.empty([8,3])
-    vMismatch = np.empty(grid[::-1])
-    volInitial = size.prod()/grid.prod()
+    vMismatch = np.empty(F.shape[:3])

 #--------------------------------------------------------------------------------------------------
 # calculate actual volume and volume resulting from deformation gradient
-    for k in range(grid[2]):
+    for k in range(grid[0]):
         for j in range(grid[1]):
-            for i in range(grid[0]):
+            for i in range(grid[2]):
                 coords[0,0:3] = nodes[k, j, i ,0:3]
                 coords[1,0:3] = nodes[k ,j, i+1,0:3]
                 coords[2,0:3] = nodes[k ,j+1,i+1,0:3]
@@ -91,8 +90,7 @@ def volumeMismatch(size,F,nodes):
                  + abs(volTetrahedron([coords[6,0:3],coords[4,0:3],coords[1,0:3],coords[5,0:3]])) \
                  + abs(volTetrahedron([coords[6,0:3],coords[4,0:3],coords[1,0:3],coords[0,0:3]]))) \
                  /np.linalg.det(F[k,j,i,0:3,0:3])
-    return vMismatch/volInitial
+    return vMismatch/(size.prod()/grid.prod())

-

 def shapeMismatch(size,F,nodes,centres):
@@ -103,35 +101,34 @@ def shapeMismatch(size,F,nodes,centres):
     the corners of reconstructed (combatible) volume element and the vectors calculated by deforming
     the initial volume element with the current deformation gradient.
     """
-    coordsInitial = np.empty([8,3])
-    sMismatch = np.empty(grid[::-1])
+    sMismatch = np.empty(F.shape[:3])

 #--------------------------------------------------------------------------------------------------
 # initial positions
-    coordsInitial[0,0:3] = [-size[0]/grid[0],-size[1]/grid[1],-size[2]/grid[2]]
-    coordsInitial[1,0:3] = [+size[0]/grid[0],-size[1]/grid[1],-size[2]/grid[2]]
-    coordsInitial[2,0:3] = [+size[0]/grid[0],+size[1]/grid[1],-size[2]/grid[2]]
-    coordsInitial[3,0:3] = [-size[0]/grid[0],+size[1]/grid[1],-size[2]/grid[2]]
-    coordsInitial[4,0:3] = [-size[0]/grid[0],-size[1]/grid[1],+size[2]/grid[2]]
-    coordsInitial[5,0:3] = [+size[0]/grid[0],-size[1]/grid[1],+size[2]/grid[2]]
-    coordsInitial[6,0:3] = [+size[0]/grid[0],+size[1]/grid[1],+size[2]/grid[2]]
-    coordsInitial[7,0:3] = [-size[0]/grid[0],+size[1]/grid[1],+size[2]/grid[2]]
-    coordsInitial = coordsInitial/2.0
+    delta = size/grid*.5
+    coordsInitial = np.vstack((delta * np.array((-1,-1,-1)),
+                               delta * np.array((+1,-1,-1)),
+                               delta * np.array((+1,+1,-1)),
+                               delta * np.array((-1,+1,-1)),
+                               delta * np.array((-1,-1,+1)),
+                               delta * np.array((+1,-1,+1)),
+                               delta * np.array((+1,+1,+1)),
+                               delta * np.array((-1,+1,+1))))

 #--------------------------------------------------------------------------------------------------
 # compare deformed original and deformed positions to actual positions
-    for k in range(grid[2]):
+    for k in range(grid[0]):
         for j in range(grid[1]):
-            for i in range(grid[0]):
+            for i in range(grid[2]):
                 sMismatch[k,j,i] = \
                   + np.linalg.norm(nodes[k, j, i ,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[0,0:3]))\
-                  + np.linalg.norm(nodes[k, j, i+1,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[1,0:3]))\
-                  + np.linalg.norm(nodes[k, j+1,i+1,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[2,0:3]))\
+                  + np.linalg.norm(nodes[k+1,j, i ,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[1,0:3]))\
+                  + np.linalg.norm(nodes[k+1,j+1,i ,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[2,0:3]))\
                   + np.linalg.norm(nodes[k, j+1,i ,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[3,0:3]))\
-                  + np.linalg.norm(nodes[k+1,j, i ,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[4,0:3]))\
+                  + np.linalg.norm(nodes[k, j, i+1,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[4,0:3]))\
                   + np.linalg.norm(nodes[k+1,j, i+1,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[5,0:3]))\
                   + np.linalg.norm(nodes[k+1,j+1,i+1,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[6,0:3]))\
-                  + np.linalg.norm(nodes[k+1,j+1,i ,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[7,0:3]))
+                  + np.linalg.norm(nodes[k ,j+1,i+1,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[7,0:3]))
     return sMismatch

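The loop-bound and node-index swaps above follow a change of array layout: the mismatch fields used to be allocated as `np.empty(grid[::-1])`, i.e. (z,y,x), and are now allocated with the shape of F, i.e. (x,y,z). A small sketch of just that layout difference, with made-up grid values:

```python
import numpy as np

grid = np.array([2, 3, 4])              # hypothetical (x, y, z) cell counts
F = np.zeros(tuple(grid) + (3, 3))      # deformation gradient stored with the x index first

old_layout = np.empty(grid[::-1])       # previous convention: shape (z, y, x) = (4, 3, 2)
new_layout = np.empty(F.shape[:3])      # new convention: follows F, shape (x, y, z) = (2, 3, 4)
print(old_layout.shape, new_layout.shape)
```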
@@ -178,20 +175,20 @@ for name in filenames:
     table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
     grid,size,origin = damask.grid_filters.cell_coord0_gridSizeOrigin(table.get(options.pos))

-    F = table.get(options.defgrad).reshape(grid[2],grid[1],grid[0],3,3)
+    F = table.get(options.defgrad).reshape(tuple(grid)+(-1,),order='F').reshape(tuple(grid)+(3,3))
     nodes = damask.grid_filters.node_coord(size,F)

     if options.shape:
         centers = damask.grid_filters.cell_coord(size,F)
-        shapeMismatch = shapeMismatch( size,table.get(options.defgrad).reshape(grid[2],grid[1],grid[0],3,3),nodes,centers)
+        shapeMismatch = shapeMismatch(size,F,nodes,centers)
         table.add('shapeMismatch(({}))'.format(options.defgrad),
-                  shapeMismatch.reshape(-1,1),
+                  shapeMismatch.reshape(-1,1,order='F'),
                   scriptID+' '+' '.join(sys.argv[1:]))

     if options.volume:
-        volumeMismatch = volumeMismatch(size,table.get(options.defgrad).reshape(grid[2],grid[1],grid[0],3,3),nodes)
+        volumeMismatch = volumeMismatch(size,F,nodes)
         table.add('volMismatch(({}))'.format(options.defgrad),
-                  volumeMismatch.reshape(-1,1),
+                  volumeMismatch.reshape(-1,1,order='F'),
                   scriptID+' '+' '.join(sys.argv[1:]))

     table.to_ASCII(sys.stdout if name is None else name)
@@ -49,9 +49,10 @@ for name in filenames:
     for label in options.labels:
         field = table.get(label)
         shape = (3,) if np.prod(field.shape)//np.prod(grid) == 3 else (3,3)                        # vector or tensor
-        field = field.reshape(np.append(grid[::-1],shape))
+        field = field.reshape(tuple(grid)+(-1,),order='F').reshape(tuple(grid)+shape)
+        curl = damask.grid_filters.curl(size,field)
         table.add('curlFFT({})'.format(label),
-                  damask.grid_filters.curl(size[::-1],field).reshape(-1,np.prod(shape)),
+                  curl.reshape(tuple(grid)+(-1,)).reshape(-1,np.prod(shape),order='F'),
                   scriptID+' '+' '.join(sys.argv[1:]))

     table.to_ASCII(sys.stdout if name is None else name)
@@ -1,45 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import sys
-from io import StringIO
-from optparse import OptionParser
-
-import numpy as np
-
-import damask
-
-scriptName = os.path.splitext(os.path.basename(__file__))[0]
-scriptID   = ' '.join([scriptName,damask.version])
-
-
-# --------------------------------------------------------------------
-# MAIN
-# --------------------------------------------------------------------
-
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
-Add column(s) containing determinant of requested tensor column(s).
-
-""", version = scriptID)
-
-parser.add_option('-t','--tensor',
-                  dest = 'tensor',
-                  action = 'extend', metavar = '<string LIST>',
-                  help = 'heading of columns containing tensor field values')
-
-(options,filenames) = parser.parse_args()
-if filenames == []: filenames = [None]
-
-if options.tensor is None:
-    parser.error('no data column specified.')
-
-for name in filenames:
-    damask.util.report(scriptName,name)
-
-    table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
-    for tensor in options.tensor:
-        table.add('det({})'.format(tensor),
-                  np.linalg.det(table.get(tensor).reshape(-1,3,3)),
-                  scriptID+' '+' '.join(sys.argv[1:]))
-
-    table.to_ASCII(sys.stdout if name is None else name)
@@ -1,51 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import sys
-from io import StringIO
-from optparse import OptionParser
-
-import damask
-
-scriptName = os.path.splitext(os.path.basename(__file__))[0]
-scriptID   = ' '.join([scriptName,damask.version])
-
-
-# --------------------------------------------------------------------
-# MAIN
-# --------------------------------------------------------------------
-
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(2)]', description = """
-Add column(s) containing deviator of requested tensor column(s).
-
-""", version = scriptID)
-
-parser.add_option('-t','--tensor',
-                  dest = 'tensor',
-                  action = 'extend', metavar='<string LIST>',
-                  help = 'heading of columns containing tensor field values')
-parser.add_option('-s','--spherical',
-                  dest = 'spherical',
-                  action = 'store_true',
-                  help = 'report spherical part of tensor (hydrostatic component, pressure)')
-
-(options,filenames) = parser.parse_args()
-if filenames == []: filenames = [None]
-
-if options.tensor is None:
-    parser.error('no data column specified...')
-
-for name in filenames:
-    damask.util.report(scriptName,name)
-
-    table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
-    for tensor in options.tensor:
-        table.add('dev({})'.format(tensor),
-                  damask.mechanics.deviatoric_part(table.get(tensor).reshape(-1,3,3)).reshape(-1,9),
-                  scriptID+' '+' '.join(sys.argv[1:]))
-        if options.spherical:
-            table.add('sph({})'.format(tensor),
-                      damask.mechanics.spherical_part(table.get(tensor).reshape(-1,3,3)),
-                      scriptID+' '+' '.join(sys.argv[1:]))
-
-    table.to_ASCII(sys.stdout if name is None else name)
@@ -5,8 +5,6 @@ import sys
 from io import StringIO
 from optparse import OptionParser

-import numpy as np
-
 import damask


@@ -52,22 +50,22 @@ for name in filenames:
     table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
     grid,size,origin = damask.grid_filters.cell_coord0_gridSizeOrigin(table.get(options.pos))

-    F = table.get(options.f).reshape(np.append(grid[::-1],(3,3)))
+    F = table.get(options.f).reshape(tuple(grid)+(-1,),order='F').reshape(tuple(grid)+(3,3))
     if options.nodal:
-        table = damask.Table(damask.grid_filters.node_coord0(grid[::-1],size[::-1]).reshape(-1,3),
+        table = damask.Table(damask.grid_filters.node_coord0(grid,size).reshape(-1,3,order='F'),
                              {'pos':(3,)})
         table.add('avg({}).{}'.format(options.f,options.pos),
-                  damask.grid_filters.node_displacement_avg(size[::-1],F).reshape(-1,3),
+                  damask.grid_filters.node_displacement_avg(size,F).reshape(-1,3,order='F'),
                   scriptID+' '+' '.join(sys.argv[1:]))
         table.add('fluct({}).{}'.format(options.f,options.pos),
-                  damask.grid_filters.node_displacement_fluct(size[::-1],F).reshape(-1,3),
+                  damask.grid_filters.node_displacement_fluct(size,F).reshape(-1,3,order='F'),
                   scriptID+' '+' '.join(sys.argv[1:]))
         table.to_ASCII(sys.stdout if name is None else os.path.splitext(name)[0]+'_nodal.txt')
     else:
         table.add('avg({}).{}'.format(options.f,options.pos),
-                  damask.grid_filters.cell_displacement_avg(size[::-1],F).reshape(-1,3),
+                  damask.grid_filters.cell_displacement_avg(size,F).reshape(-1,3,order='F'),
                   scriptID+' '+' '.join(sys.argv[1:]))
         table.add('fluct({}).{}'.format(options.f,options.pos),
-                  damask.grid_filters.cell_displacement_fluct(size[::-1],F).reshape(-1,3),
+                  damask.grid_filters.cell_displacement_fluct(size,F).reshape(-1,3,order='F'),
                   scriptID+' '+' '.join(sys.argv[1:]))
         table.to_ASCII(sys.stdout if name is None else name)
@@ -49,9 +49,10 @@ for name in filenames:
     for label in options.labels:
         field = table.get(label)
         shape = (3,) if np.prod(field.shape)//np.prod(grid) == 3 else (3,3)                        # vector or tensor
-        field = field.reshape(np.append(grid[::-1],shape))
+        field = field.reshape(tuple(grid)+(-1,),order='F').reshape(tuple(grid)+shape)
+        div = damask.grid_filters.divergence(size,field)
         table.add('divFFT({})'.format(label),
-                  damask.grid_filters.divergence(size[::-1],field).reshape(-1,np.prod(shape)//3),
+                  div.reshape(tuple(grid)+(-1,)).reshape(-1,np.prod(shape)//3,order='F'),
                   scriptID+' '+' '.join(sys.argv[1:]))

     table.to_ASCII(sys.stdout if name is None else name)
@@ -49,9 +49,10 @@ for name in filenames:
     for label in options.labels:
         field = table.get(label)
         shape = (1,) if np.prod(field.shape)//np.prod(grid) == 1 else (3,)                         # scalar or vector
-        field = field.reshape(np.append(grid[::-1],shape))
+        field = field.reshape(tuple(grid)+(-1,),order='F')
+        grad = damask.grid_filters.gradient(size,field)
         table.add('gradFFT({})'.format(label),
-                  damask.grid_filters.gradient(size[::-1],field).reshape(-1,np.prod(shape)*3),
+                  grad.reshape(tuple(grid)+(-1,)).reshape(-1,np.prod(shape)*3,order='F'),
                   scriptID+' '+' '.join(sys.argv[1:]))

     table.to_ASCII(sys.stdout if name is None else name)
@@ -1,41 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import sys
-from io import StringIO
-from optparse import OptionParser
-
-import damask
-
-
-scriptName = os.path.splitext(os.path.basename(__file__))[0]
-scriptID   = ' '.join([scriptName,damask.version])
-
-
-# --------------------------------------------------------------------
-# MAIN
-# --------------------------------------------------------------------
-
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
-Add info lines to ASCIItable header.
-
-""", version = scriptID)
-
-parser.add_option('-i',
-                  '--info',
-                  dest = 'info', action = 'extend', metavar = '<string LIST>',
-                  help = 'items to add')
-
-(options,filenames) = parser.parse_args()
-if filenames == []: filenames = [None]
-
-if options.info is None:
-    parser.error('no info specified.')
-
-for name in filenames:
-    damask.util.report(scriptName,name)
-
-    table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
-    table.comments += options.info
-
-    table.to_ASCII(sys.stdout if name is None else name)
@@ -1,56 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import sys
-from io import StringIO
-from optparse import OptionParser
-
-import damask
-
-
-scriptName = os.path.splitext(os.path.basename(__file__))[0]
-scriptID   = ' '.join([scriptName,damask.version])
-
-
-# --------------------------------------------------------------------
-# MAIN
-# --------------------------------------------------------------------
-
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
-Add vonMises equivalent values for symmetric part of requested strains and/or stresses.
-
-""", version = scriptID)
-
-parser.add_option('-e','--strain',
-                  dest = 'strain',
-                  action = 'extend', metavar = '<string LIST>',
-                  help = 'heading(s) of columns containing strain tensors')
-parser.add_option('-s','--stress',
-                  dest = 'stress',
-                  action = 'extend', metavar = '<string LIST>',
-                  help = 'heading(s) of columns containing stress tensors')
-
-parser.set_defaults(strain = [],
-                    stress = [],
-                   )
-(options,filenames) = parser.parse_args()
-
-if options.stress is [] and options.strain is []:
-    parser.error('no data column specified...')
-
-if filenames == []: filenames = [None]
-
-for name in filenames:
-    damask.util.report(scriptName,name)
-
-    table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
-    for strain in options.strain:
-        table.add('Mises({})'.format(strain),
-                  damask.mechanics.Mises_strain(damask.mechanics.symmetric(table.get(strain).reshape(-1,3,3))),
-                  scriptID+' '+' '.join(sys.argv[1:]))
-    for stress in options.stress:
-        table.add('Mises({})'.format(stress),
-                  damask.mechanics.Mises_stress(damask.mechanics.symmetric(table.get(stress).reshape(-1,3,3))),
-                  scriptID+' '+' '.join(sys.argv[1:]))
-
-    table.to_ASCII(sys.stdout if name is None else name)
@@ -172,7 +172,7 @@ for name in filenames:

         elif inputtype == 'matrix':
             d = representations['matrix'][1]
-            o = damask.Rotation.fromMatrix(list(map(float,table.data[column:column+d])))
+            o = damask.Rotation.fromMatrix(np.array(list(map(float,table.data[column:column+d]))).reshape(3,3))

         elif inputtype == 'frame':
             M = np.array(list(map(float,table.data[column[0]:column[0]+3] + \
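The changed call above now hands `Rotation.fromMatrix` a proper (3,3) array instead of a flat list of nine floats. The reshaping itself is plain NumPy; a tiny sketch with a dummy table row:

```python
import numpy as np

tokens = '1 0 0 0 1 0 0 0 1'.split()                     # nine matrix components read from a table row
M = np.array(list(map(float, tokens))).reshape(3, 3)     # (3,3) array expected by the constructor
print(M)
```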
@@ -214,7 +214,7 @@ for name in filenames:

     outputAlive = True
     while outputAlive and table.data_read():                                                       # read next data line of ASCII table
-        o = damask.Rotation(list(map(float,table.data[column:column+4])))
+        o = damask.Rotation(np.array(list(map(float,table.data[column:column+4]))))

         table.data_append( np.abs( np.sum(slip_direction * (o * force) ,axis=1) \
                                  * np.sum(slip_normal    * (o * normal),axis=1)))
@@ -1,44 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import sys
-from io import StringIO
-from optparse import OptionParser
-
-import damask
-
-scriptName = os.path.splitext(os.path.basename(__file__))[0]
-scriptID   = ' '.join([scriptName,damask.version])
-
-
-# --------------------------------------------------------------------
-# MAIN
-# --------------------------------------------------------------------
-
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
-Append data of ASCIItable(s) column-wise.
-
-""", version = scriptID)
-
-parser.add_option('-a', '--add','--table',
-                  dest = 'table',
-                  action = 'extend', metavar = '<string LIST>',
-                  help = 'tables to add')
-
-(options,filenames) = parser.parse_args()
-if filenames == []: filenames = [None]
-
-if options.table is None:
-    parser.error('no table specified.')
-
-for name in filenames:
-    damask.util.report(scriptName,name)
-
-    table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
-
-    for addTable in options.table:
-        table2 = damask.Table.from_ASCII(addTable)
-        table2.data = table2.data[:table.data.shape[0]]
-        table.join(table2)
-
-    table.to_ASCII(sys.stdout if name is None else name)
@@ -91,7 +91,7 @@ for name in filenames:
     table = damask.Table(averagedDown,table.shapes,table.comments)

     coords = damask.grid_filters.cell_coord0(packedGrid,size,shift/packedGrid*size+origin)
-    table.set(options.pos, coords.reshape(-1,3))
+    table.set(options.pos, coords.reshape(-1,3,order='F'))


     outname = os.path.join(os.path.dirname(name),prefix+os.path.basename(name))
@@ -59,13 +59,13 @@ for name in filenames:
     packing = np.array(options.packing,'i')
     outSize = grid*packing

-    data = table.data.values.reshape(tuple(grid)+(-1,))
-    blownUp = ndimage.interpolation.zoom(data,tuple(packing)+(1,),order=0,mode='nearest').reshape(outSize.prod(),-1)
+    data = table.data.values.reshape(tuple(grid)+(-1,),order='F')
+    blownUp = ndimage.interpolation.zoom(data,tuple(packing)+(1,),order=0,mode='nearest').reshape(outSize.prod(),-1,order='F')

     table = damask.Table(blownUp,table.shapes,table.comments)

     coords = damask.grid_filters.cell_coord0(outSize,size,origin)
-    table.set(options.pos,coords.reshape(-1,3))
+    table.set(options.pos,coords.reshape(-1,3,order='F'))
     table.set('elem',np.arange(1,outSize.prod()+1))

     outname = os.path.join(os.path.dirname(name),prefix+os.path.basename(name))
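The blow-up above relies on zero-order (`order=0`) zooming, which simply repeats each cell value by the packing factor instead of interpolating. A self-contained sketch with dummy 2x2 data (the modern `scipy.ndimage.zoom` spelling is used here instead of the older `ndimage.interpolation.zoom` alias):

```python
import numpy as np
from scipy import ndimage

data = np.array([[1., 2.],
                 [3., 4.]])                                      # hypothetical 2x2 cell data
blownUp = ndimage.zoom(data, (2, 2), order=0, mode='nearest')    # order=0: repeat cells, no interpolation
print(blownUp)
# [[1. 1. 2. 2.]
#  [1. 1. 2. 2.]
#  [3. 3. 4. 4.]
#  [3. 3. 4. 4.]]
```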
@@ -1,43 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import sys
-from io import StringIO
-from optparse import OptionParser
-
-import damask
-
-scriptName = os.path.splitext(os.path.basename(__file__))[0]
-scriptID   = ' '.join([scriptName,damask.version])
-
-
-# --------------------------------------------------------------------
-# MAIN
-# --------------------------------------------------------------------
-
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
-Append data of ASCIItable(s) row-wise.
-
-""", version = scriptID)
-
-parser.add_option('-a', '--add','--table',
-                  dest = 'table',
-                  action = 'extend', metavar = '<string LIST>',
-                  help = 'tables to add')
-
-(options,filenames) = parser.parse_args()
-if filenames == []: filenames = [None]
-
-if options.table is None:
-    parser.error('no table specified.')
-
-for name in filenames:
-    damask.util.report(scriptName,name)
-
-    table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
-
-    for growTable in options.table:
-        table2 = damask.Table.from_ASCII(growTable)
-        table.append(table2)
-
-    table.to_ASCII(sys.stdout if name is None else name)
@@ -2,6 +2,7 @@

 import os
 import sys
+from io import StringIO
 from optparse import OptionParser

 import numpy as np
@@ -41,73 +42,20 @@ parser.set_defaults(label = [],
                    )

 (options,filenames) = parser.parse_args()

-if len(options.label) == 0:
-  parser.error('no labels specified.')
-
-# --- loop over input files -------------------------------------------------------------------------
-
 if filenames == []: filenames = [None]

 for name in filenames:
-    try:
-        table = damask.ASCIItable(name = name)
-    except IOError:
-        continue
     damask.util.report(scriptName,name)
-
-# ------------------------------------------ read header ------------------------------------------
-
-    table.head_read()
-
-# ------------------------------------------ process labels ---------------------------------------
-
-    errors  = []
-    remarks = []
-    columns = []
-    dims    = []
-
-    indices    = table.label_index    (options.label)
-    dimensions = table.label_dimension(options.label)
-    for i,index in enumerate(indices):
-        if index == -1: remarks.append('label "{}" not present...'.format(options.label[i]))
-        else:
-            columns.append(index)
-            dims.append(dimensions[i])
-
-    if remarks != []: damask.util.croak(remarks)
-    if errors  != []:
-        damask.util.croak(errors)
-        table.close(dismiss = True)
-        continue
-
-# ------------------------------------------ assemble header ---------------------------------------
-
-    randomSeed = int(os.urandom(4).hex(), 16) if options.randomSeed is None else options.randomSeed # random seed per file
-    np.random.seed(randomSeed)
-
-    table.info_append([scriptID + '\t' + ' '.join(sys.argv[1:]),
-                       'random seed {}'.format(randomSeed),
-                      ])
-    table.head_write()
-
-# ------------------------------------------ process data ------------------------------------------
-
-    table.data_readArray()                                                  # read all data at once
-    for col,dim in zip(columns,dims):
-        if options.unique:
-            s = set(map(tuple,table.data[:,col:col+dim]))                   # generate set of (unique) values
-            uniques = np.array(map(np.array,s))                             # translate set to np.array
-            shuffler = dict(zip(s,np.random.permutation(len(s))))           # random permutation
-            table.data[:,col:col+dim] = uniques[np.array(map(lambda x: shuffler[tuple(x)],
-                                                             table.data[:,col:col+dim]))]          # fill table with mapped uniques
-        else:
-            np.random.shuffle(table.data[:,col:col+dim])                    # independently shuffle every row
-
-# ------------------------------------------ output result -----------------------------------------
-
-    table.data_writeArray()
-
-# ------------------------------------------ output finalization -----------------------------------
-
-    table.close()                                                           # close ASCII tables
+    table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
+
+    randomSeed = int(os.urandom(4).hex(), 16) if options.randomSeed is None else options.randomSeed # random seed per file
+    rng = np.random.default_rng(randomSeed)
+
+    for label in options.label:
+        data = table.get(label)
+        uniques,inverse = np.unique(data,return_inverse=True,axis=0) if options.unique else (data,np.arange(len(data)))
+        rng.shuffle(uniques)
+        table.set(label,uniques[inverse], scriptID+' '+' '.join(sys.argv[1:]))
+
+    table.to_ASCII(sys.stdout if name is None else name)
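The rewritten shuffling above keeps duplicate rows consistent by permuting only the unique values and mapping them back through the inverse index. A minimal sketch of that idea with made-up data (using `inverse.reshape(-1)` to stay robust across NumPy versions):

```python
import numpy as np

rng = np.random.default_rng(20200101)                    # fixed seed, analogous to randomSeed above

data = np.array([[1.], [2.], [1.], [3.], [2.]])
uniques, inverse = np.unique(data, return_inverse=True, axis=0)
rng.shuffle(uniques)                                     # permute the distinct rows in place
shuffled = uniques[inverse.reshape(-1)]                  # duplicates stay mapped to the same new value
print(shuffled.ravel())
```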
@@ -1,50 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import sys
-from io import StringIO
-from optparse import OptionParser
-
-import damask
-
-
-scriptName = os.path.splitext(os.path.basename(__file__))[0]
-scriptID   = ' '.join([scriptName,damask.version])
-
-
-# --------------------------------------------------------------------
-# MAIN
-# --------------------------------------------------------------------
-
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
-Rename scalar, vectorial, and/or tensorial data header labels.
-
-""", version = scriptID)
-
-parser.add_option('-l','--label',
-                  dest = 'label',
-                  action = 'extend', metavar='<string LIST>',
-                  help = 'column(s) to rename')
-parser.add_option('-s','--substitute',
-                  dest = 'substitute',
-                  action = 'extend', metavar='<string LIST>',
-                  help = 'new column label(s)')
-
-parser.set_defaults(label = [],
-                    substitute = [],
-                   )
-
-(options,filenames) = parser.parse_args()
-if filenames == []: filenames = [None]
-
-if len(options.label) != len(options.substitute):
-    parser.error('number of column labels and substitutes do not match.')
-
-for name in filenames:
-    damask.util.report(scriptName,name)
-
-    table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
-    for label,substitute in zip(options.label,options.substitute):
-        table.rename(label,substitute,scriptID+' '+' '.join(sys.argv[1:]))
-
-    table.to_ASCII(sys.stdout if name is None else name)
@@ -1,49 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import sys
-from io import StringIO
-from optparse import OptionParser
-
-import damask
-
-
-scriptName = os.path.splitext(os.path.basename(__file__))[0]
-scriptID   = ' '.join([scriptName,damask.version])
-
-
-# --------------------------------------------------------------------
-# MAIN
-# --------------------------------------------------------------------
-
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
-Uniformly scale column values by given factor.
-
-""", version = scriptID)
-
-parser.add_option('-l','--label',
-                  dest = 'labels',
-                  action = 'extend', metavar = '<string LIST>',
-                  help ='column(s) to scale')
-parser.add_option('-f','--factor',
-                  dest = 'factor',
-                  action = 'extend', metavar='<float LIST>',
-                  help = 'factor(s) per column')
-
-parser.set_defaults(label  = [],
-                    factor = [])
-
-(options,filenames) = parser.parse_args()
-if filenames == []: filenames = [None]
-
-if len(options.labels) != len(options.factor):
-    parser.error('number of column labels and factors do not match.')
-
-for name in filenames:
-    damask.util.report(scriptName,name)
-
-    table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
-    for label,factor in zip(options.labels,options.factor):
-        table.set(label,table.get(label)*float(factor),scriptID+' '+' '.join(sys.argv[1:]))
-
-    table.to_ASCII(sys.stdout if name is None else name)
@@ -1,49 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import sys
-from io import StringIO
-from optparse import OptionParser
-
-import damask
-
-
-scriptName = os.path.splitext(os.path.basename(__file__))[0]
-scriptID   = ' '.join([scriptName,damask.version])
-
-
-# --------------------------------------------------------------------
-# MAIN
-# --------------------------------------------------------------------
-
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
-Uniformly shift column values by given offset.
-
-""", version = scriptID)
-
-parser.add_option('-l','--label',
-                  dest = 'labels',
-                  action = 'extend', metavar = '<string LIST>',
-                  help ='column(s) to shift')
-parser.add_option('-o','--offset',
-                  dest = 'offset',
-                  action = 'extend', metavar='<float LIST>',
-                  help = 'offset(s) per column')
-
-parser.set_defaults(label  = [],
-                    offset = [])
-
-(options,filenames) = parser.parse_args()
-if filenames == []: filenames = [None]
-
-if len(options.labels) != len(options.offset):
-    parser.error('number of column labels and offsets do not match.')
-
-for name in filenames:
-    damask.util.report(scriptName,name)
-
-    table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
-    for label,offset in zip(options.labels,options.offset):
-        table.set(label,table.get(label)+float(offset),scriptID+' '+' '.join(sys.argv[1:]))
-
-    table.to_ASCII(sys.stdout if name is None else name)
@@ -1,50 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import sys
-from io import StringIO
-from optparse import OptionParser
-
-import damask
-
-
-scriptName = os.path.splitext(os.path.basename(__file__))[0]
-scriptID   = ' '.join([scriptName,damask.version])
-
-
-# --------------------------------------------------------------------
-# MAIN
-# --------------------------------------------------------------------
-
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
-Sort rows by given (or all) column label(s).
-
-Examples:
-With coordinates in columns "x", "y", and "z"; sorting with x slowest and z fastest varying index: --label x,y,z.
-""", version = scriptID)
-
-
-parser.add_option('-l','--label',
-                  dest = 'labels',
-                  action = 'extend', metavar = '<string LIST>',
-                  help = 'list of column labels (a,b,c,...)')
-parser.add_option('-r','--reverse',
-                  dest = 'reverse',
-                  action = 'store_true',
-                  help = 'sort in reverse')
-
-parser.set_defaults(reverse = False,
-                   )
-
-(options,filenames) = parser.parse_args()
-if filenames == []: filenames = [None]
-
-if options.labels is None:
-    parser.error('no labels specified.')
-for name in filenames:
-    damask.util.report(scriptName,name)
-
-    table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
-    table.sort_by(options.labels,not options.reverse)
-
-    table.to_ASCII(sys.stdout if name is None else name)
@@ -1,42 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import sys
-from io import StringIO
-from optparse import OptionParser
-
-import damask
-
-
-scriptName = os.path.splitext(os.path.basename(__file__))[0]
-scriptID   = ' '.join([scriptName,damask.version])
-
-
-#--------------------------------------------------------------------------------------------------
-# MAIN
-#--------------------------------------------------------------------------------------------------
-
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [geomfile(s)]', description = """
-Smooth microstructure by selecting most frequent index within given stencil at each location.
-
-""", version=scriptID)
-
-parser.add_option('-s','--stencil',
-                  dest = 'stencil',
-                  type = 'int', metavar = 'int',
-                  help = 'size of smoothing stencil [%default]')
-
-parser.set_defaults(stencil = 3)
-
-(options, filenames) = parser.parse_args()
-
-
-if filenames == []: filenames = [None]
-
-for name in filenames:
-    damask.util.report(scriptName,name)
-
-    geom = damask.Geom.from_file(StringIO(''.join(sys.stdin.read())) if name is None else name)
-    damask.util.croak(geom.clean(options.stencil))
-    geom.add_comments(scriptID + ' ' + ' '.join(sys.argv[1:]))
-    geom.to_file(sys.stdout if name is None else name,pack=False)
@@ -1,66 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import sys
-from optparse import OptionParser
-
-import numpy as np
-
-import damask
-
-
-scriptName = os.path.splitext(os.path.basename(__file__))[0]
-scriptID   = ' '.join([scriptName,damask.version])
-
-
-# --------------------------------------------------------------------
-# MAIN
-# --------------------------------------------------------------------
-
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [geomfile(s)]', description = """
-Generate homogeneous geometry.
-
-""", version = scriptID)
-
-parser.add_option('-g','--grid',
-                  dest = 'grid',
-                  type = 'int', nargs = 3, metavar = ' '.join(['int']*3),
-                  help = 'a,b,c grid of hexahedral box %default')
-parser.add_option('-s', '--size',
-                  dest = 'size',
-                  type = 'float', nargs = 3, metavar = ' '.join(['float']*3),
-                  help = 'x,y,z of geometry size')
-parser.add_option('-o','--origin',
-                  dest = 'origin',
-                  type = 'float', nargs = 3, metavar = ' '.join(['float']*3),
-                  help = 'x,y,z of geometry origin %default')
-parser.add_option('--homogenization',
-                  dest = 'homogenization',
-                  type = 'int', metavar = 'int',
-                  help = 'homogenization index [%default]')
-parser.add_option('-f','--fill',
-                  dest = 'fill',
-                  type = 'float', metavar = 'int',
-                  help = 'microstructure index [%default]')
-
-parser.set_defaults(grid = (16,16,16),
-                    origin = (0.,0.,0.),
-                    homogenization = 1,
-                    fill = 1,
-                   )
-
-(options, filename) = parser.parse_args()
-
-
-name = None if filename == [] else filename[0]
-damask.util.report(scriptName,name)
-
-dtype = float if np.isnan(options.fill) or int(options.fill) != options.fill else int
-geom = damask.Geom(microstructure=np.full(options.grid,options.fill,dtype=dtype),
-                   size=options.size,
-                   origin=options.origin,
-                   homogenization=options.homogenization,
-                   comments=scriptID + ' ' + ' '.join(sys.argv[1:]))
-damask.util.croak(geom)
-
-geom.to_file(sys.stdout if name is None else name,pack=False)
@@ -24,22 +24,22 @@ def findClosestSeed(seeds, weights, point):
 def Laguerre_tessellation(grid, size, seeds, weights, origin = np.zeros(3), periodic = True, cpus = 2):

     if periodic:
-        weights_p = np.tile(weights,27).flatten(order='F')                                 # Laguerre weights (1,2,3,1,2,3,...,1,2,3)
+        weights_p = np.tile(weights.squeeze(),27)                                          # Laguerre weights (1,2,3,1,2,3,...,1,2,3)
         seeds_p = np.vstack((seeds  -np.array([size[0],0.,0.]),seeds,  seeds  +np.array([size[0],0.,0.])))
         seeds_p = np.vstack((seeds_p-np.array([0.,size[1],0.]),seeds_p,seeds_p+np.array([0.,size[1],0.])))
         seeds_p = np.vstack((seeds_p-np.array([0.,0.,size[2]]),seeds_p,seeds_p+np.array([0.,0.,size[2]])))
-        coords  = damask.grid_filters.cell_coord0(grid*3,size*3,-origin-size).reshape(-1,3,order='F')
+        coords  = damask.grid_filters.cell_coord0(grid*3,size*3,-origin-size).reshape(-1,3)
     else:
-        weights_p = weights.flatten()
+        weights_p = weights.squeeze()
         seeds_p   = seeds
-        coords    = damask.grid_filters.cell_coord0(grid,size,-origin).reshape(-1,3,order='F')
+        coords    = damask.grid_filters.cell_coord0(grid,size,-origin).reshape(-1,3)

     if cpus > 1:
         pool = multiprocessing.Pool(processes = cpus)
         result = pool.map_async(partial(findClosestSeed,seeds_p,weights_p), [coord for coord in coords])
         pool.close()
         pool.join()
-        closest_seed = np.array(result.get())
+        closest_seed = np.array(result.get()).reshape(-1,3)
     else:
         closest_seed= np.array([findClosestSeed(seeds_p,weights_p,coord) for coord in coords])

@@ -52,7 +52,7 @@ def Laguerre_tessellation(grid, size, seeds, weights, origin = np.zeros(3), peri

 def Voronoi_tessellation(grid, size, seeds, origin = np.zeros(3), periodic = True):

-    coords = damask.grid_filters.cell_coord0(grid,size,-origin).reshape(-1,3,order='F')
+    coords = damask.grid_filters.cell_coord0(grid,size,-origin).reshape(-1,3)
     KDTree = spatial.cKDTree(seeds,boxsize=size) if periodic else spatial.cKDTree(seeds)
     devNull,closest_seed = KDTree.query(coords)

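The Voronoi branch above assigns every grid point to its closest seed with a k-d tree; passing `boxsize` makes the distance metric periodic. A small self-contained sketch with two seeds in a unit box (all values are made up):

```python
import numpy as np
from scipy import spatial

size   = np.array([1., 1., 1.])                          # hypothetical periodic box
seeds  = np.array([[0.1, 0.1, 0.1],
                   [0.6, 0.7, 0.8]])
points = np.array([[0.95, 0.95, 0.95],                   # nearest to seed 0 only across the boundary
                   [0.50, 0.60, 0.70]])

tree = spatial.cKDTree(seeds, boxsize=size)              # boxsize -> periodic distances
dist, closest_seed = tree.query(points)
print(closest_seed)                                      # [0 1]
```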
@@ -1,47 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import sys
-from io import StringIO
-from optparse import OptionParser
-
-import damask
-
-
-scriptName = os.path.splitext(os.path.basename(__file__))[0]
-scriptID   = ' '.join([scriptName,damask.version])
-
-
-#--------------------------------------------------------------------------------------------------
-# MAIN
-#--------------------------------------------------------------------------------------------------
-
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [geomfile(s)]', description = """
-Mirror along given directions.
-
-""", version=scriptID)
-
-validDirections = ['x','y','z']
-
-parser.add_option('-d','--direction',
-                  dest = 'directions',
-                  action = 'extend', metavar = '<string LIST>',
-                  help = "directions in which to mirror {{{}}}".format(','.join(validDirections)))
-parser.add_option( '--reflect',
-                  dest = 'reflect',
-                  action = 'store_true',
-                  help = 'reflect (include) outermost layers')
-
-parser.set_defaults(reflect = False)
-
-(options, filenames) = parser.parse_args()
-
-if filenames == []: filenames = [None]
-
-for name in filenames:
-    damask.util.report(scriptName,name)
-
-    geom = damask.Geom.from_file(StringIO(''.join(sys.stdin.read())) if name is None else name)
-    damask.util.croak(geom.mirror(options.directions,options.reflect))
-    geom.add_comments(scriptID + ' ' + ' '.join(sys.argv[1:]))
-    geom.to_file(sys.stdout if name is None else name,pack=False)
@@ -1,37 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import sys
-from io import StringIO
-from optparse import OptionParser
-
-import damask
-
-
-scriptName = os.path.splitext(os.path.basename(__file__))[0]
-scriptID   = ' '.join([scriptName,damask.version])
-
-
-#--------------------------------------------------------------------------------------------------
-# MAIN
-#--------------------------------------------------------------------------------------------------
-
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog [geomfile(s)]', description = """
-Pack ranges to "a to b" and/or multiples to "n of x".
-
-""", version = scriptID)
-
-(options, filenames) = parser.parse_args()
-
-
-if filenames == []: filenames = [None]
-
-for name in filenames:
-    damask.util.report(scriptName,name)
-
-    geom = damask.Geom.from_file(StringIO(''.join(sys.stdin.read())) if name is None else name)
-
-    damask.util.croak(geom)
-    geom.add_comments(scriptID + ' ' + ' '.join(sys.argv[1:]))
-
-    geom.to_file(sys.stdout if name is None else name,pack=True)
@@ -1,60 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import sys
-from io import StringIO
-from optparse import OptionParser
-
-import numpy as np
-
-import damask
-
-
-scriptName = os.path.splitext(os.path.basename(__file__))[0]
-scriptID = ' '.join([scriptName,damask.version])
-
-
-#--------------------------------------------------------------------------------------------------
-# MAIN
-#--------------------------------------------------------------------------------------------------
-
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [geomfile(s)]', description = """
-Scales independently in x, y, and z direction in terms of grid and/or size.
-Either absolute values or relative factors (like "0.25x") can be used.
-
-""", version = scriptID)
-
-parser.add_option('-g', '--grid',
-dest = 'grid',
-type = 'string', nargs = 3, metavar = 'string string string',
-help = 'a,b,c grid of hexahedral box')
-parser.add_option('-s', '--size',
-dest = 'size',
-type = 'string', nargs = 3, metavar = 'string string string',
-help = 'x,y,z size of hexahedral box')
-
-(options, filenames) = parser.parse_args()
-
-
-if filenames == []: filenames = [None]
-
-for name in filenames:
-damask.util.report(scriptName,name)
-
-geom = damask.Geom.from_file(StringIO(''.join(sys.stdin.read())) if name is None else name)
-
-grid = geom.get_grid()
-size = geom.get_size()
-
-new_grid = grid if options.grid is None else \
-np.array([int(o*float(n.lower().replace('x',''))) if n.lower().endswith('x') \
-else int(n) for o,n in zip(grid,options.grid)],dtype=int)
-
-new_size = size if options.size is None else \
-np.array([o*float(n.lower().replace('x','')) if n.lower().endswith('x') \
-else float(n) for o,n in zip(size,options.size)],dtype=float)
-
-geom.scale(new_grid)
-damask.util.croak(geom.update(microstructure = None,size = new_size))
-geom.add_comments(scriptID + ' ' + ' '.join(sys.argv[1:]))
-geom.to_file(sys.stdout if name is None else name,pack=False)
@@ -1,45 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import sys
-from io import StringIO
-from optparse import OptionParser
-
-import damask
-
-
-scriptName = os.path.splitext(os.path.basename(__file__))[0]
-scriptID = ' '.join([scriptName,damask.version])
-
-
-#--------------------------------------------------------------------------------------------------
-# MAIN
-#--------------------------------------------------------------------------------------------------
-
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog [geomfile(s)]', description = """
-Translate geom description into ASCIItable containing position and microstructure.
-
-""", version = scriptID)
-
-(options, filenames) = parser.parse_args()
-if filenames == []: filenames = [None]
-
-for name in filenames:
-damask.util.report(scriptName,name)
-
-geom = damask.Geom.from_file(StringIO(''.join(sys.stdin.read())) if name is None else name)
-damask.util.croak(geom)
-
-coord0 = damask.grid_filters.cell_coord0(geom.grid,geom.size,geom.origin).reshape(-1,3)
-
-comments = geom.comments \
-+ [scriptID + ' ' + ' '.join(sys.argv[1:]),
-'grid\ta {}\tb {}\tc {}'.format(*geom.grid),
-'size\tx {}\ty {}\tz {}'.format(*geom.size),
-'origin\tx {}\ty {}\tz {}'.format(*geom.origin),
-'homogenization\t{}'.format(geom.homogenization)]
-
-table = damask.Table(coord0,{'pos':(3,)},comments)
-table.add('microstructure',geom.microstructure.reshape((-1,1),order='F'))
-
-table.to_ASCII(sys.stdout if name is None else os.path.splitext(name)[0]+'.txt')
@@ -1,37 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import sys
-from optparse import OptionParser
-from io import StringIO
-
-import damask
-
-
-scriptName = os.path.splitext(os.path.basename(__file__))[0]
-scriptID = ' '.join([scriptName,damask.version])
-
-
-#--------------------------------------------------------------------------------------------------
-# MAIN
-#--------------------------------------------------------------------------------------------------
-
-parser = OptionParser(option_class=damask.extendableOption, usage='%prog [geomfile(s)]', description = """
-Unpack ranges "a to b" and/or "n of x" multiples (exclusively in one line).
-
-""", version = scriptID)
-
-(options, filenames) = parser.parse_args()
-
-
-if filenames == []: filenames = [None]
-
-for name in filenames:
-damask.util.report(scriptName,name)
-
-geom = damask.Geom.from_file(StringIO(''.join(sys.stdin.read())) if name is None else name)
-
-damask.util.croak(geom)
-geom.add_comments(scriptID + ' ' + ' '.join(sys.argv[1:]))
-
-geom.to_file(sys.stdout if name is None else name,pack=False)
@@ -54,7 +54,7 @@ for name in filenames:
 np.in1d(microstructure,options.blacklist,invert=True) if options.blacklist else \
 np.full(geom.grid.prod(),True,dtype=bool))

-seeds = damask.grid_filters.cell_coord0(geom.grid,geom.size).reshape(-1,3)
+seeds = damask.grid_filters.cell_coord0(geom.grid,geom.size).reshape(-1,3,order='F')

 comments = geom.comments \
 + [scriptID + ' ' + ' '.join(sys.argv[1:]),

@@ -128,7 +128,7 @@ for name in filenames:


 if not options.selective:
-coords = damask.grid_filters.cell_coord0(grid,size).reshape(-1,3)
+coords = damask.grid_filters.cell_coord0(grid,size).reshape(-1,3,order='F')
 seeds = coords[np.random.choice(np.prod(grid), options.N, replace=False)] \
 + np.broadcast_to(size/grid,(options.N,3))*(np.random.rand(options.N,3)*.5-.25) # wobble without leaving grid
 else:
@@ -1,2 +1,5 @@
 [run]
 omit = tests/*
+damask/_asciitable.py
+damask/_test.py
+damask/config/*
@@ -1,166 +0,0 @@
-####################################################################################################
-# Code below available according to the following conditions on
-# https://github.com/MarDiehl/3Drotations
-####################################################################################################
-# Copyright (c) 2017-2019, Martin Diehl/Max-Planck-Institut für Eisenforschung GmbH
-# Copyright (c) 2013-2014, Marc De Graef/Carnegie Mellon University
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification, are
-# permitted provided that the following conditions are met:
-#
-# - Redistributions of source code must retain the above copyright notice, this list
-# of conditions and the following disclaimer.
-# - Redistributions in binary form must reproduce the above copyright notice, this
-# list of conditions and the following disclaimer in the documentation and/or
-# other materials provided with the distribution.
-# - Neither the names of Marc De Graef, Carnegie Mellon University nor the names
-# of its contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-####################################################################################################
-
-import numpy as np
-
-sc = np.pi**(1./6.)/6.**(1./6.)
-beta = np.pi**(5./6.)/6.**(1./6.)/2.
-R1 = (3.*np.pi/4.)**(1./3.)
-
-def cube_to_ball(cube):
-"""
-Map a point in a uniform refinable cubical grid to a point on a uniform refinable grid on a ball.
-
-Parameters
-----------
-cube : numpy.ndarray
-coordinates of a point in a uniform refinable cubical grid.
-
-References
-----------
-D. Roşca et al., Modelling and Simulation in Materials Science and Engineering 22:075013, 2014
-https://doi.org/10.1088/0965-0393/22/7/075013
-
-"""
-if np.abs(np.max(cube))>np.pi**(2./3.) * 0.5:
-raise ValueError
-
-# transform to the sphere grid via the curved square, and intercept the zero point
-if np.allclose(cube,0.0,rtol=0.0,atol=1.0e-300):
-ball = np.zeros(3)
-else:
-# get pyramide and scale by grid parameter ratio
-p = _get_order(cube)
-XYZ = cube[p] * sc
-
-# intercept all the points along the z-axis
-if np.allclose(XYZ[0:2],0.0,rtol=0.0,atol=1.0e-300):
-ball = np.array([0.0, 0.0, np.sqrt(6.0/np.pi) * XYZ[2]])
-else:
-order = [1,0] if np.abs(XYZ[1]) <= np.abs(XYZ[0]) else [0,1]
-q = np.pi/12.0 * XYZ[order[0]]/XYZ[order[1]]
-c = np.cos(q)
-s = np.sin(q)
-q = R1*2.0**0.25/beta * XYZ[order[1]] / np.sqrt(np.sqrt(2.0)-c)
-T = np.array([ (np.sqrt(2.0)*c - 1.0), np.sqrt(2.0) * s]) * q
-
-# transform to sphere grid (inverse Lambert)
-# note that there is no need to worry about dividing by zero, since XYZ[2] can not become zero
-c = np.sum(T**2)
-s = c * np.pi/24.0 /XYZ[2]**2
-c = c * np.sqrt(np.pi/24.0)/XYZ[2]
-q = np.sqrt( 1.0 - s )
-ball = np.array([ T[order[1]] * q, T[order[0]] * q, np.sqrt(6.0/np.pi) * XYZ[2] - c ])
-
-# reverse the coordinates back to the regular order according to the original pyramid number
-ball = ball[p]
-
-return ball
-
-
-def ball_to_cube(ball):
-"""
-Map a point on a uniform refinable grid on a ball to a point in a uniform refinable cubical grid.
-
-Parameters
-----------
-ball : numpy.ndarray
-coordinates of a point on a uniform refinable grid on a ball.
-
-References
-----------
-D. Roşca et al., Modelling and Simulation in Materials Science and Engineering 22:075013, 2014
-https://doi.org/10.1088/0965-0393/22/7/075013
-
-"""
-rs = np.linalg.norm(ball)
-if rs > R1:
-raise ValueError
-
-if np.allclose(ball,0.0,rtol=0.0,atol=1.0e-300):
-cube = np.zeros(3)
-else:
-p = _get_order(ball)
-xyz3 = ball[p]
-
-# inverse M_3
-xyz2 = xyz3[0:2] * np.sqrt( 2.0*rs/(rs+np.abs(xyz3[2])) )
-
-# inverse M_2
-qxy = np.sum(xyz2**2)
-
-if np.isclose(qxy,0.0,rtol=0.0,atol=1.0e-300):
-Tinv = np.zeros(2)
-else:
-q2 = qxy + np.max(np.abs(xyz2))**2
-sq2 = np.sqrt(q2)
-q = (beta/np.sqrt(2.0)/R1) * np.sqrt(q2*qxy/(q2-np.max(np.abs(xyz2))*sq2))
-tt = np.clip((np.min(np.abs(xyz2))**2+np.max(np.abs(xyz2))*sq2)/np.sqrt(2.0)/qxy,-1.0,1.0)
-Tinv = np.array([1.0,np.arccos(tt)/np.pi*12.0]) if np.abs(xyz2[1]) <= np.abs(xyz2[0]) else \
-np.array([np.arccos(tt)/np.pi*12.0,1.0])
-Tinv = q * np.where(xyz2<0.0,-Tinv,Tinv)
-
-# inverse M_1
-cube = np.array([ Tinv[0], Tinv[1], (-1.0 if xyz3[2] < 0.0 else 1.0) * rs / np.sqrt(6.0/np.pi) ]) /sc
-# reverse the coordinates back to the regular order according to the original pyramid number
-cube = cube[p]
-
-return cube
-
-
-def _get_order(xyz):
-"""
-Get order of the coordinates.
-
-Depending on the pyramid in which the point is located, the order need to be adjusted.
-
-Parameters
-----------
-xyz : numpy.ndarray
-coordinates of a point on a uniform refinable grid on a ball or
-in a uniform refinable cubical grid.
-
-References
-----------
-D. Roşca et al., Modelling and Simulation in Materials Science and Engineering 22:075013, 2014
-https://doi.org/10.1088/0965-0393/22/7/075013
-
-"""
-if (abs(xyz[0])<= xyz[2]) and (abs(xyz[1])<= xyz[2]) or \
-(abs(xyz[0])<=-xyz[2]) and (abs(xyz[1])<=-xyz[2]):
-return [0,1,2]
-elif (abs(xyz[2])<= xyz[0]) and (abs(xyz[1])<= xyz[0]) or \
-(abs(xyz[2])<=-xyz[0]) and (abs(xyz[1])<=-xyz[0]):
-return [1,2,0]
-elif (abs(xyz[0])<= xyz[1]) and (abs(xyz[2])<= xyz[1]) or \
-(abs(xyz[0])<=-xyz[1]) and (abs(xyz[2])<=-xyz[1]):
-return [2,0,1]
@@ -322,11 +322,10 @@ class Geom:
 if i != grid.prod():
 raise TypeError('Invalid file: expected {} entries, found {}'.format(grid.prod(),i))

-microstructure = microstructure.reshape(grid,order='F')
-if not np.any(np.mod(microstructure.flatten(),1) != 0.0): # no float present
+if not np.any(np.mod(microstructure,1) != 0.0): # no float present
 microstructure = microstructure.astype('int')

-return Geom(microstructure.reshape(grid),size,origin,homogenization,comments)
+return Geom(microstructure.reshape(grid,order='F'),size,origin,homogenization,comments)


 @staticmethod

@@ -352,16 +351,15 @@ class Geom:

 """
 if periodic:
-weights_p = np.tile(weights,27).flatten(order='F') # Laguerre weights (1,2,3,1,2,3,...,1,2,3)
+weights_p = np.tile(weights,27) # Laguerre weights (1,2,3,1,2,3,...,1,2,3)
 seeds_p = np.vstack((seeds -np.array([size[0],0.,0.]),seeds, seeds +np.array([size[0],0.,0.])))
 seeds_p = np.vstack((seeds_p-np.array([0.,size[1],0.]),seeds_p,seeds_p+np.array([0.,size[1],0.])))
 seeds_p = np.vstack((seeds_p-np.array([0.,0.,size[2]]),seeds_p,seeds_p+np.array([0.,0.,size[2]])))
-coords = grid_filters.cell_coord0(grid*3,size*3,-size).reshape(-1,3,order='F')
+coords = grid_filters.cell_coord0(grid*3,size*3,-size).reshape(-1,3)

 else:
-weights_p = weights.flatten()
+weights_p = weights
 seeds_p = seeds
-coords = grid_filters.cell_coord0(grid,size).reshape(-1,3,order='F')
+coords = grid_filters.cell_coord0(grid,size).reshape(-1,3)

 pool = multiprocessing.Pool(processes = int(Environment().options['DAMASK_NUM_THREADS']))
 result = pool.map_async(partial(Geom._find_closest_seed,seeds_p,weights_p), [coord for coord in coords])

@@ -396,7 +394,7 @@ class Geom:
 perform a periodic tessellation. Defaults to True.

 """
-coords = grid_filters.cell_coord0(grid,size).reshape(-1,3,order='F')
+coords = grid_filters.cell_coord0(grid,size).reshape(-1,3)
 KDTree = spatial.cKDTree(seeds,boxsize=size) if periodic else spatial.cKDTree(seeds)
 devNull,microstructure = KDTree.query(coords)
@@ -38,6 +38,9 @@ class Orientation:
 else:
 self.rotation = Rotation.fromQuaternion(rotation) # assume quaternion

+if self.rotation.quaternion.shape != (4,):
+raise NotImplementedError('Support for multiple rotations missing')
+
 def disorientation(self,
 other,
 SST = True,
@@ -68,12 +68,12 @@ class Result:
 self.con_physics = []
 for c in self.constituents:
 self.con_physics += f['/'.join([self.increments[0],'constituent',c])].keys()
 self.con_physics = list(set(self.con_physics)) # make unique

 self.mat_physics = []
 for m in self.materialpoints:
 self.mat_physics += f['/'.join([self.increments[0],'materialpoint',m])].keys()
 self.mat_physics = list(set(self.mat_physics)) # make unique

 self.selection = {'increments': self.increments,
 'constituents': self.constituents,'materialpoints': self.materialpoints,

@@ -86,13 +86,19 @@ class Result:
 def __repr__(self):
 """Show selected data."""
 all_selected_increments = self.selection['increments']

 self.pick('increments',all_selected_increments[0:1])
 first = self.list_data()

 self.pick('increments',all_selected_increments[-1:])
-last = self.list_data()
+last = '' if len(all_selected_increments) < 2 else self.list_data()

 self.pick('increments',all_selected_increments)
-in_between = ''.join(['\n{}\n ...\n'.format(inc) for inc in all_selected_increments[1:-2]])
-return util.srepr(first+ in_between + last)
+in_between = '' if len(all_selected_increments) < 3 else \
+''.join(['\n{}\n ...\n'.format(inc) for inc in all_selected_increments[1:-2]])
+
+return util.srepr(first + in_between + last)


 def _manage_selection(self,action,what,datasets):

@@ -105,7 +111,7 @@ class Result:
 select from 'set', 'add', and 'del'
 what : str
 attribute to change (must be from self.selection)
-datasets : list of str or Boolean
+datasets : list of str or bool
 name of datasets as list, supports ? and * wildcards.
 True is equivalent to [*], False is equivalent to []

@@ -197,7 +203,7 @@ class Result:
 ----------
 what : str
 attribute to change (must be from self.selection)
-datasets : list of str or Boolean
+datasets : list of str or bool
 name of datasets as list, supports ? and * wildcards.
 True is equivalent to [*], False is equivalent to []

@@ -213,7 +219,7 @@ class Result:
 ----------
 what : str
 attribute to change (must be from self.selection)
-datasets : list of str or Boolean
+datasets : list of str or bool
 name of datasets as list, supports ? and * wildcards.
 True is equivalent to [*], False is equivalent to []

@@ -229,7 +235,7 @@ class Result:
 ----------
 what : str
 attribute to change (must be from self.selection)
-datasets : list of str or Boolean
+datasets : list of str or bool
 name of datasets as list, supports ? and * wildcards.
 True is equivalent to [*], False is equivalent to []

@@ -256,10 +262,10 @@ class Result:
 datasets : iterable or str
 component : int
 homogenization component to consider for constituent data
-tagged : Boolean
+tagged : bool
 tag Table.column name with '#component'
 defaults to False
-split : Boolean
+split : bool
 split Table by increment and return dictionary of Tables
 defaults to True

@@ -320,7 +326,7 @@ class Result:

 Parameters
 ----------
-datasets : iterable or str or Boolean
+datasets : iterable or str or bool

 Examples
 --------
@@ -454,11 +460,19 @@ class Result:
 def cell_coordinates(self):
 """Return initial coordinates of the cell centers."""
 if self.structured:
-return grid_filters.cell_coord0(self.grid,self.size,self.origin).reshape(-1,3)
+return grid_filters.cell_coord0(self.grid,self.size,self.origin).reshape(-1,3,order='F')
 else:
 with h5py.File(self.fname,'r') as f:
 return f['geometry/x_c'][()]

+def node_coordinates(self):
+"""Return initial coordinates of the cell centers."""
+if self.structured:
+return grid_filters.node_coord0(self.grid,self.size,self.origin).reshape(-1,3,order='F')
+else:
+with h5py.File(self.fname,'r') as f:
+return f['geometry/x_n'][()]
+

 @staticmethod
 def _add_absolute(x):

@@ -1009,7 +1023,7 @@ class Result:
 continue
 lock.acquire()
 with h5py.File(self.fname, 'a') as f:
-try:
+try: # ToDo: Replace if exists?
 dataset = f[result[0]].create_dataset(result[1]['label'],data=result[1]['data'])
 for l,v in result[1]['meta'].items():
 dataset.attrs[l]=v.encode()

[File diff suppressed because it is too large]
@@ -1,3 +1,17 @@
+"""
+Filters for operations on regular grids.
+
+Notes
+-----
+The grids are defined as (x,y,z,...) where x is fastest and z is slowest.
+This convention is consistent with the geom file format.
+When converting to/from a plain list (e.g. storage in ASCII table),
+the following operations are required for tensorial data:
+
+D3 = D1.reshape(grid+(-1,),order='F').reshape(grid+(3,3))
+D1 = D3.reshape(grid+(-1,)).reshape(-1,9,order='F')
+
+"""
 from scipy import spatial as _spatial
 import numpy as _np
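As a quick illustration of the reshape round trip described in the new module docstring (a minimal numpy sketch, not part of the diff; the grid shape is an arbitrary example):

    import numpy as np

    grid = (2,3,4)                                              # arbitrary example grid
    D1 = np.random.rand(np.prod(grid),9)                        # one 3x3 tensor per row, plain list
    D3 = D1.reshape(grid+(-1,),order='F').reshape(grid+(3,3))   # to the x-fast/z-slow grid layout
    D1_back = D3.reshape(grid+(-1,)).reshape(-1,9,order='F')    # and back to the plain list
    assert np.allclose(D1,D1_back)                              # the round trip is lossless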
@@ -7,8 +21,12 @@ def _ks(size,grid,first_order=False):

 Parameters
 ----------
-size : numpy.ndarray
+size : numpy.ndarray of shape (3)
 physical size of the periodic field.
+grid : numpy.ndarray of shape (3)
+number of grid points.
+first_order : bool, optional
+correction for first order derivatives, defaults to False.

 """
 k_sk = _np.where(_np.arange(grid[0])>grid[0]//2,_np.arange(grid[0])-grid[0],_np.arange(grid[0]))/size[0]

@@ -19,8 +37,7 @@ def _ks(size,grid,first_order=False):

 k_si = _np.arange(grid[2]//2+1)/size[2]

-kk, kj, ki = _np.meshgrid(k_sk,k_sj,k_si,indexing = 'ij')
-return _np.concatenate((ki[:,:,:,None],kj[:,:,:,None],kk[:,:,:,None]),axis = 3)
+return _np.stack(_np.meshgrid(k_sk,k_sj,k_si,indexing = 'ij'), axis=-1)


 def curl(size,field):

@@ -29,8 +46,10 @@ def curl(size,field):

 Parameters
 ----------
-size : numpy.ndarray
+size : numpy.ndarray of shape (3)
 physical size of the periodic field.
+field : numpy.ndarray of shape (:,:,:,3) or (:,:,:,3,3)
+periodic field of which the curl is calculated.

 """
 n = _np.prod(field.shape[3:])

@@ -41,8 +60,8 @@ def curl(size,field):
 e[0, 2, 1] = e[2, 1, 0] = e[1, 0, 2] = -1.0

 field_fourier = _np.fft.rfftn(field,axes=(0,1,2))
 curl_ = (_np.einsum('slm,ijkl,ijkm ->ijks', e,k_s,field_fourier)*2.0j*_np.pi if n == 3 else # vector, 3 -> 3
 _np.einsum('slm,ijkl,ijknm->ijksn',e,k_s,field_fourier)*2.0j*_np.pi) # tensor, 3x3 -> 3x3

 return _np.fft.irfftn(curl_,axes=(0,1,2),s=field.shape[:3])

@@ -53,36 +72,40 @@ def divergence(size,field):

 Parameters
 ----------
-size : numpy.ndarray
+size : numpy.ndarray of shape (3)
 physical size of the periodic field.
+field : numpy.ndarray of shape (:,:,:,3) or (:,:,:,3,3)
+periodic field of which the divergence is calculated.

 """
 n = _np.prod(field.shape[3:])
 k_s = _ks(size,field.shape[:3],True)

 field_fourier = _np.fft.rfftn(field,axes=(0,1,2))
 div_ = (_np.einsum('ijkl,ijkl ->ijk', k_s,field_fourier)*2.0j*_np.pi if n == 3 else # vector, 3 -> 1
 _np.einsum('ijkm,ijklm->ijkl',k_s,field_fourier)*2.0j*_np.pi) # tensor, 3x3 -> 3

 return _np.fft.irfftn(div_,axes=(0,1,2),s=field.shape[:3])


 def gradient(size,field):
 """
-Calculate gradient of a vector or scalar field in Fourier space.
+Calculate gradient of a scalar or vector field in Fourier space.

 Parameters
 ----------
-size : numpy.ndarray
+size : numpy.ndarray of shape (3)
 physical size of the periodic field.
+field : numpy.ndarray of shape (:,:,:,1) or (:,:,:,3)
+periodic field of which the gradient is calculated.

 """
 n = _np.prod(field.shape[3:])
 k_s = _ks(size,field.shape[:3],True)

 field_fourier = _np.fft.rfftn(field,axes=(0,1,2))
 grad_ = (_np.einsum('ijkl,ijkm->ijkm', field_fourier,k_s)*2.0j*_np.pi if n == 1 else # scalar, 1 -> 3
 _np.einsum('ijkl,ijkm->ijklm',field_fourier,k_s)*2.0j*_np.pi) # vector, 3 -> 3x3

 return _np.fft.irfftn(grad_,axes=(0,1,2),s=field.shape[:3])
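A quick plausibility check of the spectral derivatives above (a sketch, not part of the diff; it assumes the post-merge cell_coord0 return shape of tuple(grid)+(3,)): the FFT-based gradient of a pure sine field reproduces the analytic cosine.

    import numpy as np
    import damask

    size = np.array([2*np.pi,2*np.pi,2*np.pi])
    grid = np.array([16,16,16])
    x = damask.grid_filters.cell_coord0(grid,size)      # cell centers, x varies along the first axis
    f = np.sin(x[...,:1])                               # periodic scalar field, shape tuple(grid)+(1,)
    g = damask.grid_filters.gradient(size,f)            # gradient field, shape tuple(grid)+(3,)
    assert np.allclose(g[...,0],np.cos(x[...,0]))       # d/dx sin(x) = cos(x) for a band-limited field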
@@ -93,9 +116,9 @@ def cell_coord0(grid,size,origin=_np.zeros(3)):

 Parameters
 ----------
-grid : numpy.ndarray
+grid : numpy.ndarray of shape (3)
 number of grid points.
-size : numpy.ndarray
+size : numpy.ndarray of shape (3)
 physical size of the periodic field.
 origin : numpy.ndarray, optional
 physical origin of the periodic field. Defaults to [0.0,0.0,0.0].

@@ -103,7 +126,11 @@ def cell_coord0(grid,size,origin=_np.zeros(3)):
 """
 start = origin + size/grid*.5
 end = origin + size - size/grid*.5
-return _np.mgrid[start[0]:end[0]:grid[0]*1j,start[1]:end[1]:grid[1]*1j,start[2]:end[2]:grid[2]*1j].T
+return _np.stack(_np.meshgrid(_np.linspace(start[0],end[0],grid[0]),
+_np.linspace(start[1],end[1],grid[1]),
+_np.linspace(start[2],end[2],grid[2]),indexing = 'ij'),
+axis = -1)


 def cell_displacement_fluct(size,F):

@@ -112,7 +139,7 @@ def cell_displacement_fluct(size,F):

 Parameters
 ----------
-size : numpy.ndarray
+size : numpy.ndarray of shape (3)
 physical size of the periodic field.
 F : numpy.ndarray
 deformation gradient field.

@@ -139,14 +166,14 @@ def cell_displacement_avg(size,F):

 Parameters
 ----------
-size : numpy.ndarray
+size : numpy.ndarray of shape (3)
 physical size of the periodic field.
 F : numpy.ndarray
 deformation gradient field.

 """
 F_avg = _np.average(F,axis=(0,1,2))
-return _np.einsum('ml,ijkl->ijkm',F_avg-_np.eye(3),cell_coord0(F.shape[:3][::-1],size))
+return _np.einsum('ml,ijkl->ijkm',F_avg - _np.eye(3),cell_coord0(F.shape[:3],size))


 def cell_displacement(size,F):

@@ -155,7 +182,7 @@ def cell_displacement(size,F):

 Parameters
 ----------
-size : numpy.ndarray
+size : numpy.ndarray of shape (3)
 physical size of the periodic field.
 F : numpy.ndarray
 deformation gradient field.
@@ -170,25 +197,25 @@ def cell_coord(size,F,origin=_np.zeros(3)):

 Parameters
 ----------
-size : numpy.ndarray
+size : numpy.ndarray of shape (3)
 physical size of the periodic field.
 F : numpy.ndarray
 deformation gradient field.
-origin : numpy.ndarray, optional
+origin : numpy.ndarray of shape (3), optional
 physical origin of the periodic field. Defaults to [0.0,0.0,0.0].

 """
-return cell_coord0(F.shape[:3][::-1],size,origin) + cell_displacement(size,F)
+return cell_coord0(F.shape[:3],size,origin) + cell_displacement(size,F)


 def cell_coord0_gridSizeOrigin(coord0,ordered=True):
 """
-Return grid 'DNA', i.e. grid, size, and origin from array of cell positions.
+Return grid 'DNA', i.e. grid, size, and origin from 1D array of cell positions.

 Parameters
 ----------
-coord0 : numpy.ndarray
-array of undeformed cell coordinates.
+coord0 : numpy.ndarray of shape (:,3)
+undeformed cell coordinates.
 ordered : bool, optional
 expect coord0 data to be ordered (x fast, z slow).

@@ -211,13 +238,13 @@ def cell_coord0_gridSizeOrigin(coord0,ordered=True):
 start = origin + delta*.5
 end = origin - delta*.5 + size

-if not _np.allclose(coords[0],_np.linspace(start[0],end[0],grid[0])) and \
+if not (_np.allclose(coords[0],_np.linspace(start[0],end[0],grid[0])) and \
 _np.allclose(coords[1],_np.linspace(start[1],end[1],grid[1])) and \
-_np.allclose(coords[2],_np.linspace(start[2],end[2],grid[2])):
+_np.allclose(coords[2],_np.linspace(start[2],end[2],grid[2]))):
 raise ValueError('Regular grid spacing violated.')

-if ordered and not _np.allclose(coord0.reshape(tuple(grid[::-1])+(3,)),cell_coord0(grid,size,origin)):
-raise ValueError('I_nput data is not a regular grid.')
+if ordered and not _np.allclose(coord0.reshape(tuple(grid)+(3,),order='F'),cell_coord0(grid,size,origin)):
+raise ValueError('Input data is not ordered (x fast, z slow).')

 return (grid,size,origin)
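A usage sketch for the ordered ('x fast, z slow') layout that cell_coord0_gridSizeOrigin now checks against (not part of the diff; the concrete grid, size, and origin values are arbitrary placeholders):

    import numpy as np
    import damask

    grid,size,origin = np.array([4,5,6]),np.ones(3),np.zeros(3)
    coord0 = damask.grid_filters.cell_coord0(grid,size,origin).reshape(-1,3,order='F')   # x fast, z slow
    recovered = damask.grid_filters.cell_coord0_gridSizeOrigin(coord0)                   # grid 'DNA'
    assert all(np.allclose(a,b) for a,b in zip(recovered,(grid,size,origin)))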
@@ -241,17 +268,18 @@ def node_coord0(grid,size,origin=_np.zeros(3)):

 Parameters
 ----------
-grid : numpy.ndarray
+grid : numpy.ndarray of shape (3)
 number of grid points.
-size : numpy.ndarray
+size : numpy.ndarray of shape (3)
 physical size of the periodic field.
-origin : numpy.ndarray, optional
+origin : numpy.ndarray of shape (3), optional
 physical origin of the periodic field. Defaults to [0.0,0.0,0.0].

 """
-return _np.mgrid[origin[0]:size[0]+origin[0]:(grid[0]+1)*1j,
-origin[1]:size[1]+origin[1]:(grid[1]+1)*1j,
-origin[2]:size[2]+origin[2]:(grid[2]+1)*1j].T
+return _np.stack(_np.meshgrid(_np.linspace(origin[0],size[0]+origin[0],grid[0]+1),
+_np.linspace(origin[1],size[1]+origin[1],grid[1]+1),
+_np.linspace(origin[2],size[2]+origin[2],grid[2]+1),indexing = 'ij'),
+axis = -1)


 def node_displacement_fluct(size,F):

@@ -260,7 +288,7 @@ def node_displacement_fluct(size,F):

 Parameters
 ----------
-size : numpy.ndarray
+size : numpy.ndarray of shape (3)
 physical size of the periodic field.
 F : numpy.ndarray
 deformation gradient field.

@@ -275,14 +303,14 @@ def node_displacement_avg(size,F):

 Parameters
 ----------
-size : numpy.ndarray
+size : numpy.ndarray of shape (3)
 physical size of the periodic field.
 F : numpy.ndarray
 deformation gradient field.

 """
 F_avg = _np.average(F,axis=(0,1,2))
-return _np.einsum('ml,ijkl->ijkm',F_avg-_np.eye(3),node_coord0(F.shape[:3][::-1],size))
+return _np.einsum('ml,ijkl->ijkm',F_avg - _np.eye(3),node_coord0(F.shape[:3],size))


 def node_displacement(size,F):

@@ -291,7 +319,7 @@ def node_displacement(size,F):

 Parameters
 ----------
-size : numpy.ndarray
+size : numpy.ndarray of shape (3)
 physical size of the periodic field.
 F : numpy.ndarray
 deformation gradient field.

@@ -306,15 +334,15 @@ def node_coord(size,F,origin=_np.zeros(3)):

 Parameters
 ----------
-size : numpy.ndarray
+size : numpy.ndarray of shape (3)
 physical size of the periodic field.
 F : numpy.ndarray
 deformation gradient field.
-origin : numpy.ndarray, optional
+origin : numpy.ndarray of shape (3), optional
 physical origin of the periodic field. Defaults to [0.0,0.0,0.0].

 """
-return node_coord0(F.shape[:3][::-1],size,origin) + node_displacement(size,F)
+return node_coord0(F.shape[:3],size,origin) + node_displacement(size,F)


 def cell_2_node(cell_data):
@@ -335,14 +363,14 @@ def node_2_cell(node_data):
 return c[:-1,:-1,:-1]


-def node_coord0_gridSizeOrigin(coord0,ordered=False):
+def node_coord0_gridSizeOrigin(coord0,ordered=True):
 """
-Return grid 'DNA', i.e. grid, size, and origin from array of nodal positions.
+Return grid 'DNA', i.e. grid, size, and origin from 1D array of nodal positions.

 Parameters
 ----------
-coord0 : numpy.ndarray
-array of undeformed nodal coordinates.
+coord0 : numpy.ndarray of shape (:,3)
+undeformed nodal coordinates.
 ordered : bool, optional
 expect coord0 data to be ordered (x fast, z slow).

@@ -357,13 +385,13 @@ def node_coord0_gridSizeOrigin(coord0,ordered=False):
 if (grid+1).prod() != len(coord0):
 raise ValueError('Data count {} does not match grid {}.'.format(len(coord0),grid))

-if not _np.allclose(coords[0],_np.linspace(mincorner[0],maxcorner[0],grid[0]+1)) and \
+if not (_np.allclose(coords[0],_np.linspace(mincorner[0],maxcorner[0],grid[0]+1)) and \
 _np.allclose(coords[1],_np.linspace(mincorner[1],maxcorner[1],grid[1]+1)) and \
-_np.allclose(coords[2],_np.linspace(mincorner[2],maxcorner[2],grid[2]+1)):
+_np.allclose(coords[2],_np.linspace(mincorner[2],maxcorner[2],grid[2]+1))):
 raise ValueError('Regular grid spacing violated.')

-if ordered and not _np.allclose(coord0.reshape(tuple((grid+1)[::-1])+(3,)),node_coord0(grid,size,origin)):
-raise ValueError('I_nput data is not a regular grid.')
+if ordered and not _np.allclose(coord0.reshape(tuple(grid+1)+(3,),order='F'),node_coord0(grid,size,origin)):
+raise ValueError('Input data is not ordered (x fast, z slow).')

 return (grid,size,origin)

@@ -374,15 +402,15 @@ def regrid(size,F,new_grid):

 Parameters
 ----------
-size : numpy.ndarray
+size : numpy.ndarray of shape (3)
 physical size
-F : numpy.ndarray
+F : numpy.ndarray of shape (:,:,:,3,3)
 deformation gradient field
-new_grid : numpy.ndarray
+new_grid : numpy.ndarray of shape (3)
 new grid for undeformed coordinates

 """
-c = cell_coord0(F.shape[:3][::-1],size) \
+c = cell_coord0(F.shape[:3],size) \
 + cell_displacement_avg(size,F) \
 + cell_displacement_fluct(size,F)
@@ -135,16 +135,16 @@ def PK2(P,F):

 Parameters
 ----------
-P : numpy.ndarray of shape (:,3,3) or (3,3)
+P : numpy.ndarray of shape (...,3,3) or (3,3)
 First Piola-Kirchhoff stress.
-F : numpy.ndarray of shape (:,3,3) or (3,3)
+F : numpy.ndarray of shape (...,3,3) or (3,3)
 Deformation gradient.

 """
 if _np.shape(F) == _np.shape(P) == (3,3):
 S = _np.dot(_np.linalg.inv(F),P)
 else:
-S = _np.einsum('ijk,ikl->ijl',_np.linalg.inv(F),P)
+S = _np.einsum('...jk,...kl->...jl',_np.linalg.inv(F),P)
 return symmetric(S)


@@ -241,7 +241,7 @@ def symmetric(T):

 Parameters
 ----------
-T : numpy.ndarray of shape (:,3,3) or (3,3)
+T : numpy.ndarray of shape (...,3,3) or (3,3)
 Tensor of which the symmetrized values are computed.

 """

@@ -254,12 +254,12 @@ def transpose(T):

 Parameters
 ----------
-T : numpy.ndarray of shape (:,3,3) or (3,3)
+T : numpy.ndarray of shape (...,3,3) or (3,3)
 Tensor of which the transpose is computed.

 """
 return T.T if _np.shape(T) == (3,3) else \
-_np.transpose(T,(0,2,1))
+_np.swapaxes(T,axis2=-2,axis1=-1)


 def _polar_decomposition(T,requested):
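A minimal plain-numpy sketch (not part of the diff) of what the '...' subscripts buy: the second Piola-Kirchhoff computation now broadcasts over any number of leading batch dimensions, where the old 'ijk,ikl->ijl' form handled exactly one. The array shapes are arbitrary placeholders.

    import numpy as np

    P = np.random.rand(7,5,3,3)                               # batch of first Piola-Kirchhoff stresses
    F = np.eye(3) + 1e-3*np.random.rand(7,5,3,3)              # batch of deformation gradients near identity
    S = np.einsum('...jk,...kl->...jl',np.linalg.inv(F),P)    # broadcasts over the leading (7,5) axes
    S = 0.5*(S + np.swapaxes(S,-2,-1))                        # symmetrize, mirroring the swapaxes-based transpose
    assert S.shape == (7,5,3,3)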
@@ -24,6 +24,10 @@ def reference_dir(reference_dir_base):

 class TestResult:

+def test_self_report(self,default):
+print(default)
+
+
 def test_time_increments(self,default):
 shape = default.read_dataset(default.get_dataset_location('F'),0).shape
 default.set_by_time(0.0,20.0)
@@ -5,12 +5,82 @@ import numpy as np

 from damask import Rotation

-n = 1000
+n = 1100
+atol=1.e-4
+scatter=1.e-2

 @pytest.fixture
 def default():
 """A set of n random rotations."""
-return [Rotation.fromRandom() for r in range(n)]
+specials = np.array(
+[np.array([ 1.0, 0.0, 0.0, 0.0]),
+#-----------------------------------------------
+np.array([0.0, 1.0, 0.0, 0.0]),
+np.array([0.0, 0.0, 1.0, 0.0]),
+np.array([0.0, 0.0, 0.0, 1.0]),
+np.array([0.0,-1.0, 0.0, 0.0]),
+np.array([0.0, 0.0,-1.0, 0.0]),
+np.array([0.0, 0.0, 0.0,-1.0]),
+#-----------------------------------------------
+np.array([1.0, 1.0, 0.0, 0.0])/np.sqrt(2.),
+np.array([1.0, 0.0, 1.0, 0.0])/np.sqrt(2.),
+np.array([1.0, 0.0, 0.0, 1.0])/np.sqrt(2.),
+np.array([0.0, 1.0, 1.0, 0.0])/np.sqrt(2.),
+np.array([0.0, 1.0, 0.0, 1.0])/np.sqrt(2.),
+np.array([0.0, 0.0, 1.0, 1.0])/np.sqrt(2.),
+#-----------------------------------------------
+np.array([1.0,-1.0, 0.0, 0.0])/np.sqrt(2.),
+np.array([1.0, 0.0,-1.0, 0.0])/np.sqrt(2.),
+np.array([1.0, 0.0, 0.0,-1.0])/np.sqrt(2.),
+np.array([0.0, 1.0,-1.0, 0.0])/np.sqrt(2.),
+np.array([0.0, 1.0, 0.0,-1.0])/np.sqrt(2.),
+np.array([0.0, 0.0, 1.0,-1.0])/np.sqrt(2.),
+#-----------------------------------------------
+np.array([0.0, 1.0,-1.0, 0.0])/np.sqrt(2.),
+np.array([0.0, 1.0, 0.0,-1.0])/np.sqrt(2.),
+np.array([0.0, 0.0, 1.0,-1.0])/np.sqrt(2.),
+#-----------------------------------------------
+np.array([0.0,-1.0,-1.0, 0.0])/np.sqrt(2.),
+np.array([0.0,-1.0, 0.0,-1.0])/np.sqrt(2.),
+np.array([0.0, 0.0,-1.0,-1.0])/np.sqrt(2.),
+#-----------------------------------------------
+np.array([1.0, 1.0, 1.0, 0.0])/np.sqrt(3.),
+np.array([1.0, 1.0, 0.0, 1.0])/np.sqrt(3.),
+np.array([1.0, 0.0, 1.0, 1.0])/np.sqrt(3.),
+np.array([1.0,-1.0, 1.0, 0.0])/np.sqrt(3.),
+np.array([1.0,-1.0, 0.0, 1.0])/np.sqrt(3.),
+np.array([1.0, 0.0,-1.0, 1.0])/np.sqrt(3.),
+np.array([1.0, 1.0,-1.0, 0.0])/np.sqrt(3.),
+np.array([1.0, 1.0, 0.0,-1.0])/np.sqrt(3.),
+np.array([1.0, 0.0, 1.0,-1.0])/np.sqrt(3.),
+np.array([1.0,-1.0,-1.0, 0.0])/np.sqrt(3.),
+np.array([1.0,-1.0, 0.0,-1.0])/np.sqrt(3.),
+np.array([1.0, 0.0,-1.0,-1.0])/np.sqrt(3.),
+#-----------------------------------------------
+np.array([0.0, 1.0, 1.0, 1.0])/np.sqrt(3.),
+np.array([0.0, 1.0,-1.0, 1.0])/np.sqrt(3.),
+np.array([0.0, 1.0, 1.0,-1.0])/np.sqrt(3.),
+np.array([0.0,-1.0, 1.0, 1.0])/np.sqrt(3.),
+np.array([0.0,-1.0,-1.0, 1.0])/np.sqrt(3.),
+np.array([0.0,-1.0, 1.0,-1.0])/np.sqrt(3.),
+np.array([0.0,-1.0,-1.0,-1.0])/np.sqrt(3.),
+#-----------------------------------------------
+np.array([1.0, 1.0, 1.0, 1.0])/2.,
+np.array([1.0,-1.0, 1.0, 1.0])/2.,
+np.array([1.0, 1.0,-1.0, 1.0])/2.,
+np.array([1.0, 1.0, 1.0,-1.0])/2.,
+np.array([1.0,-1.0,-1.0, 1.0])/2.,
+np.array([1.0,-1.0, 1.0,-1.0])/2.,
+np.array([1.0, 1.0,-1.0,-1.0])/2.,
+np.array([1.0,-1.0,-1.0,-1.0])/2.,
+])
+specials_scatter = specials + np.broadcast_to(np.random.rand(4)*scatter,specials.shape)
+specials_scatter /= np.linalg.norm(specials_scatter,axis=1).reshape(-1,1)
+specials_scatter[specials_scatter[:,0]<0]*=-1
+
+return [Rotation.from_quaternion(s) for s in specials] + \
+[Rotation.from_quaternion(s) for s in specials_scatter] + \
+[Rotation.from_random() for _ in range(n-len(specials)-len(specials_scatter))]

 @pytest.fixture
 def reference_dir(reference_dir_base):
@@ -22,35 +92,211 @@ class TestRotation:

 def test_Eulers(self,default):
 for rot in default:
-assert np.allclose(rot.asQuaternion(),
-Rotation.fromEulers(rot.asEulers()).asQuaternion())
+m = rot.as_quaternion()
+o = Rotation.from_Eulers(rot.as_Eulers()).as_quaternion()
+ok = np.allclose(m,o,atol=atol)
+if np.isclose(rot.as_quaternion()[0],0.0,atol=atol):
+ok = ok or np.allclose(m*-1.,o,atol=atol)
+print(m,o,rot.as_quaternion())
+assert ok and np.isclose(np.linalg.norm(o),1.0)

 def test_AxisAngle(self,default):
 for rot in default:
-assert np.allclose(rot.asEulers(),
-Rotation.fromAxisAngle(rot.asAxisAngle()).asEulers())
+m = rot.as_Eulers()
+o = Rotation.from_axis_angle(rot.as_axis_angle()).as_Eulers()
+u = np.array([np.pi*2,np.pi,np.pi*2])
+ok = np.allclose(m,o,atol=atol)
+ok = ok or np.allclose(np.where(np.isclose(m,u),m-u,m),np.where(np.isclose(o,u),o-u,o),atol=atol)
+if np.isclose(m[1],0.0,atol=atol) or np.isclose(m[1],np.pi,atol=atol):
+sum_phi = np.unwrap([m[0]+m[2],o[0]+o[2]])
+ok = ok or np.isclose(sum_phi[0],sum_phi[1],atol=atol)
+print(m,o,rot.as_quaternion())
+assert ok and (np.zeros(3)-1.e-9 <= o).all() and (o <= np.array([np.pi*2.,np.pi,np.pi*2.])+1.e-9).all()

 def test_Matrix(self,default):
 for rot in default:
-assert np.allclose(rot.asAxisAngle(),
-Rotation.fromMatrix(rot.asMatrix()).asAxisAngle())
+m = rot.as_axis_angle()
+o = Rotation.from_axis_angle(rot.as_axis_angle()).as_axis_angle()
+ok = np.allclose(m,o,atol=atol)
+if np.isclose(m[3],np.pi,atol=atol):
+ok = ok or np.allclose(m*np.array([-1.,-1.,-1.,1.]),o,atol=atol)
+print(m,o,rot.as_quaternion())
+assert ok and np.isclose(np.linalg.norm(o[:3]),1.0) and o[3]<=np.pi++1.e-9

-def test_Rodriques(self,default):
+def test_Rodrigues(self,default):
 for rot in default:
-assert np.allclose(rot.asMatrix(),
-Rotation.fromRodrigues(rot.asRodrigues()).asMatrix())
+m = rot.as_matrix()
+o = Rotation.from_Rodrigues(rot.as_Rodrigues()).as_matrix()
+ok = np.allclose(m,o,atol=atol)
+print(m,o)
+assert ok and np.isclose(np.linalg.det(o),1.0)

 def test_Homochoric(self,default):
+cutoff = np.tan(np.pi*.5*(1.-1e-4))
 for rot in default:
-assert np.allclose(rot.asRodrigues(),
-Rotation.fromHomochoric(rot.asHomochoric()).asRodrigues(),rtol=1.e-4)
+m = rot.as_Rodrigues()
+o = Rotation.from_homochoric(rot.as_homochoric()).as_Rodrigues()
+ok = np.allclose(np.clip(m,None,cutoff),np.clip(o,None,cutoff),atol=atol)
+ok = ok or np.isclose(m[3],0.0,atol=atol)
+print(m,o,rot.as_quaternion())
+assert ok and np.isclose(np.linalg.norm(o[:3]),1.0)

 def test_Cubochoric(self,default):
 for rot in default:
-assert np.allclose(rot.asHomochoric(),
-Rotation.fromCubochoric(rot.asCubochoric()).asHomochoric())
+m = rot.as_homochoric()
+o = Rotation.from_cubochoric(rot.as_cubochoric()).as_homochoric()
+ok = np.allclose(m,o,atol=atol)
+print(m,o,rot.as_quaternion())
+assert ok and np.linalg.norm(o) < (3.*np.pi/4.)**(1./3.) + 1.e-9

 def test_Quaternion(self,default):
 for rot in default:
-assert np.allclose(rot.asCubochoric(),
-Rotation.fromQuaternion(rot.asQuaternion()).asCubochoric())
+m = rot.as_cubochoric()
+o = Rotation.from_quaternion(rot.as_quaternion()).as_cubochoric()
+ok = np.allclose(m,o,atol=atol)
+print(m,o,rot.as_quaternion())
+assert ok and o.max() < np.pi**(2./3.)*0.5+1.e-9

+@pytest.mark.parametrize('function',[Rotation.from_quaternion,
+Rotation.from_Eulers,
+Rotation.from_axis_angle,
+Rotation.from_matrix,
+Rotation.from_Rodrigues,
+Rotation.from_homochoric])
+def test_invalid_shape(self,function):
+invalid_shape = np.random.random(np.random.randint(8,32,(3)))
+with pytest.raises(ValueError):
+function(invalid_shape)
+
+@pytest.mark.parametrize('function,invalid',[(Rotation.from_quaternion, np.array([-1,0,0,0])),
+(Rotation.from_quaternion, np.array([1,1,1,0])),
+(Rotation.from_Eulers, np.array([1,4,0])),
+(Rotation.from_axis_angle, np.array([1,0,0,4])),
+(Rotation.from_axis_angle, np.array([1,1,0,1])),
+(Rotation.from_matrix, np.random.rand(3,3)),
+(Rotation.from_Rodrigues, np.array([1,0,0,-1])),
+(Rotation.from_Rodrigues, np.array([1,1,0,1])),
+(Rotation.from_homochoric, np.array([2,2,2])) ])
+def test_invalid(self,function,invalid):
+with pytest.raises(ValueError):
+function(invalid)
+
+@pytest.mark.parametrize('conversion',[Rotation.qu2om,
+Rotation.qu2eu,
+Rotation.qu2ax,
+Rotation.qu2ro,
+Rotation.qu2ho,
+Rotation.qu2cu
+])
+def test_quaternion_vectorization(self,default,conversion):
+qu = np.array([rot.as_quaternion() for rot in default])
+conversion(qu.reshape(qu.shape[0]//2,-1,4))
+co = conversion(qu)
+for q,c in zip(qu,co):
+print(q,c)
+assert np.allclose(conversion(q),c)
+
+@pytest.mark.parametrize('conversion',[Rotation.om2qu,
+Rotation.om2eu,
+Rotation.om2ax,
+Rotation.om2ro,
+Rotation.om2ho,
+Rotation.om2cu
+])
+def test_matrix_vectorization(self,default,conversion):
+om = np.array([rot.as_matrix() for rot in default])
+conversion(om.reshape(om.shape[0]//2,-1,3,3))
+co = conversion(om)
+for o,c in zip(om,co):
+print(o,c)
+assert np.allclose(conversion(o),c)
+
+@pytest.mark.parametrize('conversion',[Rotation.eu2qu,
+Rotation.eu2om,
+Rotation.eu2ax,
+Rotation.eu2ro,
+Rotation.eu2ho,
+Rotation.eu2cu
+])
+def test_Euler_vectorization(self,default,conversion):
+eu = np.array([rot.as_Eulers() for rot in default])
+conversion(eu.reshape(eu.shape[0]//2,-1,3))
+co = conversion(eu)
+for e,c in zip(eu,co):
+print(e,c)
+assert np.allclose(conversion(e),c)
+
+@pytest.mark.parametrize('conversion',[Rotation.ax2qu,
+Rotation.ax2om,
+Rotation.ax2eu,
||||||
|
Rotation.ax2ro,
|
||||||
|
Rotation.ax2ho,
|
||||||
|
Rotation.ax2cu
|
||||||
|
])
|
||||||
|
def test_axisAngle_vectorization(self,default,conversion):
|
||||||
|
ax = np.array([rot.as_axis_angle() for rot in default])
|
||||||
|
conversion(ax.reshape(ax.shape[0]//2,-1,4))
|
||||||
|
co = conversion(ax)
|
||||||
|
for a,c in zip(ax,co):
|
||||||
|
print(a,c)
|
||||||
|
assert np.allclose(conversion(a),c)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.parametrize('conversion',[Rotation.ro2qu,
|
||||||
|
Rotation.ro2om,
|
||||||
|
Rotation.ro2eu,
|
||||||
|
Rotation.ro2ax,
|
||||||
|
Rotation.ro2ho,
|
||||||
|
Rotation.ro2cu
|
||||||
|
])
|
||||||
|
def test_Rodrigues_vectorization(self,default,conversion):
|
||||||
|
ro = np.array([rot.as_Rodrigues() for rot in default])
|
||||||
|
conversion(ro.reshape(ro.shape[0]//2,-1,4))
|
||||||
|
co = conversion(ro)
|
||||||
|
for r,c in zip(ro,co):
|
||||||
|
print(r,c)
|
||||||
|
assert np.allclose(conversion(r),c)
|
||||||
|
|
||||||
|
@pytest.mark.parametrize('conversion',[Rotation.ho2qu,
|
||||||
|
Rotation.ho2om,
|
||||||
|
Rotation.ho2eu,
|
||||||
|
Rotation.ho2ax,
|
||||||
|
Rotation.ho2ro,
|
||||||
|
Rotation.ho2cu
|
||||||
|
])
|
||||||
|
def test_homochoric_vectorization(self,default,conversion):
|
||||||
|
ho = np.array([rot.as_homochoric() for rot in default])
|
||||||
|
conversion(ho.reshape(ho.shape[0]//2,-1,3))
|
||||||
|
co = conversion(ho)
|
||||||
|
for h,c in zip(ho,co):
|
||||||
|
print(h,c)
|
||||||
|
assert np.allclose(conversion(h),c)
|
||||||
|
|
||||||
|
@pytest.mark.parametrize('conversion',[Rotation.cu2qu,
|
||||||
|
Rotation.cu2om,
|
||||||
|
Rotation.cu2eu,
|
||||||
|
Rotation.cu2ax,
|
||||||
|
Rotation.cu2ro,
|
||||||
|
Rotation.cu2ho
|
||||||
|
])
|
||||||
|
def test_cubochoric_vectorization(self,default,conversion):
|
||||||
|
cu = np.array([rot.as_cubochoric() for rot in default])
|
||||||
|
conversion(cu.reshape(cu.shape[0]//2,-1,3))
|
||||||
|
co = conversion(cu)
|
||||||
|
for u,c in zip(cu,co):
|
||||||
|
print(u,c)
|
||||||
|
assert np.allclose(conversion(u),c)
|
||||||
|
|
||||||
|
@pytest.mark.parametrize('direction',['forward',
|
||||||
|
'backward'])
|
||||||
|
def test_pyramid_vectorization(self,direction):
|
||||||
|
p = np.random.rand(n,3)
|
||||||
|
o = Rotation._get_pyramid_order(p,direction)
|
||||||
|
for i,o_i in enumerate(o):
|
||||||
|
assert np.all(o_i==Rotation._get_pyramid_order(p[i],direction))
|
||||||
|
|
||||||
|
def test_pyramid_invariant(self):
|
||||||
|
a = np.random.rand(n,3)
|
||||||
|
f = Rotation._get_pyramid_order(a,'forward')
|
||||||
|
b = Rotation._get_pyramid_order(a,'backward')
|
||||||
|
assert np.all(np.take_along_axis(np.take_along_axis(a,f,-1),b,-1) == a)
|
||||||
|
|
|
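For orientation, here is a minimal standalone sketch (not part of the diff) of the round-trip property the tests above exercise. It uses only the damask.Rotation calls that appear in the new test code; the example angles and tolerance are arbitrary choices for illustration, not values from the test file.

    import numpy as np
    from damask import Rotation

    # Euler angles -> Rotation -> quaternion -> Rotation -> Euler angles.
    # q and -q describe the same rotation, which is why the tests also accept m*-1.
    eulers = np.array([0.3, 1.1, 2.5])                        # Bunge Euler angles in radians
    qu     = Rotation.from_Eulers(eulers).as_quaternion()
    back   = Rotation.from_quaternion(qu).as_Eulers()

    assert np.allclose(back, eulers, atol=1.e-9)              # round trip reproduces the input
    assert np.isclose(np.linalg.norm(qu), 1.0)                # unit-quaternion invariant checked above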
@@ -9,13 +9,13 @@ class TestGridFilters:
        size = np.random.random(3)
        grid = np.random.randint(8,32,(3))
        coord = grid_filters.cell_coord0(grid,size)
-       assert np.allclose(coord[0,0,0],size/grid*.5) and coord.shape == tuple(grid[::-1]) + (3,)
+       assert np.allclose(coord[0,0,0],size/grid*.5) and coord.shape == tuple(grid) + (3,)

    def test_node_coord0(self):
        size = np.random.random(3)
        grid = np.random.randint(8,32,(3))
        coord = grid_filters.node_coord0(grid,size)
-       assert np.allclose(coord[-1,-1,-1],size) and coord.shape == tuple(grid[::-1]+1) + (3,)
+       assert np.allclose(coord[-1,-1,-1],size) and coord.shape == tuple(grid+1) + (3,)

    def test_coord0(self):
        size = np.random.random(3)
@@ -31,7 +31,7 @@ class TestGridFilters:
        size   = np.random.random(3)
        origin = np.random.random(3)
        coord0 = eval('grid_filters.{}_coord0(grid,size,origin)'.format(mode))    # noqa
-       _grid,_size,_origin = eval('grid_filters.{}_coord0_gridSizeOrigin(coord0.reshape(-1,3))'.format(mode))
+       _grid,_size,_origin = eval('grid_filters.{}_coord0_gridSizeOrigin(coord0.reshape(-1,3,order="F"))'.format(mode))
        assert np.allclose(grid,_grid) and np.allclose(size,_size) and np.allclose(origin,_origin)

    def test_displacement_fluct_equivalence(self):
@@ -57,9 +57,9 @@ class TestGridFilters:
        shifted   = eval('grid_filters.{}_coord0(grid,size,origin)'.format(mode))
        unshifted = eval('grid_filters.{}_coord0(grid,size)'.format(mode))
        if   mode == 'cell':
-           assert np.allclose(shifted,unshifted+np.broadcast_to(origin,tuple(grid[::-1]) +(3,)))
+           assert np.allclose(shifted,unshifted+np.broadcast_to(origin,tuple(grid)  +(3,)))
        elif mode == 'node':
-           assert np.allclose(shifted,unshifted+np.broadcast_to(origin,tuple(grid[::-1]+1)+(3,)))
+           assert np.allclose(shifted,unshifted+np.broadcast_to(origin,tuple(grid+1)+(3,)))

    @pytest.mark.parametrize('function',[grid_filters.cell_displacement_avg,
                                         grid_filters.node_displacement_avg])
@@ -80,8 +80,227 @@ class TestGridFilters:
        F = np.broadcast_to(np.random.random((3,3)), tuple(grid)+(3,3))
        assert np.allclose(function(size,F),0.0)

+   @pytest.mark.parametrize('function',[grid_filters.coord0_check,
+                                        grid_filters.node_coord0_gridSizeOrigin,
+                                        grid_filters.cell_coord0_gridSizeOrigin])
+   def test_invalid_coordinates(self,function):
+       invalid_coordinates = np.random.random((np.random.randint(12,52),3))
+       with pytest.raises(ValueError):
+           function(invalid_coordinates)
+
+   @pytest.mark.parametrize('function',[grid_filters.node_coord0_gridSizeOrigin,
+                                        grid_filters.cell_coord0_gridSizeOrigin])
+   def test_uneven_spaced_coordinates(self,function):
+       start = np.random.random(3)
+       end   = np.random.random(3)*10. + start
+       grid  = np.random.randint(8,32,(3))
+       uneven = np.stack(np.meshgrid(np.logspace(start[0],end[0],grid[0]),
+                                     np.logspace(start[1],end[1],grid[1]),
+                                     np.logspace(start[2],end[2],grid[2]),indexing = 'ij'),
+                         axis = -1).reshape((grid.prod(),3),order='F')
+       with pytest.raises(ValueError):
+           function(uneven)
+
+   @pytest.mark.parametrize('mode',[True,False])
+   @pytest.mark.parametrize('function',[grid_filters.node_coord0_gridSizeOrigin,
+                                        grid_filters.cell_coord0_gridSizeOrigin])
+   def test_unordered_coordinates(self,function,mode):
+       origin = np.random.random(3)
+       size   = np.random.random(3)*10.+origin
+       grid   = np.random.randint(8,32,(3))
+       unordered = grid_filters.node_coord0(grid,size,origin).reshape(-1,3)
+       if mode:
+           with pytest.raises(ValueError):
+               function(unordered,mode)
+       else:
+           function(unordered,mode)

    def test_regrid(self):
        size = np.random.random(3)
        grid = np.random.randint(8,32,(3))
-       F    = np.broadcast_to(np.eye(3), tuple(grid[::-1])+(3,3))
+       F    = np.broadcast_to(np.eye(3), tuple(grid)+(3,3))
        assert all(grid_filters.regrid(size,F,grid) == np.arange(grid.prod()))

+   @pytest.mark.parametrize('differential_operator',[grid_filters.curl,
+                                                     grid_filters.divergence,
+                                                     grid_filters.gradient])
+   def test_differential_operator_constant(self,differential_operator):
+       size = np.random.random(3)+1.0
+       grid = np.random.randint(8,32,(3))
+       shapes = {
+                 grid_filters.curl:      [(3,),(3,3)],
+                 grid_filters.divergence:[(3,),(3,3)],
+                 grid_filters.gradient:  [(1,),(3,)]
+                }
+       for shape in shapes[differential_operator]:
+           field = np.ones(tuple(grid)+shape)*np.random.random()*1.0e5
+           assert np.allclose(differential_operator(size,field),0.0)
+
+   grad_test_data = [
+       (['np.sin(np.pi*2*nodes[...,0]/size[0])', '0.0', '0.0'],
+        ['np.cos(np.pi*2*nodes[...,0]/size[0])*np.pi*2/size[0]', '0.0', '0.0',
+         '0.0', '0.0', '0.0',
+         '0.0', '0.0', '0.0']),
+
+       (['0.0', 'np.cos(np.pi*2*nodes[...,1]/size[1])', '0.0' ],
+        ['0.0', '0.0', '0.0',
+         '0.0', '-np.pi*2/size[1]*np.sin(np.pi*2*nodes[...,1]/size[1])', '0.0',
+         '0.0', '0.0', '0.0' ]),
+
+       (['1.0', '0.0', '2.0*np.cos(np.pi*2*nodes[...,2]/size[2])'],
+        ['0.0', '0.0', '0.0',
+         '0.0', '0.0', '0.0',
+         '0.0', '0.0', '-2.0*np.pi*2/size[2]*np.sin(np.pi*2*nodes[...,2]/size[2])']),
+
+       (['np.cos(np.pi*2*nodes[...,2]/size[2])', '3.0', 'np.sin(np.pi*2*nodes[...,2]/size[2])'],
+        ['0.0', '0.0', '-np.sin(np.pi*2*nodes[...,2]/size[2])*np.pi*2/size[2]',
+         '0.0', '0.0', '0.0',
+         '0.0', '0.0', ' np.cos(np.pi*2*nodes[...,2]/size[2])*np.pi*2/size[2]']),
+
+       (['np.sin(np.pi*2*nodes[...,0]/size[0])',
+         'np.sin(np.pi*2*nodes[...,1]/size[1])',
+         'np.sin(np.pi*2*nodes[...,2]/size[2])'],
+        ['np.cos(np.pi*2*nodes[...,0]/size[0])*np.pi*2/size[0]', '0.0', '0.0',
+         '0.0', 'np.cos(np.pi*2*nodes[...,1]/size[1])*np.pi*2/size[1]', '0.0',
+         '0.0', '0.0', 'np.cos(np.pi*2*nodes[...,2]/size[2])*np.pi*2/size[2]']),
+
+       (['np.sin(np.pi*2*nodes[...,0]/size[0])'],
+        ['np.cos(np.pi*2*nodes[...,0]/size[0])*np.pi*2/size[0]', '0.0', '0.0']),
+
+       (['8.0'],
+        ['0.0', '0.0', '0.0' ])
+      ]
+
+   @pytest.mark.parametrize('field_def,grad_def',grad_test_data)
+   def test_grad(self,field_def,grad_def):
+       size = np.random.random(3)+1.0
+       grid = np.random.randint(8,32,(3))
+
+       nodes = grid_filters.cell_coord0(grid,size)
+       my_locals = locals()                                    # needed for list comprehension
+
+       field = np.stack([np.broadcast_to(eval(f,globals(),my_locals),grid) for f in field_def],axis=-1)
+       field = field.reshape(tuple(grid) + ((3,) if len(field_def)==3 else (1,)))
+       grad  = np.stack([np.broadcast_to(eval(c,globals(),my_locals),grid) for c in grad_def], axis=-1)
+       grad  = grad.reshape(tuple(grid) + ((3,3) if len(grad_def)==9 else (3,)))
+
+       assert np.allclose(grad,grid_filters.gradient(size,field))
+
+   curl_test_data = [
+       (['np.sin(np.pi*2*nodes[...,2]/size[2])', '0.0', '0.0',
+         '0.0', '0.0', '0.0',
+         '0.0', '0.0', '0.0'],
+        ['0.0', '0.0', '0.0',
+         'np.cos(np.pi*2*nodes[...,2]/size[2])*np.pi*2/size[2]', '0.0', '0.0',
+         '0.0', '0.0', '0.0']),
+
+       (['np.cos(np.pi*2*nodes[...,1]/size[1])', '0.0', '0.0',
+         '0.0', '0.0', '0.0',
+         'np.cos(np.pi*2*nodes[...,0]/size[0])', '0.0', '0.0'],
+        ['0.0', '0.0', '0.0',
+         '0.0', '0.0', '0.0',
+         'np.sin(np.pi*2*nodes[...,1]/size[1])*np.pi*2/size[1]', '0.0', '0.0']),
+
+       (['np.sin(np.pi*2*nodes[...,0]/size[0])','np.cos(np.pi*2*nodes[...,1]/size[1])','np.sin(np.pi*2*nodes[...,2]/size[2])',
+         'np.sin(np.pi*2*nodes[...,0]/size[0])','np.cos(np.pi*2*nodes[...,1]/size[1])','np.sin(np.pi*2*nodes[...,2]/size[2])',
+         'np.sin(np.pi*2*nodes[...,0]/size[0])','np.cos(np.pi*2*nodes[...,1]/size[1])','np.sin(np.pi*2*nodes[...,2]/size[2])'],
+        ['0.0', '0.0', '0.0',
+         '0.0', '0.0', '0.0',
+         '0.0', '0.0', '0.0']),
+
+       (['5.0', '0.0', '0.0',
+         '0.0', '0.0', '0.0',
+         '0.0', '0.0', '2*np.cos(np.pi*2*nodes[...,1]/size[1])'],
+        ['0.0', '0.0', '-2*np.pi*2/size[1]*np.sin(np.pi*2*nodes[...,1]/size[1])',
+         '0.0', '0.0', '0.0',
+         '0.0', '0.0', '0.0']),
+
+       ([ '4*np.sin(np.pi*2*nodes[...,2]/size[2])',
+          '8*np.sin(np.pi*2*nodes[...,0]/size[0])',
+         '16*np.sin(np.pi*2*nodes[...,1]/size[1])'],
+        ['16*np.pi*2/size[1]*np.cos(np.pi*2*nodes[...,1]/size[1])',
+          '4*np.pi*2/size[2]*np.cos(np.pi*2*nodes[...,2]/size[2])',
+          '8*np.pi*2/size[0]*np.cos(np.pi*2*nodes[...,0]/size[0])']),
+
+       (['0.0',
+         'np.cos(np.pi*2*nodes[...,0]/size[0])+5*np.cos(np.pi*2*nodes[...,2]/size[2])',
+         '0.0'],
+        ['5*np.sin(np.pi*2*nodes[...,2]/size[2])*np.pi*2/size[2]',
+         '0.0',
+         '-np.sin(np.pi*2*nodes[...,0]/size[0])*np.pi*2/size[0]'])
+      ]
+
+   @pytest.mark.parametrize('field_def,curl_def',curl_test_data)
+   def test_curl(self,field_def,curl_def):
+       size = np.random.random(3)+1.0
+       grid = np.random.randint(8,32,(3))
+
+       nodes = grid_filters.cell_coord0(grid,size)
+       my_locals = locals()                                    # needed for list comprehension
+
+       field = np.stack([np.broadcast_to(eval(f,globals(),my_locals),grid) for f in field_def],axis=-1)
+       field = field.reshape(tuple(grid) + ((3,3) if len(field_def)==9 else (3,)))
+       curl  = np.stack([np.broadcast_to(eval(c,globals(),my_locals),grid) for c in curl_def], axis=-1)
+       curl  = curl.reshape(tuple(grid) + ((3,3) if len(curl_def)==9 else (3,)))
+
+       assert np.allclose(curl,grid_filters.curl(size,field))
+
+   div_test_data = [
+       (['np.sin(np.pi*2*nodes[...,0]/size[0])', '0.0', '0.0',
+         '0.0', '0.0', '0.0',
+         '0.0', '0.0', '0.0'],
+        ['np.cos(np.pi*2*nodes[...,0]/size[0])*np.pi*2/size[0]','0.0', '0.0']),
+
+       (['0.0', '0.0', '0.0',
+         '0.0', 'np.cos(np.pi*2*nodes[...,1]/size[1])', '0.0',
+         '0.0', '0.0', '0.0'],
+        ['0.0', '-np.sin(np.pi*2*nodes[...,1]/size[1])*np.pi*2/size[1]', '0.0']),
+
+       (['1.0', '0.0', '0.0',
+         '0.0', '0.0', '0.0',
+         '0.0', '0.0', '2*np.cos(np.pi*2*nodes[...,2]/size[2])' ],
+        ['0.0', '0.0', '-2.0*np.pi*2/size[2]*np.sin(np.pi*2*nodes[...,2]/size[2])']),
+
+       ([ '23.0', '0.0',   'np.sin(np.pi*2*nodes[...,2]/size[2])',
+          '0.0',  '100.0', 'np.sin(np.pi*2*nodes[...,2]/size[2])',
+          '0.0',  '0.0',   'np.sin(np.pi*2*nodes[...,2]/size[2])'],
+        ['np.cos(np.pi*2*nodes[...,2]/size[2])*np.pi*2/size[2]',
+         'np.cos(np.pi*2*nodes[...,2]/size[2])*np.pi*2/size[2]',
+         'np.cos(np.pi*2*nodes[...,2]/size[2])*np.pi*2/size[2]']),
+
+       (['400.0', '0.0', '0.0',
+         'np.sin(np.pi*2*nodes[...,0]/size[0])', 'np.sin(np.pi*2*nodes[...,1]/size[1])', 'np.sin(np.pi*2*nodes[...,2]/size[2])',
+         '0.0', '10.0', '6.0'],
+        ['0.0','np.sum(np.cos(np.pi*2*nodes/size)*np.pi*2/size,axis=-1)', '0.0' ]),
+
+       (['np.sin(np.pi*2*nodes[...,0]/size[0])', '0.0', '0.0'],
+        ['np.cos(np.pi*2*nodes[...,0]/size[0])*np.pi*2/size[0]',]),
+
+       (['0.0', 'np.cos(np.pi*2*nodes[...,1]/size[1])', '0.0' ],
+        ['-np.sin(np.pi*2*nodes[...,1]/size[1])*np.pi*2/size[1]'])
+      ]
+
+   @pytest.mark.parametrize('field_def,div_def',div_test_data)
+   def test_div(self,field_def,div_def):
+       size = np.random.random(3)+1.0
+       grid = np.random.randint(8,32,(3))
+
+       nodes = grid_filters.cell_coord0(grid,size)
+       my_locals = locals()                                    # needed for list comprehension
+
+       field = np.stack([np.broadcast_to(eval(f,globals(),my_locals),grid) for f in field_def],axis=-1)
+       field = field.reshape(tuple(grid) + ((3,3) if len(field_def)==9 else (3,)))
+       div   = np.stack([np.broadcast_to(eval(c,globals(),my_locals),grid) for c in div_def], axis=-1)
+       if len(div_def)==3:
+           div = div.reshape(tuple(grid) + ((3,)))
+       else:
+           div = div.reshape(tuple(grid))
+
+       assert np.allclose(div,grid_filters.divergence(size,field))
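As a usage illustration (not part of the diff), the checks encoded in grad_test_data reduce to the sketch below: sample an analytic periodic field at the cell centres and compare the spectral gradient with the analytic derivative. It assumes only the grid_filters functions exercised above and the cell-centred coordinate shape tuple(grid)+(3,) that the updated asserts expect; the grid and size values are arbitrary.

    import numpy as np
    from damask import grid_filters

    grid = np.array([16,16,16])
    size = np.array([1.0,1.0,1.0])
    x    = grid_filters.cell_coord0(grid,size)[...,0]           # x coordinate of each cell centre

    # f(x) = sin(2*pi*x/Lx) is periodic on the box, so its spectral gradient is exact
    field = np.sin(2.0*np.pi*x/size[0]).reshape(tuple(grid)+(1,))
    grad  = grid_filters.gradient(size,field)                   # shape tuple(grid)+(3,)

    assert np.allclose(grad[...,0], 2.0*np.pi/size[0]*np.cos(2.0*np.pi*x/size[0]))
    assert np.allclose(grad[...,1:], 0.0)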
@@ -1,4 +1,6 @@
+import pytest
 import numpy as np

 from damask import mechanics

 class TestMechanics:
@@ -7,127 +9,77 @@ class TestMechanics:
    c = np.random.randint(n)

-   def test_vectorize_Cauchy(self):
-       P = np.random.random((self.n,3,3))
-       F = np.random.random((self.n,3,3))
-       assert np.allclose(mechanics.Cauchy(P,F)[self.c],
-                          mechanics.Cauchy(P[self.c],F[self.c]))
-
-   def test_vectorize_deviatoric_part(self):
-       x = np.random.random((self.n,3,3))
-       assert np.allclose(mechanics.deviatoric_part(x)[self.c],
-                          mechanics.deviatoric_part(x[self.c]))
-
-   def test_vectorize_eigenvalues(self):
-       x = np.random.random((self.n,3,3))
-       assert np.allclose(mechanics.eigenvalues(x)[self.c],
-                          mechanics.eigenvalues(x[self.c]))
-
-   def test_vectorize_eigenvectors(self):
-       x = np.random.random((self.n,3,3))
-       assert np.allclose(mechanics.eigenvectors(x)[self.c],
-                          mechanics.eigenvectors(x[self.c]))
-
-   def test_vectorize_left_stretch(self):
-       x = np.random.random((self.n,3,3))
-       assert np.allclose(mechanics.left_stretch(x)[self.c],
-                          mechanics.left_stretch(x[self.c]))
-
-   def test_vectorize_maximum_shear(self):
-       x = np.random.random((self.n,3,3))
-       assert np.allclose(mechanics.maximum_shear(x)[self.c],
-                          mechanics.maximum_shear(x[self.c]))
-
-   def test_vectorize_Mises_strain(self):
-       epsilon = np.random.random((self.n,3,3))
-       assert np.allclose(mechanics.Mises_strain(epsilon)[self.c],
-                          mechanics.Mises_strain(epsilon[self.c]))
-
-   def test_vectorize_Mises_stress(self):
-       sigma = np.random.random((self.n,3,3))
-       assert np.allclose(mechanics.Mises_stress(sigma)[self.c],
-                          mechanics.Mises_stress(sigma[self.c]))
-
-   def test_vectorize_PK2(self):
-       F = np.random.random((self.n,3,3))
-       P = np.random.random((self.n,3,3))
-       assert np.allclose(mechanics.PK2(P,F)[self.c],
-                          mechanics.PK2(P[self.c],F[self.c]))
-
-   def test_vectorize_right_stretch(self):
-       x = np.random.random((self.n,3,3))
-       assert np.allclose(mechanics.right_stretch(x)[self.c],
-                          mechanics.right_stretch(x[self.c]))
-
-   def test_vectorize_rotational_part(self):
-       x = np.random.random((self.n,3,3))
-       assert np.allclose(mechanics.rotational_part(x)[self.c],
-                          mechanics.rotational_part(x[self.c]))
-
-   def test_vectorize_spherical_part(self):
-       x = np.random.random((self.n,3,3))
-       assert np.allclose(mechanics.spherical_part(x,True)[self.c],
-                          mechanics.spherical_part(x[self.c],True))
-
+   @pytest.mark.parametrize('function',[mechanics.deviatoric_part,
+                                        mechanics.eigenvalues,
+                                        mechanics.eigenvectors,
+                                        mechanics.left_stretch,
+                                        mechanics.maximum_shear,
+                                        mechanics.Mises_strain,
+                                        mechanics.Mises_stress,
+                                        mechanics.right_stretch,
+                                        mechanics.rotational_part,
+                                        mechanics.spherical_part,
+                                        mechanics.symmetric,
+                                        mechanics.transpose,
+                                        ])
+   def test_vectorize_1_arg(self,function):
+       epsilon = np.random.rand(self.n,3,3)
+       assert np.allclose(function(epsilon)[self.c],function(epsilon[self.c]))
+
+   @pytest.mark.parametrize('function',[mechanics.Cauchy,
+                                        mechanics.PK2,
+                                        ])
+   def test_vectorize_2_arg(self,function):
+       P = np.random.rand(self.n,3,3)
+       F = np.random.rand(self.n,3,3)
+       assert np.allclose(function(P,F)[self.c],function(P[self.c],F[self.c]))

    def test_vectorize_strain_tensor(self):
-       F = np.random.random((self.n,3,3))
+       F = np.random.rand(self.n,3,3)
        t = ['V','U'][np.random.randint(0,2)]
        m = np.random.random()*10. -5.0
        assert np.allclose(mechanics.strain_tensor(F,t,m)[self.c],
                           mechanics.strain_tensor(F[self.c],t,m))

-   def test_vectorize_symmetric(self):
-       x = np.random.random((self.n,3,3))
-       assert np.allclose(mechanics.symmetric(x)[self.c],
-                          mechanics.symmetric(x[self.c]))
-
-   def test_vectorize_transpose(self):
-       x = np.random.random((self.n,3,3))
-       assert np.allclose(mechanics.transpose(x)[self.c],
-                          mechanics.transpose(x[self.c]))
-
-   def test_Cauchy(self):
-       """Ensure Cauchy stress is symmetrized 1. Piola-Kirchhoff stress for no deformation."""
-       P = np.random.random((self.n,3,3))
-       assert np.allclose(mechanics.Cauchy(P,np.broadcast_to(np.eye(3),(self.n,3,3))),
-                          mechanics.symmetric(P))
-
+   @pytest.mark.parametrize('function',[mechanics.Cauchy,
+                                        mechanics.PK2,
+                                        ])
+   def test_stress_measures(self,function):
+       """Ensure that all stress measures are equivalent for no deformation."""
+       P = np.random.rand(self.n,3,3)
+       assert np.allclose(function(P,np.broadcast_to(np.eye(3),(self.n,3,3))),mechanics.symmetric(P))
+
+   def test_deviatoric_part(self):
+       I_n = np.broadcast_to(np.eye(3),(self.n,3,3))
+       r   = np.logical_not(I_n)*np.random.rand(self.n,3,3)
+       assert np.allclose(mechanics.deviatoric_part(I_n+r),r)

    def test_polar_decomposition(self):
        """F = RU = VR."""
-       F = np.broadcast_to(np.eye(3),[self.n,3,3])*np.random.random((self.n,3,3))
+       F = np.broadcast_to(np.eye(3),[self.n,3,3])*np.random.rand(self.n,3,3)
        R = mechanics.rotational_part(F)
        V = mechanics.left_stretch(F)
        U = mechanics.right_stretch(F)
        assert np.allclose(np.matmul(R,U),
                           np.matmul(V,R))

-   def test_PK2(self):
-       """Ensure 2. Piola-Kirchhoff stress is symmetrized 1. Piola-Kirchhoff stress for no deformation."""
-       P = np.random.random((self.n,3,3))
-       assert np.allclose(mechanics.PK2(P,np.broadcast_to(np.eye(3),(self.n,3,3))),
-                          mechanics.symmetric(P))
-
    def test_strain_tensor_no_rotation(self):
        """Ensure that left and right stretch give same results for no rotation."""
-       F = np.broadcast_to(np.eye(3),[self.n,3,3])*np.random.random((self.n,3,3))
+       F = np.broadcast_to(np.eye(3),[self.n,3,3])*np.random.rand(self.n,3,3)
        m = np.random.random()*20.0-10.0
        assert np.allclose(mechanics.strain_tensor(F,'U',m),
                           mechanics.strain_tensor(F,'V',m))

    def test_strain_tensor_rotation_equivalence(self):
        """Ensure that left and right strain differ only by a rotation."""
-       F = np.broadcast_to(np.eye(3),[self.n,3,3]) + (np.random.random((self.n,3,3))*0.5 - 0.25)
+       F = np.broadcast_to(np.eye(3),[self.n,3,3]) + (np.random.rand(self.n,3,3)*0.5 - 0.25)
        m = np.random.random()*5.0-2.5
        assert np.allclose(np.linalg.det(mechanics.strain_tensor(F,'U',m)),
                           np.linalg.det(mechanics.strain_tensor(F,'V',m)))

    def test_strain_tensor_rotation(self):
        """Ensure that pure rotation results in no strain."""
-       F = mechanics.rotational_part(np.random.random((self.n,3,3)))
+       F = mechanics.rotational_part(np.random.rand(self.n,3,3))
        t = ['V','U'][np.random.randint(0,2)]
        m = np.random.random()*2.0 - 1.0
        assert np.allclose(mechanics.strain_tensor(F,t,m),
@@ -139,21 +91,20 @@ class TestMechanics:

        Should be +1, but random F might contain a reflection.
        """
-       x = np.random.random((self.n,3,3))
+       x = np.random.rand(self.n,3,3)
        assert np.allclose(np.abs(np.linalg.det(mechanics.rotational_part(x))),
                           1.0)

    def test_spherical_deviatoric_part(self):
        """Ensure that full tensor is sum of spherical and deviatoric part."""
-       x = np.random.random((self.n,3,3))
+       x = np.random.rand(self.n,3,3)
        sph = mechanics.spherical_part(x,True)
        assert np.allclose(sph + mechanics.deviatoric_part(x),
                           x)

    def test_deviatoric_Mises(self):
        """Ensure that Mises equivalent stress depends only on deviatoric part."""
-       x = np.random.random((self.n,3,3))
+       x = np.random.rand(self.n,3,3)
        full = mechanics.Mises_stress(x)
        dev  = mechanics.Mises_stress(mechanics.deviatoric_part(x))
        assert np.allclose(full,
@@ -161,7 +112,7 @@ class TestMechanics:

    def test_spherical_mapping(self):
        """Ensure that mapping to tensor is correct."""
-       x = np.random.random((self.n,3,3))
+       x = np.random.rand(self.n,3,3)
        tensor = mechanics.spherical_part(x,True)
        scalar = mechanics.spherical_part(x)
        assert np.allclose(np.linalg.det(tensor),
@@ -169,35 +120,32 @@ class TestMechanics:

    def test_spherical_Mises(self):
        """Ensure that Mises equivalent strain of spherical strain is 0."""
-       x = np.random.random((self.n,3,3))
+       x = np.random.rand(self.n,3,3)
        sph = mechanics.spherical_part(x,True)
        assert np.allclose(mechanics.Mises_strain(sph),
                           0.0)

    def test_symmetric(self):
        """Ensure that a symmetric tensor is half of the sum of a tensor and its transpose."""
-       x = np.random.random((self.n,3,3))
+       x = np.random.rand(self.n,3,3)
        assert np.allclose(mechanics.symmetric(x)*2.0,
                           mechanics.transpose(x)+x)

    def test_transpose(self):
        """Ensure that a symmetric tensor equals its transpose."""
-       x = mechanics.symmetric(np.random.random((self.n,3,3)))
+       x = mechanics.symmetric(np.random.rand(self.n,3,3))
        assert np.allclose(mechanics.transpose(x),
                           x)

    def test_Mises(self):
        """Ensure that equivalent stress is 3/2 of equivalent strain."""
-       x = np.random.random((self.n,3,3))
+       x = np.random.rand(self.n,3,3)
        assert np.allclose(mechanics.Mises_stress(x)/mechanics.Mises_strain(x),
                           1.5)

    def test_eigenvalues(self):
        """Ensure that the characteristic polynomial can be solved."""
-       A = mechanics.symmetric(np.random.random((self.n,3,3)))
+       A = mechanics.symmetric(np.random.rand(self.n,3,3))
        lambd = mechanics.eigenvalues(A)
        s = np.random.randint(self.n)
        for i in range(3):
@@ -205,7 +153,7 @@ class TestMechanics:

    def test_eigenvalues_and_vectors(self):
        """Ensure that eigenvalues and -vectors are the solution to the characteristic polynomial."""
-       A = mechanics.symmetric(np.random.random((self.n,3,3)))
+       A = mechanics.symmetric(np.random.rand(self.n,3,3))
        lambd = mechanics.eigenvalues(A)
        x = mechanics.eigenvectors(A)
        s = np.random.randint(self.n)
@@ -214,12 +162,12 @@ class TestMechanics:

    def test_eigenvectors_RHS(self):
        """Ensure that RHS coordinate system does only change sign of determinant."""
-       A = mechanics.symmetric(np.random.random((self.n,3,3)))
+       A = mechanics.symmetric(np.random.rand(self.n,3,3))
        LRHS = np.linalg.det(mechanics.eigenvectors(A,RHS=False))
        RHS  = np.linalg.det(mechanics.eigenvectors(A,RHS=True))
        assert np.allclose(np.abs(LRHS),RHS)

    def test_spherical_no_shear(self):
        """Ensure that spherical stress has max shear of 0.0."""
-       A = mechanics.spherical_part(mechanics.symmetric(np.random.random((self.n,3,3))),True)
+       A = mechanics.spherical_part(mechanics.symmetric(np.random.rand(self.n,3,3)),True)
        assert np.allclose(mechanics.maximum_shear(A),0.0)
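The consolidation into test_vectorize_1_arg and test_vectorize_2_arg rests on one contract, sketched below for reference (not part of the diff; the array size n here is an arbitrary example value, not the class attribute): applying a mechanics function to a stack of tensors and then selecting one entry must give the same result as applying it to that entry alone.

    import numpy as np
    from damask import mechanics

    n = 10
    x = np.random.rand(n,3,3)           # stack of n random 3x3 tensors
    c = np.random.randint(n)            # pick one entry to compare against

    assert np.allclose(mechanics.symmetric(x)[c], mechanics.symmetric(x[c]))
    assert np.allclose(mechanics.Cauchy(x,x)[c],  mechanics.Cauchy(x[c],x[c]))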
@@ -10,6 +10,7 @@ module CPFEM
   use FEsolving
   use math
   use rotations
+  use YAML_types
   use discretization_marc
   use material
   use config
@@ -73,28 +74,25 @@ subroutine CPFEM_initAll(el,ip)
  integer(pInt), intent(in) :: el, &  !< FE el number
                               ip     !< FE integration point number

-  !$OMP CRITICAL(init)
-  if (.not. CPFEM_init_done) then
-    call DAMASK_interface_init
-    call prec_init
-    call IO_init
-    call numerics_init
-    call debug_init
-    call config_init
-    call math_init
-    call rotations_init
-    call HDF5_utilities_init
-    call results_init
-    call discretization_marc_init(ip, el)
-    call lattice_init
-    call material_init
-    call constitutive_init
-    call crystallite_init
-    call homogenization_init
-    call CPFEM_init
-    CPFEM_init_done = .true.
-  endif
-  !$OMP END CRITICAL(init)
+  CPFEM_init_done = .true.
+  call DAMASK_interface_init
+  call prec_init
+  call IO_init
+  call numerics_init
+  call debug_init
+  call config_init
+  call math_init
+  call rotations_init
+  call YAML_types_init
+  call HDF5_utilities_init
+  call results_init
+  call discretization_marc_init(ip, el)
+  call lattice_init
+  call material_init
+  call constitutive_init
+  call crystallite_init
+  call homogenization_init
+  call CPFEM_init

 end subroutine CPFEM_initAll
@@ -11,6 +11,7 @@ module CPFEM2
   use FEsolving
   use math
   use rotations
+  use YAML_types
   use material
   use lattice
   use IO
@@ -50,6 +51,7 @@ subroutine CPFEM_initAll
   call config_init
   call math_init
   call rotations_init
+  call YAML_types_init
   call lattice_init
   call HDF5_utilities_init
   call results_init
@@ -261,11 +261,10 @@ subroutine hypela2(d,g,e,de,s,t,dt,ngens,m,nn,kcus,matus,ndi,nshear,disp, &
   endif

 !$ defaultNumThreadsInt = omp_get_num_threads()       ! remember number of threads set by Marc
+!$ call omp_set_num_threads(DAMASK_NumThreadsInt)     ! set number of threads for parallel execution set by DAMASK_NUM_THREADS

   if (.not. CPFEM_init_done) call CPFEM_initAll(m(1),nn)

-!$ call omp_set_num_threads(DAMASK_NumThreadsInt)     ! set number of threads for parallel execution set by DAMASK_NUM_THREADS
-
   computationMode = 0                                 ! save initialization value, since it does not result in any calculation
   if (lovl == 4 ) then                                ! jacobian requested by marc
     if (timinc < theDelta .and. theInc == inc .and. lastLovl /= lovl) &   ! first after cutback
src/IO.f90 (174 changed lines)
@@ -29,9 +29,13 @@ module IO
     IO_getTag, &
     IO_stringPos, &
     IO_stringValue, &
-    IO_floatValue, &
     IO_intValue, &
+    IO_floatValue, &
     IO_lc, &
+    IO_rmComment, &
+    IO_stringAsInt, &
+    IO_stringAsFloat, &
+    IO_stringAsBool, &
     IO_error, &
     IO_warning

@@ -250,7 +254,7 @@ integer function IO_intValue(string,chunkPos,myChunk)
   integer, dimension(:), intent(in) :: chunkPos  !< positions of start and end of each tag/chunk in given string
   integer,               intent(in) :: myChunk   !< position number of desired chunk

-  IO_intValue = verifyIntValue(IO_stringValue(string,chunkPos,myChunk))
+  IO_intValue = IO_stringAsInt(IO_stringValue(string,chunkPos,myChunk))

 end function IO_intValue

@@ -264,7 +268,7 @@ real(pReal) function IO_floatValue(string,chunkPos,myChunk)
   integer, dimension(:), intent(in) :: chunkPos  !< positions of start and end of each tag/chunk in given string
   integer,               intent(in) :: myChunk   !< position number of desired chunk

-  IO_floatValue = verifyFloatValue(IO_stringValue(string,chunkPos,myChunk))
+  IO_floatValue = IO_stringAsFloat(IO_stringValue(string,chunkPos,myChunk))

 end function IO_floatValue

@@ -294,6 +298,88 @@ pure function IO_lc(string)
 end function IO_lc


+!--------------------------------------------------------------------------------------------------
+!> @brief Remove comments (characters beyond '#') and trailing space
+! ToDo: Discuss name (the trim aspect is not clear)
+!--------------------------------------------------------------------------------------------------
+function IO_rmComment(line)
+
+  character(len=*), intent(in)  :: line
+  character(len=:), allocatable :: IO_rmComment
+  integer :: split
+
+  split = index(line,IO_COMMENT)
+
+  if (split == 0) then
+    IO_rmComment = trim(line)
+  else
+    IO_rmComment = trim(line(:split-1))
+  endif
+
+end function IO_rmComment
+
+
+!--------------------------------------------------------------------------------------------------
+!> @brief return verified integer value in given string
+!--------------------------------------------------------------------------------------------------
+integer function IO_stringAsInt(string)
+
+  character(len=*), intent(in) :: string  !< string for conversion to int value
+
+  integer                      :: readStatus
+  character(len=*), parameter  :: VALIDCHARS = '0123456789+- '
+
+  valid: if (verify(string,VALIDCHARS) == 0) then
+    read(string,*,iostat=readStatus) IO_stringAsInt
+    if (readStatus /= 0) call IO_error(111,ext_msg=string)
+  else valid
+    IO_stringAsInt = 0
+    call IO_error(111,ext_msg=string)
+  endif valid
+
+end function IO_stringAsInt
+
+
+!--------------------------------------------------------------------------------------------------
+!> @brief return verified float value in given string
+!--------------------------------------------------------------------------------------------------
+real(pReal) function IO_stringAsFloat(string)
+
+  character(len=*), intent(in) :: string  !< string for conversion to float value
+
+  integer                      :: readStatus
+  character(len=*), parameter  :: VALIDCHARS = '0123456789eE.+- '
+
+  valid: if (verify(string,VALIDCHARS) == 0) then
+    read(string,*,iostat=readStatus) IO_stringAsFloat
+    if (readStatus /= 0) call IO_error(112,ext_msg=string)
+  else valid
+    IO_stringAsFloat = 0.0_pReal
+    call IO_error(112,ext_msg=string)
+  endif valid
+
+end function IO_stringAsFloat
+
+
+!--------------------------------------------------------------------------------------------------
+!> @brief return verified logical value in given string
+!--------------------------------------------------------------------------------------------------
+logical function IO_stringAsBool(string)
+
+  character(len=*), intent(in) :: string  !< string for conversion to logical value
+
+  if     (trim(adjustl(string)) == 'True') then
+    IO_stringAsBool = .true.
+  elseif (trim(adjustl(string)) == 'False') then
+    IO_stringAsBool = .false.
+  else
+    IO_stringAsBool = .false.
+    call IO_error(113,ext_msg=string)
+  endif
+
+end function IO_stringAsBool
+
+
 !--------------------------------------------------------------------------------------------------
 !> @brief write error statements to standard out and terminate the Marc/spectral run with exit #9xxx
 !--------------------------------------------------------------------------------------------------
@@ -335,7 +421,8 @@ subroutine IO_error(error_ID,el,ip,g,instance,ext_msg)
       msg = 'invalid character for int:'
     case (112)
       msg = 'invalid character for float:'
+    case (113)
+      msg = 'invalid character for logical:'

 !--------------------------------------------------------------------------------------------------
 ! lattice error messages
     case (130)
@@ -606,51 +693,6 @@ subroutine IO_warning(warning_ID,el,ip,g,ext_msg)
 end subroutine IO_warning


-!--------------------------------------------------------------------------------------------------
-! internal helper functions
-!--------------------------------------------------------------------------------------------------
-!> @brief returns verified integer value in given string
-!--------------------------------------------------------------------------------------------------
-integer function verifyIntValue(string)
-
-  character(len=*), intent(in) :: string  !< string for conversion to int value
-
-  integer                      :: readStatus
-  character(len=*), parameter  :: VALIDCHARS = '0123456789+- '
-
-  valid: if (verify(string,VALIDCHARS) == 0) then
-    read(string,*,iostat=readStatus) verifyIntValue
-    if (readStatus /= 0) call IO_error(111,ext_msg=string)
-  else valid
-    verifyIntValue = 0
-    call IO_error(111,ext_msg=string)
-  endif valid
-
-end function verifyIntValue
-
-
-!--------------------------------------------------------------------------------------------------
-!> @brief returns verified float value in given string
-!--------------------------------------------------------------------------------------------------
-real(pReal) function verifyFloatValue(string)
-
-  character(len=*), intent(in) :: string  !< string for conversion to float value
-
-  integer                      :: readStatus
-  character(len=*), parameter  :: VALIDCHARS = '0123456789eE.+- '
-
-  valid: if (verify(string,VALIDCHARS) == 0) then
-    read(string,*,iostat=readStatus) verifyFloatValue
-    if (readStatus /= 0) call IO_error(112,ext_msg=string)
-  else valid
-    verifyFloatValue = 0.0_pReal
-    call IO_error(112,ext_msg=string)
-  endif valid
-
-end function verifyFloatValue
-
-
 !--------------------------------------------------------------------------------------------------
 !> @brief check correctness of some IO functions
 !--------------------------------------------------------------------------------------------------
@@ -659,14 +701,19 @@ subroutine unitTest
   integer, dimension(:), allocatable :: chunkPos
   character(len=:),      allocatable :: str

-  if(dNeq(1.0_pReal, verifyFloatValue('1.0')))   call IO_error(0,ext_msg='verifyFloatValue')
-  if(dNeq(1.0_pReal, verifyFloatValue('1e0')))   call IO_error(0,ext_msg='verifyFloatValue')
-  if(dNeq(0.1_pReal, verifyFloatValue('1e-1')))  call IO_error(0,ext_msg='verifyFloatValue')
+  if(dNeq(1.0_pReal, IO_stringAsFloat('1.0')))   call IO_error(0,ext_msg='IO_stringAsFloat')
+  if(dNeq(1.0_pReal, IO_stringAsFloat('1e0')))   call IO_error(0,ext_msg='IO_stringAsFloat')
+  if(dNeq(0.1_pReal, IO_stringAsFloat('1e-1')))  call IO_error(0,ext_msg='IO_stringAsFloat')

-  if(3112019  /= verifyIntValue( '3112019'))     call IO_error(0,ext_msg='verifyIntValue')
-  if(3112019  /= verifyIntValue(' 3112019'))     call IO_error(0,ext_msg='verifyIntValue')
-  if(-3112019 /= verifyIntValue('-3112019'))     call IO_error(0,ext_msg='verifyIntValue')
-  if(3112019  /= verifyIntValue('+3112019 '))    call IO_error(0,ext_msg='verifyIntValue')
+  if(3112019  /= IO_stringAsInt( '3112019'))     call IO_error(0,ext_msg='IO_stringAsInt')
+  if(3112019  /= IO_stringAsInt(' 3112019'))     call IO_error(0,ext_msg='IO_stringAsInt')
+  if(-3112019 /= IO_stringAsInt('-3112019'))     call IO_error(0,ext_msg='IO_stringAsInt')
+  if(3112019  /= IO_stringAsInt('+3112019 '))    call IO_error(0,ext_msg='IO_stringAsInt')
+
+  if(.not. IO_stringAsBool(' True'))             call IO_error(0,ext_msg='IO_stringAsBool')
+  if(.not. IO_stringAsBool(' True '))            call IO_error(0,ext_msg='IO_stringAsBool')
+  if(      IO_stringAsBool(' False'))            call IO_error(0,ext_msg='IO_stringAsBool')
+  if(      IO_stringAsBool('False'))             call IO_error(0,ext_msg='IO_stringAsBool')

   if(any([1,1,1]     /= IO_stringPos('a')))      call IO_error(0,ext_msg='IO_stringPos')
   if(any([2,2,3,5,5] /= IO_stringPos(' aa b')))  call IO_error(0,ext_msg='IO_stringPos')
@@ -683,6 +730,21 @@ subroutine unitTest
   if(.not. IO_isBlank(' #isBlank'))              call IO_error(0,ext_msg='IO_isBlank/2')
   if(      IO_isBlank(' i#s'))                   call IO_error(0,ext_msg='IO_isBlank/3')

+  str = IO_rmComment('#')
+  if (str /= ''   .or. len(str) /= 0)            call IO_error(0,ext_msg='IO_rmComment/1')
+  str = IO_rmComment(' #')
+  if (str /= ''   .or. len(str) /= 0)            call IO_error(0,ext_msg='IO_rmComment/2')
+  str = IO_rmComment(' # ')
+  if (str /= ''   .or. len(str) /= 0)            call IO_error(0,ext_msg='IO_rmComment/3')
+  str = IO_rmComment(' # a')
+  if (str /= ''   .or. len(str) /= 0)            call IO_error(0,ext_msg='IO_rmComment/4')
+  str = IO_rmComment(' # a')
+  if (str /= ''   .or. len(str) /= 0)            call IO_error(0,ext_msg='IO_rmComment/5')
+  str = IO_rmComment(' a#')
+  if (str /= ' a' .or. len(str) /= 2)            call IO_error(0,ext_msg='IO_rmComment/6')
+  str = IO_rmComment(' ab #')
+  if (str /= ' ab'.or. len(str) /= 3)            call IO_error(0,ext_msg='IO_rmComment/7')
+
 end subroutine unitTest

 end module IO
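For readers of the new IO helpers, here is a rough Python paraphrase (not DAMASK code, helper names are hypothetical) of the accept/reject behaviour that IO_stringAsInt and IO_stringAsBool implement and that the extended unitTest above checks: a character screen first, then conversion, and an error otherwise.

    # Illustrative only: mirrors the Fortran logic, not its exact list-directed read semantics.
    def string_as_int(s: str) -> int:
        if set(s) - set('0123456789+- '):          # same character screen as VALIDCHARS in IO_stringAsInt
            raise ValueError('invalid character for int: ' + s)
        return int(s)

    def string_as_bool(s: str) -> bool:
        if s.strip() == 'True':  return True       # only 'True'/'False' (after trimming) are accepted
        if s.strip() == 'False': return False
        raise ValueError('invalid character for logical: ' + s)

    assert string_as_int(' 3112019') == 3112019    # mirrors the unit tests above
    assert string_as_bool(' True ') is True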
src/Lambert.f90 (201 changed lines, file deleted)
@@ -1,201 +0,0 @@
-! ###################################################################
-! Copyright (c) 2013-2015, Marc De Graef/Carnegie Mellon University
-! Modified      2017-2019, Martin Diehl/Max-Planck-Institut für Eisenforschung GmbH
-! All rights reserved.
-!
-! Redistribution and use in source and binary forms, with or without modification, are
-! permitted provided that the following conditions are met:
-!
-!     - Redistributions of source code must retain the above copyright notice, this list
-!        of conditions and the following disclaimer.
-!     - Redistributions in binary form must reproduce the above copyright notice, this
-!        list of conditions and the following disclaimer in the documentation and/or
-!        other materials provided with the distribution.
-!     - Neither the names of Marc De Graef, Carnegie Mellon University nor the names
-!        of its contributors may be used to endorse or promote products derived from
-!        this software without specific prior written permission.
-!
-! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-! DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-! SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-! CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-! OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-! USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-! ###################################################################
-
-!--------------------------------------------------------------------------
-!> @author Marc De Graef, Carnegie Mellon University
-!> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH
-!> @brief Mapping homochoric <-> cubochoric
-!
-!> @details
-!> D. Rosca, A. Morawiec, and M. De Graef. “A new method of constructing a grid
-!> in the space of 3D rotations and its applications to texture analysis”.
-!> Modeling and Simulations in Materials Science and Engineering 22, 075013 (2014).
-!--------------------------------------------------------------------------
-module Lambert
-  use prec
-  use math
-
-  implicit none
-  private
-
-  real(pReal), parameter :: &
-    SPI  = sqrt(PI), &
-    PREF = sqrt(6.0_pReal/PI), &
-    A    = PI**(5.0_pReal/6.0_pReal)/6.0_pReal**(1.0_pReal/6.0_pReal), &
-    AP   = PI**(2.0_pReal/3.0_pReal), &
-    SC   = A/AP, &
-    BETA = A/2.0_pReal, &
-    R1   = (3.0_pReal*PI/4.0_pReal)**(1.0_pReal/3.0_pReal), &
-    R2   = sqrt(2.0_pReal), &
-    PI12 = PI/12.0_pReal, &
-    PREK = R1 * 2.0_pReal**(1.0_pReal/4.0_pReal)/BETA
-
-  public :: &
-    Lambert_CubeToBall, &
-    Lambert_BallToCube
-
-contains
-
-
-!--------------------------------------------------------------------------
-!> @author Marc De Graef, Carnegie Mellon University
-!> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH
-!> @brief map from 3D cubic grid to 3D ball
-!--------------------------------------------------------------------------
-pure function Lambert_CubeToBall(cube) result(ball)
-
-  real(pReal), intent(in), dimension(3) :: cube
-  real(pReal),             dimension(3) :: ball, LamXYZ, XYZ
-  real(pReal),             dimension(2) :: T
-  real(pReal)                           :: c, s, q
-  real(pReal), parameter                :: eps = 1.0e-8_pReal
-  integer,                 dimension(3) :: p
-  integer,                 dimension(2) :: order
-
-  if (maxval(abs(cube)) > AP/2.0+eps) then
-    ball = IEEE_value(cube,IEEE_positive_inf)
-    return
-  end if
-
-  ! transform to the sphere grid via the curved square, and intercept the zero point
-  center: if (all(dEq0(cube))) then
-    ball = 0.0_pReal
-  else center
-    ! get pyramide and scale by grid parameter ratio
-    p = GetPyramidOrder(cube)
-    XYZ = cube(p) * sc
-
-    ! intercept all the points along the z-axis
-    special: if (all(dEq0(XYZ(1:2)))) then
-      LamXYZ = [ 0.0_pReal, 0.0_pReal, pref * XYZ(3) ]
-    else special
-      order = merge( [2,1], [1,2], abs(XYZ(2)) <= abs(XYZ(1)))     ! order of absolute values of XYZ
-      q = PI12 * XYZ(order(1))/XYZ(order(2))                       ! smaller by larger
-      c = cos(q)
-      s = sin(q)
-      q = prek * XYZ(order(2))/ sqrt(R2-c)
-      T = [ (R2*c - 1.0), R2 * s] * q
-
-      ! transform to sphere grid (inverse Lambert)
-      ! [note that there is no need to worry about dividing by zero, since XYZ(3) can not become zero]
-      c = sum(T**2)
-      s = Pi * c/(24.0*XYZ(3)**2)
-      c = sPi * c / sqrt(24.0_pReal) / XYZ(3)
-      q = sqrt( 1.0 - s )
-      LamXYZ = [ T(order(2)) * q, T(order(1)) * q, pref * XYZ(3) - c ]
-    endif special
-
-    ! reverse the coordinates back to order according to the original pyramid number
-    ball = LamXYZ(p)
-
-  endif center
-
-end function Lambert_CubeToBall
-
-
-!--------------------------------------------------------------------------
-!> @author Marc De Graef, Carnegie Mellon University
-!> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH
-!> @brief map from 3D ball to 3D cubic grid
-!--------------------------------------------------------------------------
-pure function Lambert_BallToCube(xyz) result(cube)
-
-  real(pReal), intent(in), dimension(3) :: xyz
-  real(pReal),             dimension(3) :: cube, xyz1, xyz3
-  real(pReal),             dimension(2) :: Tinv, xyz2
-  real(pReal)                           :: rs, qxy, q2, sq2, q, tt
-  integer,                 dimension(3) :: p
-
-  rs = norm2(xyz)
-  if (rs > R1) then
-    cube = IEEE_value(cube,IEEE_positive_inf)
-    return
-  endif
-
-  center: if (all(dEq0(xyz))) then
-    cube = 0.0_pReal
-  else center
-    p = GetPyramidOrder(xyz)
-    xyz3 = xyz(p)
|
|
||||||
|
|
||||||
! inverse M_3
|
|
||||||
xyz2 = xyz3(1:2) * sqrt( 2.0*rs/(rs+abs(xyz3(3))) )
|
|
||||||
|
|
||||||
! inverse M_2
|
|
||||||
qxy = sum(xyz2**2)
|
|
||||||
|
|
||||||
special: if (dEq0(qxy)) then
|
|
||||||
Tinv = 0.0_pReal
|
|
||||||
else special
|
|
||||||
q2 = qxy + maxval(abs(xyz2))**2
|
|
||||||
sq2 = sqrt(q2)
|
|
||||||
q = (beta/R2/R1) * sqrt(q2*qxy/(q2-maxval(abs(xyz2))*sq2))
|
|
||||||
tt = (minval(abs(xyz2))**2+maxval(abs(xyz2))*sq2)/R2/qxy
|
|
||||||
Tinv = q * sign(1.0_pReal,xyz2) * merge([ 1.0_pReal, acos(math_clip(tt,-1.0_pReal,1.0_pReal))/PI12], &
|
|
||||||
[ acos(math_clip(tt,-1.0_pReal,1.0_pReal))/PI12, 1.0_pReal], &
|
|
||||||
abs(xyz2(2)) <= abs(xyz2(1)))
|
|
||||||
endif special
|
|
||||||
|
|
||||||
! inverse M_1
|
|
||||||
xyz1 = [ Tinv(1), Tinv(2), sign(1.0_pReal,xyz3(3)) * rs / pref ] /sc
|
|
||||||
|
|
||||||
! reverse the coordinates back to order according to the original pyramid number
|
|
||||||
cube = xyz1(p)
|
|
||||||
|
|
||||||
endif center
|
|
||||||
|
|
||||||
end function Lambert_BallToCube
|
|
||||||
|
|
||||||
|
|
||||||
!--------------------------------------------------------------------------
|
|
||||||
!> @author Marc De Graef, Carnegie Mellon University
|
|
||||||
!> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH
|
|
||||||
!> @brief determine to which pyramid a point in a cubic grid belongs
|
|
||||||
!--------------------------------------------------------------------------
|
|
||||||
pure function GetPyramidOrder(xyz)
|
|
||||||
|
|
||||||
real(pReal),intent(in),dimension(3) :: xyz
|
|
||||||
integer, dimension(3) :: GetPyramidOrder
|
|
||||||
|
|
||||||
if (((abs(xyz(1)) <= xyz(3)).and.(abs(xyz(2)) <= xyz(3))) .or. &
|
|
||||||
((abs(xyz(1)) <= -xyz(3)).and.(abs(xyz(2)) <= -xyz(3)))) then
|
|
||||||
GetPyramidOrder = [1,2,3]
|
|
||||||
else if (((abs(xyz(3)) <= xyz(1)).and.(abs(xyz(2)) <= xyz(1))) .or. &
|
|
||||||
((abs(xyz(3)) <= -xyz(1)).and.(abs(xyz(2)) <= -xyz(1)))) then
|
|
||||||
GetPyramidOrder = [2,3,1]
|
|
||||||
else if (((abs(xyz(1)) <= xyz(2)).and.(abs(xyz(3)) <= xyz(2))) .or. &
|
|
||||||
((abs(xyz(1)) <= -xyz(2)).and.(abs(xyz(3)) <= -xyz(2)))) then
|
|
||||||
GetPyramidOrder = [3,1,2]
|
|
||||||
else
|
|
||||||
GetPyramidOrder = -1 ! should be impossible, but might simplify debugging
|
|
||||||
end if
|
|
||||||
|
|
||||||
end function GetPyramidOrder
|
|
||||||
|
|
||||||
end module Lambert
|
|
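Aside (not part of the commit itself): the grid constants of the deleted Lambert module are tied together by an equal-volume requirement. The cubochoric cube of edge length AP and the homochoric ball of radius R1 enclose the same volume, which is what makes the cube/ball mapping volume preserving. A quick check of that identity:

  V_{\mathrm{cube}} = \mathrm{AP}^3 = \left(\pi^{2/3}\right)^3 = \pi^2, \qquad
  V_{\mathrm{ball}} = \tfrac{4}{3}\pi R_1^3 = \tfrac{4}{3}\pi \cdot \tfrac{3\pi}{4} = \pi^2 .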
File diff suppressed because it is too large
@@ -7,12 +7,12 @@
 #include "numerics.f90"
 #include "debug.f90"
 #include "list.f90"
+#include "YAML_types.f90"
 #include "future.f90"
 #include "config.f90"
 #include "LAPACK_interface.f90"
 #include "math.f90"
 #include "quaternions.f90"
-#include "Lambert.f90"
 #include "rotations.f90"
 #include "FEsolving.f90"
 #include "element.f90"
@@ -327,7 +327,7 @@ module constitutive
     constitutive_initialFi, &
     constitutive_SandItsTangents, &
     constitutive_collectDotState, &
-    constitutive_collectDeltaState, &
+    constitutive_deltaState, &
     constitutive_results
 
 contains
@@ -709,12 +709,14 @@ end subroutine constitutive_hooke_SandItsTangents
 !--------------------------------------------------------------------------------------------------
 !> @brief contains the constitutive equation for calculating the rate of change of microstructure
 !--------------------------------------------------------------------------------------------------
-subroutine constitutive_collectDotState(S, FArray, Fi, FpArray, subdt, ipc, ip, el)
+function constitutive_collectDotState(S, FArray, Fi, FpArray, subdt, ipc, ip, el,phase,of) result(broken)
 
   integer, intent(in) :: &
     ipc, &                                                                                          !< component-ID of integration point
     ip, &                                                                                           !< integration point
-    el                                                                                              !< element
+    el, &                                                                                           !< element
+    phase, &
+    of
   real(pReal), intent(in) :: &
     subdt                                                                                           !< timestep
   real(pReal), intent(in), dimension(3,3,homogenization_maxNgrains,discretization_nIP,discretization_nElem) :: &
@@ -730,16 +732,16 @@ subroutine constitutive_collectDotState(S, FArray, Fi, FpArray, subdt, ipc, ip,
     ho, &                                                                                           !< homogenization
     tme, &                                                                                          !< thermal member position
     i, &                                                                                            !< counter in source loop
-    instance, of
+    instance
+  logical :: broken
 
   ho = material_homogenizationAt(el)
   tme = thermalMapping(ho)%p(ip,el)
-  of = material_phasememberAt(ipc,ip,el)
-  instance = phase_plasticityInstance(material_phaseAt(ipc,el))
+  instance = phase_plasticityInstance(phase)
 
   Mp = matmul(matmul(transpose(Fi),Fi),S)
 
-  plasticityType: select case (phase_plasticity(material_phaseAt(ipc,el)))
+  plasticityType: select case (phase_plasticity(phase))
 
     case (PLASTICITY_ISOTROPIC_ID) plasticityType
       call plastic_isotropic_dotState (Mp,instance,of)
@@ -760,10 +762,11 @@ subroutine constitutive_collectDotState(S, FArray, Fi, FpArray, subdt, ipc, ip,
       call plastic_nonlocal_dotState (Mp,FArray,FpArray,temperature(ho)%p(tme),subdt, &
                                       instance,of,ip,el)
   end select plasticityType
+  broken = any(IEEE_is_NaN(plasticState(phase)%dotState(:,of)))
 
-  SourceLoop: do i = 1, phase_Nsources(material_phaseAt(ipc,el))
+  SourceLoop: do i = 1, phase_Nsources(phase)
 
-    sourceType: select case (phase_source(i,material_phaseAt(ipc,el)))
+    sourceType: select case (phase_source(i,phase))
 
       case (SOURCE_damage_anisoBrittle_ID) sourceType
         call source_damage_anisoBrittle_dotState (S, ipc, ip, el) !< correct stress?
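The broken flag introduced here replaces the old convention of letting callers probe the state arrays for NaNs themselves. A minimal, self-contained sketch of the pattern (plain Fortran, not DAMASK code; the names are invented for illustration):

  program dotstate_flag_demo
    use, intrinsic :: ieee_arithmetic
    implicit none
    real    :: dotState(3)                                   ! stand-in for plasticState(phase)%dotState(:,of)
    logical :: broken

    dotState = [1.0, 2.0, 3.0]
    broken = any(ieee_is_nan(dotState))                      ! healthy rates -> .false.
    print *, 'broken (healthy)  :', broken

    dotState(2) = ieee_value(dotState(2), ieee_quiet_nan)    ! a constitutive model produced NaN
    broken = any(ieee_is_nan(dotState))                      ! caller can now trigger a cutback
    print *, 'broken (with NaN) :', broken
  end program dotstate_flag_demo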
@@ -775,25 +778,29 @@ subroutine constitutive_collectDotState(S, FArray, Fi, FpArray, subdt, ipc, ip,
         call source_damage_anisoDuctile_dotState ( ipc, ip, el)
 
       case (SOURCE_thermal_externalheat_ID) sourceType
-        call source_thermal_externalheat_dotState(material_phaseAt(ipc,el),of)
+        call source_thermal_externalheat_dotState(phase,of)
 
     end select sourceType
 
+    broken = broken .or. any(IEEE_is_NaN(sourceState(phase)%p(i)%dotState(:,of)))
+
   enddo SourceLoop
 
-end subroutine constitutive_collectDotState
+end function constitutive_collectDotState
 
 
 !--------------------------------------------------------------------------------------------------
 !> @brief for constitutive models having an instantaneous change of state
 !> will return false if delta state is not needed/supported by the constitutive model
 !--------------------------------------------------------------------------------------------------
-subroutine constitutive_collectDeltaState(S, Fe, Fi, ipc, ip, el)
+function constitutive_deltaState(S, Fe, Fi, ipc, ip, el, phase, of) result(broken)
 
   integer, intent(in) :: &
     ipc, &                                                                                          !< component-ID of integration point
     ip, &                                                                                           !< integration point
-    el                                                                                              !< element
+    el, &                                                                                           !< element
+    phase, &
+    of
   real(pReal), intent(in), dimension(3,3) :: &
     S, &                                                                                            !< 2nd Piola Kirchhoff stress
     Fe, &                                                                                           !< elastic deformation gradient
@@ -802,35 +809,62 @@ subroutine constitutive_collectDeltaState(S, Fe, Fi, ipc, ip, el)
     Mp
   integer :: &
     i, &
-    instance, of
+    instance, &
+    myOffset, &
+    mySize
+  logical :: &
+    broken
 
   Mp = matmul(matmul(transpose(Fi),Fi),S)
-  of = material_phasememberAt(ipc,ip,el)
-  instance = phase_plasticityInstance(material_phaseAt(ipc,el))
+  instance = phase_plasticityInstance(phase)
 
-  plasticityType: select case (phase_plasticity(material_phaseAt(ipc,el)))
+  plasticityType: select case (phase_plasticity(phase))
 
     case (PLASTICITY_KINEHARDENING_ID) plasticityType
       call plastic_kinehardening_deltaState(Mp,instance,of)
+      broken = any(IEEE_is_NaN(plasticState(phase)%deltaState(:,of)))
 
     case (PLASTICITY_NONLOCAL_ID) plasticityType
      call plastic_nonlocal_deltaState(Mp,instance,of,ip,el)
+      broken = any(IEEE_is_NaN(plasticState(phase)%deltaState(:,of)))
+
+    case default
+      broken = .false.
 
  end select plasticityType
 
-  sourceLoop: do i = 1, phase_Nsources(material_phaseAt(ipc,el))
-
-    sourceType: select case (phase_source(i,material_phaseAt(ipc,el)))
+  if(.not. broken) then
+    select case(phase_plasticity(phase))
+      case (PLASTICITY_NONLOCAL_ID,PLASTICITY_KINEHARDENING_ID)
+        myOffset = plasticState(phase)%offsetDeltaState
+        mySize   = plasticState(phase)%sizeDeltaState
+        plasticState(phase)%state(myOffset + 1:myOffset + mySize,of) = &
+        plasticState(phase)%state(myOffset + 1:myOffset + mySize,of) + plasticState(phase)%deltaState(1:mySize,of)
+    end select
+  endif
+
+  sourceLoop: do i = 1, phase_Nsources(phase)
+
+    sourceType: select case (phase_source(i,phase))
 
       case (SOURCE_damage_isoBrittle_ID) sourceType
         call source_damage_isoBrittle_deltaState  (constitutive_homogenizedC(ipc,ip,el), Fe, &
                                                    ipc, ip, el)
+        broken = broken .or. any(IEEE_is_NaN(sourceState(phase)%p(i)%deltaState(:,of)))
+        if(.not. broken) then
+          myOffset = sourceState(phase)%p(i)%offsetDeltaState
+          mySize   = sourceState(phase)%p(i)%sizeDeltaState
+          sourceState(phase)%p(i)%state(myOffset + 1: myOffset + mySize,of) = &
+          sourceState(phase)%p(i)%state(myOffset + 1: myOffset + mySize,of) + sourceState(phase)%p(i)%deltaState(1:mySize,of)
+        endif
 
     end select sourceType
 
  enddo SourceLoop
 
-end subroutine constitutive_collectDeltaState
+end function constitutive_deltaState
 
 
 !--------------------------------------------------------------------------------------------------
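constitutive_deltaState now applies the jump itself: once the NaN check has passed, the delta-state slice of the state vector, located via offsetDeltaState and sizeDeltaState, is incremented in place. A minimal sketch of that windowed update (plain Fortran, not DAMASK code; the layout numbers are invented):

  program deltastate_window_demo
    implicit none
    real    :: state(6), deltaState(2)
    integer :: myOffset, mySize

    state      = 1.0
    deltaState = [0.5, -0.25]
    myOffset   = 4                       ! hypothetical: delta-state block starts after 4 dot-state entries
    mySize     = size(deltaState)

    state(myOffset+1:myOffset+mySize) = state(myOffset+1:myOffset+mySize) + deltaState(1:mySize)
    print *, state                       ! expected: 1.0 1.0 1.0 1.0 1.5 0.75
  end program deltastate_window_demo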
@@ -209,7 +209,7 @@ module subroutine plastic_disloUCLA_init
       sizeDotState = size(['rho_mob ','rho_dip ','gamma_sl']) * prm%sum_N_sl
       sizeState = sizeDotState
 
-      call material_allocatePlasticState(p,NipcMyPhase,sizeState,sizeDotState,0)
+      call material_allocateState(plasticState(p),NipcMyPhase,sizeState,sizeDotState,0)
 
 !--------------------------------------------------------------------------------------------------
 ! state aliases and initialization
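The size bookkeeping that feeds material_allocateState is unchanged across these hunks: size() on the literal array of state labels counts the per-slip-system variables, so the allocated dot-state length is the number of labels times sum_N_sl. A small stand-alone illustration (not DAMASK code; the slip-system count is made up):

  program state_size_demo
    implicit none
    integer, parameter :: sum_N_sl = 12                      ! hypothetical number of slip systems
    integer :: sizeDotState

    sizeDotState = size(['rho_mob ','rho_dip ','gamma_sl']) * sum_N_sl
    print *, sizeDotState                                    ! 3 * 12 = 36
  end program state_size_demo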
@@ -399,7 +399,7 @@ module subroutine plastic_dislotwin_init
                    + size(['f_tr']) * prm%sum_N_tr
       sizeState = sizeDotState
 
-      call material_allocatePlasticState(p,NipcMyPhase,sizeState,sizeDotState,0)
+      call material_allocateState(plasticState(p),NipcMyPhase,sizeState,sizeDotState,0)
 
 !--------------------------------------------------------------------------------------------------
 ! locally defined state aliases and initialization of state0 and atol
@@ -117,7 +117,7 @@ module subroutine plastic_isotropic_init
       sizeDotState = size(['xi               ','accumulated_shear'])
       sizeState = sizeDotState
 
-      call material_allocatePlasticState(p,NipcMyPhase,sizeState,sizeDotState,0)
+      call material_allocateState(plasticState(p),NipcMyPhase,sizeState,sizeDotState,0)
 
 !--------------------------------------------------------------------------------------------------
 ! state aliases and initialization
@@ -164,7 +164,7 @@ module subroutine plastic_kinehardening_init
       sizeDeltaState = size(['sense ', 'chi0  ', 'gamma0']) * prm%sum_N_sl
       sizeState = sizeDotState + sizeDeltaState
 
-      call material_allocatePlasticState(p,NipcMyPhase,sizeState,sizeDotState,sizeDeltaState)
+      call material_allocateState(plasticState(p),NipcMyPhase,sizeState,sizeDotState,sizeDeltaState)
 
 !--------------------------------------------------------------------------------------------------
 ! state aliases and initialization
@@ -29,7 +29,7 @@ module subroutine plastic_none_init
     if (phase_plasticity(p) /= PLASTICITY_NONE_ID) cycle
 
     NipcMyPhase = count(material_phaseAt == p) * discretization_nIP
-    call material_allocatePlasticState(p,NipcMyPhase,0,0,0)
+    call material_allocateState(plasticState(p),NipcMyPhase,0,0,0)
 
   enddo
 
@@ -320,6 +320,7 @@ module subroutine plastic_nonlocal_init
     prm%fEdgeMultiplication        = config%getFloat('edgemultiplication')
     prm%shortRangeStressCorrection = config%keyExists('/shortrangestresscorrection/')
 
+
 !--------------------------------------------------------------------------------------------------
 !  sanity checks
     if (any(prm%burgers < 0.0_pReal)) extmsg = trim(extmsg)//' burgers'
@@ -384,9 +385,9 @@ module subroutine plastic_nonlocal_init
                                  'maxDipoleHeightEdge ','maxDipoleHeightScrew' ]) * prm%sum_N_sl    !< other dependent state variables that are not updated by microstructure
       sizeDeltaState            = sizeDotState
 
-      call material_allocatePlasticState(p,NipcMyPhase,sizeState,sizeDotState,sizeDeltaState)
+      call material_allocateState(plasticState(p),NipcMyPhase,sizeState,sizeDotState,sizeDeltaState)
 
-      plasticState(p)%nonlocal = .true.
+      plasticState(p)%nonlocal = config%KeyExists('/nonlocal/')
       plasticState(p)%offsetDeltaState = 0                                                          ! ToDo: state structure does not follow convention
 
       st0%rho => plasticState(p)%state0 (0*prm%sum_N_sl+1:10*prm%sum_N_sl,:)
@@ -961,39 +962,24 @@ module subroutine plastic_nonlocal_dotState(Mp, F, Fp, Temperature,timestep, &
 
   integer :: &
     ph, &
-    neighbor_instance, &                                                                            !< instance of my neighbor's plasticity
     ns, &                                                                                           !< short notation for the total number of active slip systems
     c, &                                                                                            !< character of dislocation
-    n, &                                                                                            !< index of my current neighbor
-    neighbor_el, &                                                                                  !< element number of my neighbor
-    neighbor_ip, &                                                                                  !< integration point of my neighbor
-    neighbor_n, &                                                                                   !< neighbor index pointing to me when looking from my neighbor
-    opposite_neighbor, &                                                                            !< index of my opposite neighbor
-    opposite_ip, &                                                                                  !< ip of my opposite neighbor
-    opposite_el, &                                                                                  !< element index of my opposite neighbor
-    opposite_n, &                                                                                   !< neighbor index pointing to me when looking from my opposite neighbor
     t, &                                                                                            !< type of dislocation
-    no,&                                                                                            !< neighbor offset shortcut
-    np,&                                                                                            !< neighbor phase shortcut
-    topp, &                                                                                         !< type of dislocation with opposite sign to t
     s                                                                                               !< index of my current slip system
   real(pReal), dimension(param(instance)%sum_N_sl,10) :: &
     rho, &
     rho0, &                                                                                         !< dislocation density at beginning of time step
     rhoDot, &                                                                                       !< density evolution
     rhoDotMultiplication, &                                                                         !< density evolution by multiplication
-    rhoDotFlux, &                                                                                   !< density evolution by flux
     rhoDotSingle2DipoleGlide, &                                                                     !< density evolution by dipole formation (by glide)
     rhoDotAthermalAnnihilation, &                                                                   !< density evolution by athermal annihilation
     rhoDotThermalAnnihilation                                                                       !< density evolution by thermal annihilation
   real(pReal), dimension(param(instance)%sum_N_sl,8) :: &
     rhoSgl, &                                                                                       !< current single dislocation densities (positive/negative screw and edge without dipoles)
-    neighbor_rhoSgl0, &                                                                             !< current single dislocation densities of neighboring ip (positive/negative screw and edge without dipoles)
     my_rhoSgl0                                                                                      !< single dislocation densities of central ip (positive/negative screw and edge without dipoles)
   real(pReal), dimension(param(instance)%sum_N_sl,4) :: &
     v, &                                                                                            !< current dislocation glide velocity
     v0, &
-    neighbor_v0, &                                                                                  !< dislocation glide velocity of enighboring ip
     gdot                                                                                            !< shear rates
   real(pReal), dimension(param(instance)%sum_N_sl) :: &
     tau, &                                                                                          !< current resolved shear stress
@@ -1002,23 +988,7 @@ module subroutine plastic_nonlocal_dotState(Mp, F, Fp, Temperature,timestep, &
     rhoDip, &                                                                                       !< current dipole dislocation densities (screw and edge dipoles)
     dLower, &                                                                                       !< minimum stable dipole distance for edges and screws
     dUpper                                                                                          !< current maximum stable dipole distance for edges and screws
-  real(pReal), dimension(3,param(instance)%sum_N_sl,4) :: &
-    m                                                                                               !< direction of dislocation motion
-  real(pReal), dimension(3,3) :: &
-    my_F, &                                                                                         !< my total deformation gradient
-    neighbor_F, &                                                                                   !< total deformation gradient of my neighbor
-    my_Fe, &                                                                                        !< my elastic deformation gradient
-    neighbor_Fe, &                                                                                  !< elastic deformation gradient of my neighbor
-    Favg                                                                                            !< average total deformation gradient of me and my neighbor
-  real(pReal), dimension(3) :: &
-    normal_neighbor2me, &                                                                           !< interface normal pointing from my neighbor to me in neighbor's lattice configuration
-    normal_neighbor2me_defConf, &                                                                   !< interface normal pointing from my neighbor to me in shared deformed configuration
-    normal_me2neighbor, &                                                                           !< interface normal pointing from me to my neighbor in my lattice configuration
-    normal_me2neighbor_defConf                                                                      !< interface normal pointing from me to my neighbor in shared deformed configuration
   real(pReal) :: &
-    area, &                                                                                         !< area of the current interface
-    transmissivity, &                                                                               !< overall transmissivity of dislocation flux to neighboring material point
-    lineLength, &                                                                                   !< dislocation line length leaving the current interface
     selfDiffusion                                                                                   !< self diffusion
 
   ph = material_phaseAt(1,el)
@@ -1091,6 +1061,172 @@ module subroutine plastic_nonlocal_dotState(Mp, F, Fp, Temperature,timestep, &
                                     * sqrt(stt%rho_forest(:,of)) / prm%lambda0 / prm%burgers, 2, 4)
   endif isBCC
 
+  forall (s = 1:ns, t = 1:4) v0(s,t) = plasticState(ph)%state0(iV(s,t,instance),of)
+
+
+  !****************************************************************************
+  !*** calculate dipole formation and annihilation
+
+  !*** formation by glide
+  do c = 1,2
+    rhoDotSingle2DipoleGlide(:,2*c-1) = -2.0_pReal * dUpper(:,c) / prm%burgers &
+                                      * (  rhoSgl(:,2*c-1)  * abs(gdot(:,2*c))   &                  ! negative mobile --> positive mobile
+                                         + rhoSgl(:,2*c)    * abs(gdot(:,2*c-1)) &                  ! positive mobile --> negative mobile
+                                         + abs(rhoSgl(:,2*c+4)) * abs(gdot(:,2*c-1)))               ! positive mobile --> negative immobile
+
+    rhoDotSingle2DipoleGlide(:,2*c) = -2.0_pReal * dUpper(:,c) / prm%burgers &
+                                    * (  rhoSgl(:,2*c-1) * abs(gdot(:,2*c))   &                     ! negative mobile --> positive mobile
+                                       + rhoSgl(:,2*c)   * abs(gdot(:,2*c-1)) &                     ! positive mobile --> negative mobile
+                                       + abs(rhoSgl(:,2*c+3)) * abs(gdot(:,2*c)))                   ! negative mobile --> positive immobile
+
+    rhoDotSingle2DipoleGlide(:,2*c+3) = -2.0_pReal * dUpper(:,c) / prm%burgers &
+                                      * rhoSgl(:,2*c+3) * abs(gdot(:,2*c))                          ! negative mobile --> positive immobile
+
+    rhoDotSingle2DipoleGlide(:,2*c+4) = -2.0_pReal * dUpper(:,c) / prm%burgers &
+                                      * rhoSgl(:,2*c+4) * abs(gdot(:,2*c-1))                        ! positive mobile --> negative immobile
+
+    rhoDotSingle2DipoleGlide(:,c+8) = abs(rhoDotSingle2DipoleGlide(:,2*c+3)) &
+                                    + abs(rhoDotSingle2DipoleGlide(:,2*c+4)) &
+                                    - rhoDotSingle2DipoleGlide(:,2*c-1) &
+                                    - rhoDotSingle2DipoleGlide(:,2*c)
+  enddo
+
+
+  !*** athermal annihilation
+  rhoDotAthermalAnnihilation = 0.0_pReal
+  forall (c=1:2) &
+    rhoDotAthermalAnnihilation(:,c+8) = -2.0_pReal * dLower(:,c) / prm%burgers &
+       * (  2.0_pReal * (rhoSgl(:,2*c-1) * abs(gdot(:,2*c)) + rhoSgl(:,2*c) * abs(gdot(:,2*c-1))) &             ! was single hitting single
+          + 2.0_pReal * (abs(rhoSgl(:,2*c+3)) * abs(gdot(:,2*c)) + abs(rhoSgl(:,2*c+4)) * abs(gdot(:,2*c-1))) & ! was single hitting immobile single or was immobile single hit by single
+          + rhoDip(:,c) * (abs(gdot(:,2*c-1)) + abs(gdot(:,2*c))))                                              ! single knocks dipole constituent
+
+  ! annihilated screw dipoles leave edge jogs behind on the colinear system
+  if (lattice_structure(ph) == LATTICE_fcc_ID) &
+    forall (s = 1:ns, prm%colinearSystem(s) > 0) &
+      rhoDotAthermalAnnihilation(prm%colinearSystem(s),1:2) = - rhoDotAthermalAnnihilation(s,10) &
+        * 0.25_pReal * sqrt(stt%rho_forest(s,of)) * (dUpper(s,2) + dLower(s,2)) * prm%edgeJogFactor
+
+
+  !*** thermally activated annihilation of edge dipoles by climb
+  rhoDotThermalAnnihilation = 0.0_pReal
+  selfDiffusion = prm%Dsd0 * exp(-prm%selfDiffusionEnergy / (kB * Temperature))
+  vClimb = prm%atomicVolume * selfDiffusion * prm%mu &
+         / ( kB * Temperature * PI * (1.0_pReal-prm%nu) * (dUpper(:,1) + dLower(:,1)))
+  forall (s = 1:ns, dUpper(s,1) > dLower(s,1)) &
+    rhoDotThermalAnnihilation(s,9) = max(- 4.0_pReal * rhoDip(s,1) * vClimb(s) / (dUpper(s,1) - dLower(s,1)), &
+                                         - rhoDip(s,1) / timestep - rhoDotAthermalAnnihilation(s,9) &
+                                                                  - rhoDotSingle2DipoleGlide(s,9))  ! make sure that we do not annihilate more dipoles than we have
+
+  rhoDot = rhoDotFlux(F,Fp,timestep, instance,of,ip,el) &
+         + rhoDotMultiplication &
+         + rhoDotSingle2DipoleGlide &
+         + rhoDotAthermalAnnihilation &
+         + rhoDotThermalAnnihilation
+
+
+  if ( any(rho(:,mob) + rhoDot(:,1:4)  * timestep < -prm%atol_rho) &
+  .or. any(rho(:,dip) + rhoDot(:,9:10) * timestep < -prm%atol_rho)) then
+#ifdef DEBUG
+    if (iand(debug_level(debug_constitutive),debug_levelExtensive) /= 0) then
+      write(6,'(a,i5,a,i2)') '<< CONST >> evolution rate leads to negative density at el ',el,' ip ',ip
+      write(6,'(a)') '<< CONST >> enforcing cutback !!!'
+    endif
+#endif
+    plasticState(ph)%dotState = IEEE_value(1.0_pReal,IEEE_quiet_NaN)
+  else
+    dot%rho(:,of) = pack(rhoDot,.true.)
+    dot%gamma(:,of) = sum(gdot,2)
+  endif
+
+  end associate
+
+end subroutine plastic_nonlocal_dotState
+
+
+!---------------------------------------------------------------------------------------------------
+!> @brief calculates the rate of change of microstructure
+!---------------------------------------------------------------------------------------------------
+function rhoDotFlux(F,Fp,timestep, instance,of,ip,el)
+
+  real(pReal), dimension(3,3,homogenization_maxNgrains,discretization_nIP,discretization_nElem), intent(in) :: &
+    F, &                                                                                            !< elastic deformation gradient
+    Fp                                                                                              !< plastic deformation gradient
+  real(pReal), intent(in) :: &
+    timestep                                                                                        !< substepped crystallite time increment
+  integer, intent(in) :: &
+    instance, &
+    of, &
+    ip, &                                                                                           !< current integration point
+    el                                                                                              !< current element number
+
+  integer :: &
+    ph, &
+    neighbor_instance, &                                                                            !< instance of my neighbor's plasticity
+    ns, &                                                                                           !< short notation for the total number of active slip systems
+    c, &                                                                                            !< character of dislocation
+    n, &                                                                                            !< index of my current neighbor
+    neighbor_el, &                                                                                  !< element number of my neighbor
+    neighbor_ip, &                                                                                  !< integration point of my neighbor
+    neighbor_n, &                                                                                   !< neighbor index pointing to me when looking from my neighbor
+    opposite_neighbor, &                                                                            !< index of my opposite neighbor
+    opposite_ip, &                                                                                  !< ip of my opposite neighbor
+    opposite_el, &                                                                                  !< element index of my opposite neighbor
+    opposite_n, &                                                                                   !< neighbor index pointing to me when looking from my opposite neighbor
+    t, &                                                                                            !< type of dislocation
+    no,&                                                                                            !< neighbor offset shortcut
+    np,&                                                                                            !< neighbor phase shortcut
+    topp, &                                                                                         !< type of dislocation with opposite sign to t
+    s                                                                                               !< index of my current slip system
+  real(pReal), dimension(param(instance)%sum_N_sl,10) :: &
+    rho, &
+    rho0, &                                                                                         !< dislocation density at beginning of time step
+    rhoDotFlux                                                                                      !< density evolution by flux
+  real(pReal), dimension(param(instance)%sum_N_sl,8) :: &
+    rhoSgl, &                                                                                       !< current single dislocation densities (positive/negative screw and edge without dipoles)
+    neighbor_rhoSgl0, &                                                                             !< current single dislocation densities of neighboring ip (positive/negative screw and edge without dipoles)
+    my_rhoSgl0                                                                                      !< single dislocation densities of central ip (positive/negative screw and edge without dipoles)
+  real(pReal), dimension(param(instance)%sum_N_sl,4) :: &
+    v, &                                                                                            !< current dislocation glide velocity
+    v0, &
+    neighbor_v0, &                                                                                  !< dislocation glide velocity of enighboring ip
+    gdot                                                                                            !< shear rates
+  real(pReal), dimension(3,param(instance)%sum_N_sl,4) :: &
+    m                                                                                               !< direction of dislocation motion
+  real(pReal), dimension(3,3) :: &
+    my_F, &                                                                                         !< my total deformation gradient
+    neighbor_F, &                                                                                   !< total deformation gradient of my neighbor
+    my_Fe, &                                                                                        !< my elastic deformation gradient
+    neighbor_Fe, &                                                                                  !< elastic deformation gradient of my neighbor
+    Favg                                                                                            !< average total deformation gradient of me and my neighbor
+  real(pReal), dimension(3) :: &
+    normal_neighbor2me, &                                                                           !< interface normal pointing from my neighbor to me in neighbor's lattice configuration
+    normal_neighbor2me_defConf, &                                                                   !< interface normal pointing from my neighbor to me in shared deformed configuration
+    normal_me2neighbor, &                                                                           !< interface normal pointing from me to my neighbor in my lattice configuration
+    normal_me2neighbor_defConf                                                                      !< interface normal pointing from me to my neighbor in shared deformed configuration
+  real(pReal) :: &
+    area, &                                                                                         !< area of the current interface
+    transmissivity, &                                                                               !< overall transmissivity of dislocation flux to neighboring material point
+    lineLength                                                                                      !< dislocation line length leaving the current interface
+
+  ph = material_phaseAt(1,el)
+
+  associate(prm => param(instance), &
+            dst => microstructure(instance), &
+            dot => dotState(instance), &
+            stt => state(instance))
+  ns = prm%sum_N_sl
+
+  gdot = 0.0_pReal
+
+  rho  = getRho(instance,of,ip,el)
+  rhoSgl = rho(:,sgl)
+  rho0 = getRho0(instance,of,ip,el)
+  my_rhoSgl0 = rho0(:,sgl)
+
+  forall (s = 1:ns, t = 1:4) v(s,t) = plasticState(ph)%state(iV(s,t,instance),of)                   !ToDo: MD: I think we should use state0 here
+  gdot = rhoSgl(:,1:4) * v * spread(prm%burgers,2,4)
+
   forall (s = 1:ns, t = 1:4) v0(s,t) = plasticState(ph)%state0(iV(s,t,instance),of)
 
   !****************************************************************************
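With the flux contribution factored out into the new rhoDotFlux function, the total density evolution assembled above is simply the sum of the individual mechanisms; in compact notation (mine, not from the commit):

  \dot{\rho} \;=\; \dot{\rho}_{\mathrm{flux}}
             \;+\; \dot{\rho}_{\mathrm{mult}}
             \;+\; \dot{\rho}_{\mathrm{dip\,(glide)}}
             \;+\; \dot{\rho}_{\mathrm{ann}}^{\mathrm{athermal}}
             \;+\; \dot{\rho}_{\mathrm{ann}}^{\mathrm{thermal}} .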
@@ -1113,7 +1249,7 @@ module subroutine plastic_nonlocal_dotState(Mp, F, Fp, Temperature,timestep, &
         write(6,'(a)') '<< CONST >> enforcing cutback !!!'
       endif
 #endif
-      plasticState(ph)%dotState = IEEE_value(1.0_pReal,IEEE_quiet_NaN)                              ! -> return NaN and, hence, enforce cutback
+      rhoDotFlux = IEEE_value(1.0_pReal,IEEE_quiet_NaN)                                             ! enforce cutback
       return
     endif
 
@@ -1239,108 +1375,9 @@ module subroutine plastic_nonlocal_dotState(Mp, F, Fp, Temperature,timestep, &
     enddo neighbors
   endif
 
 
-
-  !****************************************************************************
-  !*** calculate dipole formation and annihilation
-
-  !*** formation by glide
-  do c = 1,2
-    rhoDotSingle2DipoleGlide(:,2*c-1) = -2.0_pReal * dUpper(:,c) / prm%burgers &
-                                      * (  rhoSgl(:,2*c-1)  * abs(gdot(:,2*c))   &                  ! negative mobile --> positive mobile
-                                         + rhoSgl(:,2*c)    * abs(gdot(:,2*c-1)) &                  ! positive mobile --> negative mobile
-                                         + abs(rhoSgl(:,2*c+4)) * abs(gdot(:,2*c-1)))               ! positive mobile --> negative immobile
-
-    rhoDotSingle2DipoleGlide(:,2*c) = -2.0_pReal * dUpper(:,c) / prm%burgers &
-                                    * (  rhoSgl(:,2*c-1) * abs(gdot(:,2*c))   &                     ! negative mobile --> positive mobile
-                                       + rhoSgl(:,2*c)   * abs(gdot(:,2*c-1)) &                     ! positive mobile --> negative mobile
-                                       + abs(rhoSgl(:,2*c+3)) * abs(gdot(:,2*c)))                   ! negative mobile --> positive immobile
-
-    rhoDotSingle2DipoleGlide(:,2*c+3) = -2.0_pReal * dUpper(:,c) / prm%burgers &
-                                      * rhoSgl(:,2*c+3) * abs(gdot(:,2*c))                          ! negative mobile --> positive immobile
-
-    rhoDotSingle2DipoleGlide(:,2*c+4) = -2.0_pReal * dUpper(:,c) / prm%burgers &
-                                      * rhoSgl(:,2*c+4) * abs(gdot(:,2*c-1))                        ! positive mobile --> negative immobile
-
-    rhoDotSingle2DipoleGlide(:,c+8) = abs(rhoDotSingle2DipoleGlide(:,2*c+3)) &
-                                    + abs(rhoDotSingle2DipoleGlide(:,2*c+4)) &
-                                    - rhoDotSingle2DipoleGlide(:,2*c-1) &
-                                    - rhoDotSingle2DipoleGlide(:,2*c)
-  enddo
-
-
-  !*** athermal annihilation
-  rhoDotAthermalAnnihilation = 0.0_pReal
-  forall (c=1:2) &
-    rhoDotAthermalAnnihilation(:,c+8) = -2.0_pReal * dLower(:,c) / prm%burgers &
-       * (  2.0_pReal * (rhoSgl(:,2*c-1) * abs(gdot(:,2*c)) + rhoSgl(:,2*c) * abs(gdot(:,2*c-1))) &             ! was single hitting single
-          + 2.0_pReal * (abs(rhoSgl(:,2*c+3)) * abs(gdot(:,2*c)) + abs(rhoSgl(:,2*c+4)) * abs(gdot(:,2*c-1))) & ! was single hitting immobile single or was immobile single hit by single
-          + rhoDip(:,c) * (abs(gdot(:,2*c-1)) + abs(gdot(:,2*c))))                                              ! single knocks dipole constituent
-
-  ! annihilated screw dipoles leave edge jogs behind on the colinear system
-  if (lattice_structure(ph) == LATTICE_fcc_ID) &
-    forall (s = 1:ns, prm%colinearSystem(s) > 0) &
-      rhoDotAthermalAnnihilation(prm%colinearSystem(s),1:2) = - rhoDotAthermalAnnihilation(s,10) &
-        * 0.25_pReal * sqrt(stt%rho_forest(s,of)) * (dUpper(s,2) + dLower(s,2)) * prm%edgeJogFactor
-
-
-  !*** thermally activated annihilation of edge dipoles by climb
-  rhoDotThermalAnnihilation = 0.0_pReal
-  selfDiffusion = prm%Dsd0 * exp(-prm%selfDiffusionEnergy / (kB * Temperature))
-  vClimb = prm%atomicVolume * selfDiffusion * prm%mu &
-         / ( kB * Temperature * PI * (1.0_pReal-prm%nu) * (dUpper(:,1) + dLower(:,1)))
-  forall (s = 1:ns, dUpper(s,1) > dLower(s,1)) &
-    rhoDotThermalAnnihilation(s,9) = max(- 4.0_pReal * rhoDip(s,1) * vClimb(s) / (dUpper(s,1) - dLower(s,1)), &
-                                         - rhoDip(s,1) / timestep - rhoDotAthermalAnnihilation(s,9) &
-                                                                  - rhoDotSingle2DipoleGlide(s,9))  ! make sure that we do not annihilate more dipoles than we have
-
-  rhoDot = rhoDotFlux &
-         + rhoDotMultiplication &
-         + rhoDotSingle2DipoleGlide &
-         + rhoDotAthermalAnnihilation &
-         + rhoDotThermalAnnihilation
-
-#ifdef DEBUG
-  if (iand(debug_level(debug_constitutive),debug_levelExtensive) /= 0 &
-      .and. ((debug_e == el .and. debug_i == ip)&
-             .or. .not. iand(debug_level(debug_constitutive),debug_levelSelective) /= 0 )) then
-    write(6,'(a,/,4(12x,12(e12.5,1x),/))')  '<< CONST >> dislocation multiplication', &
-                                            rhoDotMultiplication(:,1:4) * timestep
-    write(6,'(a,/,8(12x,12(e12.5,1x),/))')  '<< CONST >> dislocation flux', &
-                                            rhoDotFlux(:,1:8) * timestep
-    write(6,'(a,/,10(12x,12(e12.5,1x),/))') '<< CONST >> dipole formation by glide', &
-                                            rhoDotSingle2DipoleGlide * timestep
-    write(6,'(a,/,10(12x,12(e12.5,1x),/))') '<< CONST >> athermal dipole annihilation', &
-                                            rhoDotAthermalAnnihilation * timestep
-    write(6,'(a,/,2(12x,12(e12.5,1x),/))')  '<< CONST >> thermally activated dipole annihilation', &
-                                            rhoDotThermalAnnihilation(:,9:10) * timestep
-    write(6,'(a,/,10(12x,12(e12.5,1x),/))') '<< CONST >> total density change', &
-                                            rhoDot * timestep
-    write(6,'(a,/,10(12x,12(f12.5,1x),/))') '<< CONST >> relative density change', &
-                                            rhoDot(:,1:8)  * timestep / (abs(stt%rho(:,sgl))+1.0e-10), &
-                                            rhoDot(:,9:10) * timestep / (stt%rho(:,dip)+1.0e-10)
-    write(6,*)
-  endif
-#endif
-
-
-  if ( any(rho(:,mob) + rhoDot(:,1:4)  * timestep < -prm%atol_rho) &
-  .or. any(rho(:,dip) + rhoDot(:,9:10) * timestep < -prm%atol_rho)) then
-#ifdef DEBUG
-    if (iand(debug_level(debug_constitutive),debug_levelExtensive) /= 0) then
-      write(6,'(a,i5,a,i2)') '<< CONST >> evolution rate leads to negative density at el ',el,' ip ',ip
-      write(6,'(a)') '<< CONST >> enforcing cutback !!!'
-    endif
-#endif
-    plasticState(ph)%dotState = IEEE_value(1.0_pReal,IEEE_quiet_NaN)
-  else
-    dot%rho(:,of) = pack(rhoDot,.true.)
-    dot%gamma(:,of) = sum(gdot,2)
-  endif
-
   end associate
 
-end subroutine plastic_nonlocal_dotState
+end function rhoDotFlux
 
 
 !--------------------------------------------------------------------------------------------------
@@ -213,7 +213,7 @@ module subroutine plastic_phenopowerlaw_init
                    + size(['xi_tw   ','gamma_tw']) * prm%sum_N_tw
       sizeState = sizeDotState
 
-      call material_allocatePlasticState(p,NipcMyPhase,sizeState,sizeDotState,0)
+      call material_allocateState(plasticState(p),NipcMyPhase,sizeState,sizeDotState,0)
 
 !--------------------------------------------------------------------------------------------------
 ! state aliases and initialization
File diff suppressed because it is too large
@@ -132,11 +132,11 @@ subroutine grid_mech_FEM_init
          [grid(1)],[grid(2)],localK, &
          mech_grid,ierr)
   CHKERRQ(ierr)
-  call DMDASetUniformCoordinates(mech_grid,0.0_pReal,geomSize(1),0.0_pReal,geomSize(2),0.0_pReal,geomSize(3),ierr)
-  CHKERRQ(ierr)
   call SNESSetDM(mech_snes,mech_grid,ierr); CHKERRQ(ierr)
   call DMsetFromOptions(mech_grid,ierr); CHKERRQ(ierr)
   call DMsetUp(mech_grid,ierr); CHKERRQ(ierr)
+  call DMDASetUniformCoordinates(mech_grid,0.0_pReal,geomSize(1),0.0_pReal,geomSize(2),0.0_pReal,geomSize(3),ierr)
+  CHKERRQ(ierr)
   call DMCreateGlobalVector(mech_grid,solution_current,ierr); CHKERRQ(ierr)
   call DMCreateGlobalVector(mech_grid,solution_lastInc,ierr); CHKERRQ(ierr)
   call DMCreateGlobalVector(mech_grid,solution_rate   ,ierr); CHKERRQ(ierr)
@@ -27,33 +27,22 @@ module homogenization
   implicit none
   private
 
-!--------------------------------------------------------------------------------------------------
-! General variables for the homogenization at a material point
   logical, public :: &
     terminallyIll = .false.                                                                         !< at least one material point is terminally ill
-  real(pReal), dimension(:,:,:,:), allocatable, public :: &
-    materialpoint_F0, &                                                                             !< def grad of IP at start of FE increment
-    materialpoint_F, &                                                                              !< def grad of IP to be reached at end of FE increment
-    materialpoint_P                                                                                 !< first P--K stress of IP
-  real(pReal), dimension(:,:,:,:,:,:), allocatable, public :: &
-    materialpoint_dPdF                                                                              !< tangent of first P--K stress at IP
 
-  real(pReal), dimension(:,:,:,:), allocatable :: &
-    materialpoint_subF0, &                                                                          !< def grad of IP at beginning of homogenization increment
-    materialpoint_subF                                                                              !< def grad of IP to be reached at end of homog inc
-  real(pReal), dimension(:,:), allocatable :: &
-    materialpoint_subFrac, &
-    materialpoint_subStep, &
-    materialpoint_subdt
-  logical, dimension(:,:), allocatable :: &
-    materialpoint_requested, &
-    materialpoint_converged
-  logical, dimension(:,:,:), allocatable :: &
-    materialpoint_doneAndHappy
+!--------------------------------------------------------------------------------------------------
+! General variables for the homogenization at a material point
+  real(pReal), dimension(:,:,:,:), allocatable, public :: &
+    materialpoint_F0, &                                                                             !< def grad of IP at start of FE increment
+    materialpoint_F                                                                                 !< def grad of IP to be reached at end of FE increment
+  real(pReal), dimension(:,:,:,:), allocatable, public, protected :: &
+    materialpoint_P                                                                                 !< first P--K stress of IP
+  real(pReal), dimension(:,:,:,:,:,:), allocatable, public, protected :: &
+    materialpoint_dPdF                                                                              !< tangent of first P--K stress at IP
 
   type :: tNumerics
     integer :: &
       nMPstate                                                                                      !< materialpoint state loop limit
     real(pReal) :: &
       subStepMinHomog, &                                                                            !< minimum (relative) size of sub-step allowed during cutback in homogenization
      subStepSizeHomog, &                                                                            !< size of first substep when cutback in homogenization
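materialpoint_P and materialpoint_dPdF gain the protected attribute: they stay readable everywhere but can only be assigned inside the homogenization module. A minimal, self-contained illustration of that attribute (generic Fortran, not DAMASK code; names are invented):

  module results_mod
    implicit none
    real, public, protected :: P(3,3) = 0.0                  ! readable everywhere, writable only in here
  contains
    subroutine set_P(val)                                    ! the module's own setter may modify P
      real, intent(in) :: val
      P = val
    end subroutine set_P
  end module results_mod

  program protected_demo
    use results_mod
    implicit none
    call set_P(2.0)
    print *, P(1,1)                                          ! reading is fine; "P = 1.0" here would not compile
  end program protected_demo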
@@ -161,15 +150,7 @@ subroutine homogenization_init
   allocate(materialpoint_dPdF(3,3,3,3,discretization_nIP,discretization_nElem), source=0.0_pReal)
   materialpoint_F0 = spread(spread(math_I3,3,discretization_nIP),4,discretization_nElem)            ! initialize to identity
   materialpoint_F = materialpoint_F0                                                                ! initialize to identity
-  allocate(materialpoint_subF0(3,3,discretization_nIP,discretization_nElem),      source=0.0_pReal)
-  allocate(materialpoint_subF(3,3,discretization_nIP,discretization_nElem),       source=0.0_pReal)
   allocate(materialpoint_P(3,3,discretization_nIP,discretization_nElem),          source=0.0_pReal)
-  allocate(materialpoint_subFrac(discretization_nIP,discretization_nElem),        source=0.0_pReal)
-  allocate(materialpoint_subStep(discretization_nIP,discretization_nElem),        source=0.0_pReal)
-  allocate(materialpoint_subdt(discretization_nIP,discretization_nElem),          source=0.0_pReal)
-  allocate(materialpoint_requested(discretization_nIP,discretization_nElem),      source=.false.)
-  allocate(materialpoint_converged(discretization_nIP,discretization_nElem),      source=.true.)
-  allocate(materialpoint_doneAndHappy(2,discretization_nIP,discretization_nElem), source=.true.)
 
   write(6,'(/,a)')   ' <<<+- homogenization init -+>>>'; flush(6)
 
@@ -203,6 +184,14 @@ subroutine materialpoint_stressAndItsTangent(updateJaco,dt)
     e, &                                                                                            !< element number
     mySource, &
     myNgrains
+  real(pReal), dimension(discretization_nIP,discretization_nElem) :: &
+    subFrac, &
+    subStep
+  logical,     dimension(discretization_nIP,discretization_nElem) :: &
+    requested, &
+    converged
+  logical,     dimension(2,discretization_nIP,discretization_nElem) :: &
+    doneAndHappy
 
 #ifdef DEBUG
   if (iand(debug_level(debug_homogenization), debug_levelBasic) /= 0) then
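subFrac, subStep, requested and converged become local working arrays of materialpoint_stressAndItsTangent; the cutback bookkeeping they carry is shown in the hunks below. A stripped-down sketch of that adaptive sub-stepping (plain Fortran, not DAMASK code; the cutback factor, the step-increase factor and the fake convergence test are assumptions for illustration only):

  program substep_demo
    implicit none
    real, parameter :: subStepSize = 0.25, stepIncrease = 1.5, subStepMin = 1.0e-3
    real    :: subFrac, subStep
    logical :: converged

    subFrac   = 0.0
    converged = .false.                         ! pretend failed step ...
    subStep   = 1.0/subStepSize                 ! ... larger than the requested calculation

    do while (subStep > subStepMin .and. subFrac < 1.0)
      if (converged) then
        subFrac = subFrac + subStep                               ! wind forward
        subStep = min(1.0 - subFrac, stepIncrease*subStep)        ! allow acceleration
      else
        subStep = subStepSize*subStep                             ! cut back after a failed attempt
      end if
      converged = subStep <= 0.5                                  ! stand-in for the real convergence check
      print '(a,f6.3,a,f6.3,a,l2)', ' subFrac=', subFrac, '  subStep=', subStep, '  converged=', converged
    end do
  end program substep_demo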
@ -216,7 +205,7 @@ subroutine materialpoint_stressAndItsTangent(updateJaco,dt)
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
!--------------------------------------------------------------------------------------------------
|
!--------------------------------------------------------------------------------------------------
|
||||||
! initialize restoration points of ...
|
! initialize restoration points
|
||||||
do e = FEsolving_execElem(1),FEsolving_execElem(2)
|
do e = FEsolving_execElem(1),FEsolving_execElem(2)
|
||||||
myNgrains = homogenization_Ngrains(material_homogenizationAt(e))
|
myNgrains = homogenization_Ngrains(material_homogenizationAt(e))
|
||||||
do i = FEsolving_execIP(1),FEsolving_execIP(2);
|
do i = FEsolving_execIP(1),FEsolving_execIP(2);
|
||||||
|
@@ -238,74 +227,60 @@ subroutine materialpoint_stressAndItsTangent(updateJaco,dt)
 
       enddo
 
-      materialpoint_subF0(1:3,1:3,i,e) = materialpoint_F0(1:3,1:3,i,e)
-      materialpoint_subFrac(i,e) = 0.0_pReal
-      materialpoint_subStep(i,e) = 1.0_pReal/num%subStepSizeHomog                                   ! <<added to adopt flexibility in cutback size>>
-      materialpoint_converged(i,e) = .false.                                                        ! pretend failed step of twice the required size
-      materialpoint_requested(i,e) = .true.                                                         ! everybody requires calculation
+      subFrac(i,e) = 0.0_pReal
+      converged(i,e) = .false.                                                                      ! pretend failed step ...
+      subStep(i,e) = 1.0_pReal/num%subStepSizeHomog                                                 ! ... larger then the requested calculation
+      requested(i,e) = .true.                                                                       ! everybody requires calculation
 
       if (homogState(material_homogenizationAt(e))%sizeState > 0) &
           homogState(material_homogenizationAt(e))%subState0(:,material_homogenizationMemberAt(i,e)) = &
-          homogState(material_homogenizationAt(e))%State0(   :,material_homogenizationMemberAt(i,e))   ! ...internal homogenization state
+          homogState(material_homogenizationAt(e))%State0(   :,material_homogenizationMemberAt(i,e))
 
       if (thermalState(material_homogenizationAt(e))%sizeState > 0) &
           thermalState(material_homogenizationAt(e))%subState0(:,material_homogenizationMemberAt(i,e)) = &
-          thermalState(material_homogenizationAt(e))%State0(   :,material_homogenizationMemberAt(i,e)) ! ...internal thermal state
+          thermalState(material_homogenizationAt(e))%State0(   :,material_homogenizationMemberAt(i,e))
 
       if (damageState(material_homogenizationAt(e))%sizeState > 0) &
          damageState(material_homogenizationAt(e))%subState0(:,material_homogenizationMemberAt(i,e)) = &
-          damageState(material_homogenizationAt(e))%State0(   :,material_homogenizationMemberAt(i,e))  ! ...internal damage state
+          damageState(material_homogenizationAt(e))%State0(   :,material_homogenizationMemberAt(i,e))
     enddo
   enddo
 
   NiterationHomog = 0
 
   cutBackLooping: do while (.not. terminallyIll .and. &
-       any(materialpoint_subStep(:,FEsolving_execELem(1):FEsolving_execElem(2)) > num%subStepMinHomog))
+       any(subStep(:,FEsolving_execELem(1):FEsolving_execElem(2)) > num%subStepMinHomog))
 
     !$OMP PARALLEL DO PRIVATE(myNgrains)
     elementLooping1: do e = FEsolving_execElem(1),FEsolving_execElem(2)
       myNgrains = homogenization_Ngrains(material_homogenizationAt(e))
       IpLooping1: do i = FEsolving_execIP(1),FEsolving_execIP(2)
 
-        converged: if (materialpoint_converged(i,e)) then
+        if (converged(i,e)) then
 #ifdef DEBUG
           if (iand(debug_level(debug_homogenization), debug_levelExtensive) /= 0 &
              .and. ((e == debug_e .and. i == debug_i) &
                     .or. .not. iand(debug_level(debug_homogenization),debug_levelSelective) /= 0)) then
            write(6,'(a,1x,f12.8,1x,a,1x,f12.8,1x,a,i8,1x,i2/)') '<< HOMOG >> winding forward from', &
-              materialpoint_subFrac(i,e), 'to current materialpoint_subFrac', &
-              materialpoint_subFrac(i,e)+materialpoint_subStep(i,e),'in materialpoint_stressAndItsTangent at el ip',e,i
+              subFrac(i,e), 'to current subFrac', &
+              subFrac(i,e)+subStep(i,e),'in materialpoint_stressAndItsTangent at el ip',e,i
          endif
 #endif
 
 !---------------------------------------------------------------------------------------------------
 ! calculate new subStep and new subFrac
-          materialpoint_subFrac(i,e) = materialpoint_subFrac(i,e) + materialpoint_subStep(i,e)
-          materialpoint_subStep(i,e) = min(1.0_pReal-materialpoint_subFrac(i,e), &
-                                           num%stepIncreaseHomog*materialpoint_subStep(i,e))        ! introduce flexibility for step increase/acceleration
+          subFrac(i,e) = subFrac(i,e) + subStep(i,e)
+          subStep(i,e) = min(1.0_pReal-subFrac(i,e),num%stepIncreaseHomog*subStep(i,e))             ! introduce flexibility for step increase/acceleration
 
-          steppingNeeded: if (materialpoint_subStep(i,e) > num%subStepMinHomog) then
+          steppingNeeded: if (subStep(i,e) > num%subStepMinHomog) then
 
-            ! wind forward grain starting point of...
-            crystallite_partionedF0 (1:3,1:3,1:myNgrains,i,e) = &
-              crystallite_partionedF(1:3,1:3,1:myNgrains,i,e)
-
-            crystallite_partionedFp0 (1:3,1:3,1:myNgrains,i,e) = &
-              crystallite_Fp (1:3,1:3,1:myNgrains,i,e)
-
-            crystallite_partionedLp0 (1:3,1:3,1:myNgrains,i,e) = &
-              crystallite_Lp (1:3,1:3,1:myNgrains,i,e)
-
-            crystallite_partionedFi0 (1:3,1:3,1:myNgrains,i,e) = &
-              crystallite_Fi (1:3,1:3,1:myNgrains,i,e)
-
-            crystallite_partionedLi0 (1:3,1:3,1:myNgrains,i,e) = &
-              crystallite_Li (1:3,1:3,1:myNgrains,i,e)
-
-            crystallite_partionedS0 (1:3,1:3,1:myNgrains,i,e) = &
-              crystallite_S (1:3,1:3,1:myNgrains,i,e)
+            ! wind forward grain starting point
+            crystallite_partionedF0 (1:3,1:3,1:myNgrains,i,e) = crystallite_partionedF(1:3,1:3,1:myNgrains,i,e)
+            crystallite_partionedFp0(1:3,1:3,1:myNgrains,i,e) = crystallite_Fp        (1:3,1:3,1:myNgrains,i,e)
+            crystallite_partionedLp0(1:3,1:3,1:myNgrains,i,e) = crystallite_Lp        (1:3,1:3,1:myNgrains,i,e)
+            crystallite_partionedFi0(1:3,1:3,1:myNgrains,i,e) = crystallite_Fi        (1:3,1:3,1:myNgrains,i,e)
+            crystallite_partionedLi0(1:3,1:3,1:myNgrains,i,e) = crystallite_Li        (1:3,1:3,1:myNgrains,i,e)
+            crystallite_partionedS0 (1:3,1:3,1:myNgrains,i,e) = crystallite_S         (1:3,1:3,1:myNgrains,i,e)
 
             do g = 1,myNgrains
              plasticState    (material_phaseAt(g,e))%partionedState0(:,material_phasememberAt(g,i,e)) = &
@@ -326,15 +301,12 @@ subroutine materialpoint_stressAndItsTangent(updateJaco,dt)
             damageState(material_homogenizationAt(e))%subState0(:,material_homogenizationMemberAt(i,e)) = &
             damageState(material_homogenizationAt(e))%State (:,material_homogenizationMemberAt(i,e))

-          materialpoint_subF0(1:3,1:3,i,e) = materialpoint_subF(1:3,1:3,i,e)

         endif steppingNeeded

-      else converged
-        if ( (myNgrains == 1 .and. materialpoint_subStep(i,e) <= 1.0 ) .or. & ! single grain already tried internal subStepping in crystallite
-             num%subStepSizeHomog * materialpoint_subStep(i,e) <= num%subStepMinHomog ) then ! would require too small subStep
+      else
+        if ( (myNgrains == 1 .and. subStep(i,e) <= 1.0 ) .or. & ! single grain already tried internal subStepping in crystallite
+             num%subStepSizeHomog * subStep(i,e) <= num%subStepMinHomog ) then ! would require too small subStep
           ! cutback makes no sense
-          !$OMP FLUSH(terminallyIll)
           if (.not. terminallyIll) then ! so first signals terminally ill...
             !$OMP CRITICAL (write2out)
             write(6,*) 'Integration point ', i,' at element ', e, ' terminally ill'
@@ -342,32 +314,27 @@ subroutine materialpoint_stressAndItsTangent(updateJaco,dt)
           endif
           terminallyIll = .true. ! ...and kills all others
         else ! cutback makes sense
-          materialpoint_subStep(i,e) = num%subStepSizeHomog * materialpoint_subStep(i,e) ! crystallite had severe trouble, so do a significant cutback
+          subStep(i,e) = num%subStepSizeHomog * subStep(i,e) ! crystallite had severe trouble, so do a significant cutback

 #ifdef DEBUG
           if (iand(debug_level(debug_homogenization), debug_levelExtensive) /= 0 &
               .and. ((e == debug_e .and. i == debug_i) &
                      .or. .not. iand(debug_level(debug_homogenization), debug_levelSelective) /= 0)) then
             write(6,'(a,1x,f12.8,a,i8,1x,i2/)') &
-              '<< HOMOG >> cutback step in materialpoint_stressAndItsTangent with new materialpoint_subStep:',&
-              materialpoint_subStep(i,e),' at el ip',e,i
+              '<< HOMOG >> cutback step in materialpoint_stressAndItsTangent with new subStep:',&
+              subStep(i,e),' at el ip',e,i
           endif
 #endif

 !--------------------------------------------------------------------------------------------------
-! restore...
-          if (materialpoint_subStep(i,e) < 1.0_pReal) then ! protect against fake cutback from \Delta t = 2 to 1. Maybe that "trick" is not necessary anymore at all? I.e. start with \Delta t = 1
-            crystallite_Lp(1:3,1:3,1:myNgrains,i,e) = &
-              crystallite_partionedLp0(1:3,1:3,1:myNgrains,i,e)
-            crystallite_Li(1:3,1:3,1:myNgrains,i,e) = &
-              crystallite_partionedLi0(1:3,1:3,1:myNgrains,i,e)
+! restore
+          if (subStep(i,e) < 1.0_pReal) then ! protect against fake cutback from \Delta t = 2 to 1. Maybe that "trick" is not necessary anymore at all? I.e. start with \Delta t = 1
+            crystallite_Lp(1:3,1:3,1:myNgrains,i,e) = crystallite_partionedLp0(1:3,1:3,1:myNgrains,i,e)
+            crystallite_Li(1:3,1:3,1:myNgrains,i,e) = crystallite_partionedLi0(1:3,1:3,1:myNgrains,i,e)
           endif ! maybe protecting everything from overwriting (not only L) makes even more sense
-          crystallite_Fp(1:3,1:3,1:myNgrains,i,e) = &
-            crystallite_partionedFp0(1:3,1:3,1:myNgrains,i,e)
-          crystallite_Fi(1:3,1:3,1:myNgrains,i,e) = &
-            crystallite_partionedFi0(1:3,1:3,1:myNgrains,i,e)
-          crystallite_S(1:3,1:3,1:myNgrains,i,e) = &
-            crystallite_partionedS0(1:3,1:3,1:myNgrains,i,e)
+          crystallite_Fp(1:3,1:3,1:myNgrains,i,e) = crystallite_partionedFp0(1:3,1:3,1:myNgrains,i,e)
+          crystallite_Fi(1:3,1:3,1:myNgrains,i,e) = crystallite_partionedFi0(1:3,1:3,1:myNgrains,i,e)
+          crystallite_S (1:3,1:3,1:myNgrains,i,e) = crystallite_partionedS0 (1:3,1:3,1:myNgrains,i,e)
           do g = 1, myNgrains
             plasticState (material_phaseAt(g,e))%state( :,material_phasememberAt(g,i,e)) = &
               plasticState (material_phaseAt(g,e))%partionedState0(:,material_phasememberAt(g,i,e))
@@ -386,15 +353,11 @@ subroutine materialpoint_stressAndItsTangent(updateJaco,dt)
             damageState(material_homogenizationAt(e))%State( :,material_homogenizationMemberAt(i,e)) = &
             damageState(material_homogenizationAt(e))%subState0(:,material_homogenizationMemberAt(i,e))
           endif
-      endif converged
+      endif

-      if (materialpoint_subStep(i,e) > num%subStepMinHomog) then
-        materialpoint_requested(i,e) = .true.
-        materialpoint_subF(1:3,1:3,i,e) = materialpoint_subF0(1:3,1:3,i,e) &
-                                        + materialpoint_subStep(i,e) * (materialpoint_F(1:3,1:3,i,e) &
-                                        - materialpoint_F0(1:3,1:3,i,e))
-        materialpoint_subdt(i,e) = materialpoint_subStep(i,e) * dt
-        materialpoint_doneAndHappy(1:2,i,e) = [.false.,.true.]
+      if (subStep(i,e) > num%subStepMinHomog) then
+        requested(i,e) = .true.
+        doneAndHappy(1:2,i,e) = [.false.,.true.]
       endif
     enddo IpLooping1
   enddo elementLooping1
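Editor's note on the adaptive substepping handled in the hunks above (a restatement for review, not part of the patch): writing f for subFrac and s for subStep, a converged sub-increment winds the window forward and may enlarge it, while a troubled one is cut back,

$$\text{converged: } f \leftarrow f + s,\quad s \leftarrow \min\bigl(1-f,\ n_{\text{stepIncreaseHomog}}\, s\bigr) \qquad\qquad \text{cut back: } s \leftarrow n_{\text{subStepSizeHomog}}\, s$$

and cutBackLooping keeps iterating while any s exceeds subStepMinHomog. Only the variable names change in this commit; the arithmetic is untouched.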
@@ -403,24 +366,24 @@ subroutine materialpoint_stressAndItsTangent(updateJaco,dt)
     NiterationMPstate = 0

     convergenceLooping: do while (.not. terminallyIll .and. &
-              any( materialpoint_requested(:,FEsolving_execELem(1):FEsolving_execElem(2)) &
-                   .and. .not. materialpoint_doneAndHappy(1,:,FEsolving_execELem(1):FEsolving_execElem(2)) &
+              any( requested(:,FEsolving_execELem(1):FEsolving_execElem(2)) &
+                   .and. .not. doneAndHappy(1,:,FEsolving_execELem(1):FEsolving_execElem(2)) &
                  ) .and. &
               NiterationMPstate < num%nMPstate)
       NiterationMPstate = NiterationMPstate + 1

 !--------------------------------------------------------------------------------------------------
 ! deformation partitioning
-! based on materialpoint_subF0,.._subF,crystallite_partionedF0, and homogenization_state,
-! results in crystallite_partionedF
       !$OMP PARALLEL DO PRIVATE(myNgrains)
       elementLooping2: do e = FEsolving_execElem(1),FEsolving_execElem(2)
         myNgrains = homogenization_Ngrains(material_homogenizationAt(e))
         IpLooping2: do i = FEsolving_execIP(1),FEsolving_execIP(2)
-          if ( materialpoint_requested(i,e) .and. & ! process requested but...
-               .not. materialpoint_doneAndHappy(1,i,e)) then ! ...not yet done material points
-            call partitionDeformation(i,e) ! partition deformation onto constituents
-            crystallite_dt(1:myNgrains,i,e) = materialpoint_subdt(i,e) ! propagate materialpoint dt to grains
+          if(requested(i,e) .and. .not. doneAndHappy(1,i,e)) then ! requested but not yet done
+            call partitionDeformation(materialpoint_F0(1:3,1:3,i,e) &
+                                      + (materialpoint_F(1:3,1:3,i,e)-materialpoint_F0(1:3,1:3,i,e))&
+                                        *(subStep(i,e)+subFrac(i,e)), &
+                                      i,e)
+            crystallite_dt(1:myNgrains,i,e) = dt*subStep(i,e) ! propagate materialpoint dt to grains
             crystallite_requested(1:myNgrains,i,e) = .true. ! request calculation for constituents
           else
            crystallite_requested(1:myNgrains,i,e) = .false. ! calculation for constituents not required anymore
@@ -431,23 +394,23 @@ subroutine materialpoint_stressAndItsTangent(updateJaco,dt)

 !--------------------------------------------------------------------------------------------------
 ! crystallite integration
-! based on crystallite_partionedF0,.._partionedF
-! incrementing by crystallite_dt
-
-      materialpoint_converged = crystallite_stress() !ToDo: MD not sure if that is the best logic
+      converged = crystallite_stress() !ToDo: MD not sure if that is the best logic

 !--------------------------------------------------------------------------------------------------
 ! state update
       !$OMP PARALLEL DO
       elementLooping3: do e = FEsolving_execElem(1),FEsolving_execElem(2)
         IpLooping3: do i = FEsolving_execIP(1),FEsolving_execIP(2)
-          if ( materialpoint_requested(i,e) .and. &
-               .not. materialpoint_doneAndHappy(1,i,e)) then
-            if (.not. materialpoint_converged(i,e)) then
-              materialpoint_doneAndHappy(1:2,i,e) = [.true.,.false.]
+          if (requested(i,e) .and. .not. doneAndHappy(1,i,e)) then
+            if (.not. converged(i,e)) then
+              doneAndHappy(1:2,i,e) = [.true.,.false.]
             else
-              materialpoint_doneAndHappy(1:2,i,e) = updateState(i,e)
-              materialpoint_converged(i,e) = all(materialpoint_doneAndHappy(1:2,i,e)) ! converged if done and happy
+              doneAndHappy(1:2,i,e) = updateState(dt*subStep(i,e), &
+                                                  materialpoint_F0(1:3,1:3,i,e) &
+                                                  + (materialpoint_F(1:3,1:3,i,e)-materialpoint_F0(1:3,1:3,i,e)) &
+                                                    *(subStep(i,e)+subFrac(i,e)), &
+                                                  i,e)
+              converged(i,e) = all(doneAndHappy(1:2,i,e)) ! converged if done and happy
             endif
           endif
         enddo IpLooping3
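For reviewers: with subF and subdt now passed as arguments, the quantities that used to live in materialpoint_subF and materialpoint_subdt are formed inline at the two call sites above. In formula form (an editor's restatement of the expressions visible in the diff),

$$F_{\text{sub}} = F_0 + (\text{subFrac} + \text{subStep})\,(F - F_0), \qquad \Delta t_{\text{sub}} = \text{subStep}\cdot dt,$$

which is exactly the interpolation the removed materialpoint_subF update performed per sub-increment.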
@@ -481,29 +444,31 @@ end subroutine materialpoint_stressAndItsTangent
 !--------------------------------------------------------------------------------------------------
 !> @brief partition material point def grad onto constituents
 !--------------------------------------------------------------------------------------------------
-subroutine partitionDeformation(ip,el)
+subroutine partitionDeformation(subF,ip,el)

+  real(pReal), intent(in), dimension(3,3) :: &
+    subF
   integer, intent(in) :: &
     ip, & !< integration point
     el !< element number

   chosenHomogenization: select case(homogenization_type(material_homogenizationAt(el)))

     case (HOMOGENIZATION_NONE_ID) chosenHomogenization
-      crystallite_partionedF(1:3,1:3,1,ip,el) = materialpoint_subF(1:3,1:3,ip,el)
+      crystallite_partionedF(1:3,1:3,1,ip,el) = subF

     case (HOMOGENIZATION_ISOSTRAIN_ID) chosenHomogenization
       call mech_isostrain_partitionDeformation(&
         crystallite_partionedF(1:3,1:3,1:homogenization_Ngrains(material_homogenizationAt(el)),ip,el), &
-        materialpoint_subF(1:3,1:3,ip,el))
+        subF)

     case (HOMOGENIZATION_RGC_ID) chosenHomogenization
       call mech_RGC_partitionDeformation(&
         crystallite_partionedF(1:3,1:3,1:homogenization_Ngrains(material_homogenizationAt(el)),ip,el), &
-        materialpoint_subF(1:3,1:3,ip,el),&
+        subF,&
         ip, &
         el)
   end select chosenHomogenization

 end subroutine partitionDeformation
@@ -512,45 +477,49 @@ end subroutine partitionDeformation
 !> @brief update the internal state of the homogenization scheme and tell whether "done" and
 !> "happy" with result
 !--------------------------------------------------------------------------------------------------
-function updateState(ip,el)
+function updateState(subdt,subF,ip,el)

-  integer, intent(in) :: &
-    ip, & !< integration point
-    el !< element number
-  logical, dimension(2) :: updateState
+  real(pReal), intent(in) :: &
+    subdt !< current time step
+  real(pReal), intent(in), dimension(3,3) :: &
+    subF
+  integer, intent(in) :: &
+    ip, & !< integration point
+    el !< element number
+  logical, dimension(2) :: updateState

   updateState = .true.
   chosenHomogenization: select case(homogenization_type(material_homogenizationAt(el)))
     case (HOMOGENIZATION_RGC_ID) chosenHomogenization
       updateState = &
         updateState .and. &
         mech_RGC_updateState(crystallite_P(1:3,1:3,1:homogenization_Ngrains(material_homogenizationAt(el)),ip,el), &
                              crystallite_partionedF(1:3,1:3,1:homogenization_Ngrains(material_homogenizationAt(el)),ip,el), &
                              crystallite_partionedF0(1:3,1:3,1:homogenization_Ngrains(material_homogenizationAt(el)),ip,el),&
-                             materialpoint_subF(1:3,1:3,ip,el),&
-                             materialpoint_subdt(ip,el), &
+                             subF,&
+                             subdt, &
                              crystallite_dPdF(1:3,1:3,1:3,1:3,1:homogenization_Ngrains(material_homogenizationAt(el)),ip,el), &
                              ip, &
                              el)
   end select chosenHomogenization

   chosenThermal: select case (thermal_type(material_homogenizationAt(el)))
     case (THERMAL_adiabatic_ID) chosenThermal
       updateState = &
         updateState .and. &
-        thermal_adiabatic_updateState(materialpoint_subdt(ip,el), &
+        thermal_adiabatic_updateState(subdt, &
                                       ip, &
                                       el)
   end select chosenThermal

   chosenDamage: select case (damage_type(material_homogenizationAt(el)))
     case (DAMAGE_local_ID) chosenDamage
       updateState = &
         updateState .and. &
-        damage_local_updateState(materialpoint_subdt(ip,el), &
+        damage_local_updateState(subdt, &
                                  ip, &
                                  el)
   end select chosenDamage

 end function updateState
@@ -560,31 +529,31 @@ end function updateState
 !--------------------------------------------------------------------------------------------------
 subroutine averageStressAndItsTangent(ip,el)

   integer, intent(in) :: &
     ip, & !< integration point
     el !< element number

   chosenHomogenization: select case(homogenization_type(material_homogenizationAt(el)))
     case (HOMOGENIZATION_NONE_ID) chosenHomogenization
       materialpoint_P(1:3,1:3,ip,el) = crystallite_P(1:3,1:3,1,ip,el)
       materialpoint_dPdF(1:3,1:3,1:3,1:3,ip,el) = crystallite_dPdF(1:3,1:3,1:3,1:3,1,ip,el)

     case (HOMOGENIZATION_ISOSTRAIN_ID) chosenHomogenization
       call mech_isostrain_averageStressAndItsTangent(&
         materialpoint_P(1:3,1:3,ip,el), &
         materialpoint_dPdF(1:3,1:3,1:3,1:3,ip,el),&
         crystallite_P(1:3,1:3,1:homogenization_Ngrains(material_homogenizationAt(el)),ip,el), &
         crystallite_dPdF(1:3,1:3,1:3,1:3,1:homogenization_Ngrains(material_homogenizationAt(el)),ip,el), &
         homogenization_typeInstance(material_homogenizationAt(el)))

     case (HOMOGENIZATION_RGC_ID) chosenHomogenization
       call mech_RGC_averageStressAndItsTangent(&
         materialpoint_P(1:3,1:3,ip,el), &
         materialpoint_dPdF(1:3,1:3,1:3,1:3,ip,el),&
         crystallite_P(1:3,1:3,1:homogenization_Ngrains(material_homogenizationAt(el)),ip,el), &
         crystallite_dPdF(1:3,1:3,1:3,1:3,1:homogenization_Ngrains(material_homogenizationAt(el)),ip,el), &
         homogenization_typeInstance(material_homogenizationAt(el)))
   end select chosenHomogenization

 end subroutine averageStressAndItsTangent
@@ -11,7 +11,6 @@ module material
   use results
   use IO
   use debug
-  use numerics
   use rotations
   use discretization

@@ -174,8 +173,7 @@ module material

   public :: &
     material_init, &
-    material_allocatePlasticState, &
-    material_allocateSourceState, &
+    material_allocateState, &
     ELASTICITY_HOOKE_ID ,&
     PLASTICITY_NONE_ID, &
     PLASTICITY_ISOTROPIC_ID, &
@@ -700,63 +698,35 @@ end subroutine material_parseTexture

 !--------------------------------------------------------------------------------------------------
-!> @brief allocates the plastic state of a phase
+!> @brief Allocate the components of the state structure for a given phase
 !--------------------------------------------------------------------------------------------------
-subroutine material_allocatePlasticState(phase,NipcMyPhase,&
-                                         sizeState,sizeDotState,sizeDeltaState)
+subroutine material_allocateState(state, &
+                                  NipcMyPhase,sizeState,sizeDotState,sizeDeltaState)

+  class(tState), intent(out) :: &
+    state
   integer, intent(in) :: &
-    phase, &
     NipcMyPhase, &
     sizeState, &
     sizeDotState, &
     sizeDeltaState

-  plasticState(phase)%sizeState = sizeState
-  plasticState(phase)%sizeDotState = sizeDotState
-  plasticState(phase)%sizeDeltaState = sizeDeltaState
-  plasticState(phase)%offsetDeltaState = sizeState-sizeDeltaState ! deltaState occupies latter part of state by definition
+  state%sizeState = sizeState
+  state%sizeDotState = sizeDotState
+  state%sizeDeltaState = sizeDeltaState
+  state%offsetDeltaState = sizeState-sizeDeltaState ! deltaState occupies latter part of state by definition

-  allocate(plasticState(phase)%atol (sizeState), source=0.0_pReal)
-  allocate(plasticState(phase)%state0 (sizeState,NipcMyPhase), source=0.0_pReal)
-  allocate(plasticState(phase)%partionedState0 (sizeState,NipcMyPhase), source=0.0_pReal)
-  allocate(plasticState(phase)%subState0 (sizeState,NipcMyPhase), source=0.0_pReal)
-  allocate(plasticState(phase)%state (sizeState,NipcMyPhase), source=0.0_pReal)
+  allocate(state%atol (sizeState), source=0.0_pReal)
+  allocate(state%state0 (sizeState,NipcMyPhase), source=0.0_pReal)
+  allocate(state%partionedState0(sizeState,NipcMyPhase), source=0.0_pReal)
+  allocate(state%subState0 (sizeState,NipcMyPhase), source=0.0_pReal)
+  allocate(state%state (sizeState,NipcMyPhase), source=0.0_pReal)

-  allocate(plasticState(phase)%dotState (sizeDotState,NipcMyPhase),source=0.0_pReal)
+  allocate(state%dotState (sizeDotState,NipcMyPhase), source=0.0_pReal)

-  allocate(plasticState(phase)%deltaState (sizeDeltaState,NipcMyPhase),source=0.0_pReal)
+  allocate(state%deltaState(sizeDeltaState,NipcMyPhase), source=0.0_pReal)

-end subroutine material_allocatePlasticState
+end subroutine material_allocateState

-
-!--------------------------------------------------------------------------------------------------
-!> @brief allocates the source state of a phase
-!--------------------------------------------------------------------------------------------------
-subroutine material_allocateSourceState(phase,of,NipcMyPhase,&
-                                        sizeState,sizeDotState,sizeDeltaState)
-
-  integer, intent(in) :: &
-    phase, &
-    of, &
-    NipcMyPhase, &
-    sizeState, sizeDotState,sizeDeltaState
-
-  sourceState(phase)%p(of)%sizeState = sizeState
-  sourceState(phase)%p(of)%sizeDotState = sizeDotState
-  sourceState(phase)%p(of)%sizeDeltaState = sizeDeltaState
-  sourceState(phase)%p(of)%offsetDeltaState = sizeState-sizeDeltaState ! deltaState occupies latter part of state by definition
-
-  allocate(sourceState(phase)%p(of)%atol (sizeState), source=0.0_pReal)
-  allocate(sourceState(phase)%p(of)%state0 (sizeState,NipcMyPhase), source=0.0_pReal)
-  allocate(sourceState(phase)%p(of)%partionedState0 (sizeState,NipcMyPhase), source=0.0_pReal)
-  allocate(sourceState(phase)%p(of)%subState0 (sizeState,NipcMyPhase), source=0.0_pReal)
-  allocate(sourceState(phase)%p(of)%state (sizeState,NipcMyPhase), source=0.0_pReal)
-
-  allocate(sourceState(phase)%p(of)%dotState (sizeDotState,NipcMyPhase),source=0.0_pReal)
-
-  allocate(sourceState(phase)%p(of)%deltaState (sizeDeltaState,NipcMyPhase),source=0.0_pReal)
-
-end subroutine material_allocateSourceState

 end module material
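A minimal sketch of how call sites are expected to migrate to the merged routine; the index names p and s below are illustrative placeholders, not taken from this commit:

  ! before:  call material_allocatePlasticState(p,NipcMyPhase,sizeState,sizeDotState,sizeDeltaState)
  call material_allocateState(plasticState(p),     NipcMyPhase,sizeState,sizeDotState,sizeDeltaState)
  ! before:  call material_allocateSourceState(p,s,NipcMyPhase,sizeState,sizeDotState,sizeDeltaState)
  call material_allocateState(sourceState(p)%p(s), NipcMyPhase,sizeState,sizeDotState,sizeDeltaState)

Since the dummy argument is declared class(tState), any state container that extends tState can be passed directly, which is what makes the two near-identical allocation routines collapsible into one.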
@@ -20,8 +20,7 @@ module numerics
     iJacoStiffness = 1, & !< frequency of stiffness update
     randomSeed = 0, & !< fixed seeding for pseudo-random number generator, Default 0: use random seed
     worldrank = 0, & !< MPI worldrank (/=0 for MPI simulations only)
-    worldsize = 1, & !< MPI worldsize (/=1 for MPI simulations only)
-    numerics_integrator = 1 !< method used for state integration Default 1: fix-point iteration
+    worldsize = 1 !< MPI worldsize (/=1 for MPI simulations only)
   integer(4), protected, public :: &
     DAMASK_NumThreadsInt = 0 !< value stored in environment variable DAMASK_NUM_THREADS, set to zero if no OpenMP directive
   real(pReal), protected, public :: &

@@ -134,8 +133,6 @@ subroutine numerics_init
         defgradTolerance = IO_floatValue(line,chunkPos,2)
       case ('ijacostiffness')
         iJacoStiffness = IO_intValue(line,chunkPos,2)
-      case ('integrator')
-        numerics_integrator = IO_intValue(line,chunkPos,2)
       case ('usepingpong')
         usepingpong = IO_intValue(line,chunkPos,2) > 0
       case ('unitlength')
@@ -176,6 +173,11 @@ subroutine numerics_init
       case ('maxstaggerediter')
         stagItMax = IO_intValue(line,chunkPos,2)

+#ifdef PETSC
+      case ('petsc_options')
+        petsc_options = trim(line(chunkPos(4):))
+#endif
+
 !--------------------------------------------------------------------------------------------------
 ! spectral parameters
 #ifdef Grid
@@ -187,8 +189,6 @@ subroutine numerics_init
         err_stress_tolrel = IO_floatValue(line,chunkPos,2)
       case ('err_stress_tolabs')
         err_stress_tolabs = IO_floatValue(line,chunkPos,2)
-      case ('petsc_options')
-        petsc_options = trim(line(chunkPos(4):))
       case ('err_curl_tolabs')
         err_curl_tolAbs = IO_floatValue(line,chunkPos,2)
       case ('err_curl_tolrel')

@@ -206,8 +206,6 @@ subroutine numerics_init
         integrationorder = IO_intValue(line,chunkPos,2)
       case ('structorder')
         structorder = IO_intValue(line,chunkPos,2)
-      case ('petsc_options')
-        petsc_options = trim(line(chunkPos(4):))
       case ('bbarstabilisation')
         BBarStabilisation = IO_intValue(line,chunkPos,2) > 0
 #endif
@@ -223,7 +221,6 @@ subroutine numerics_init
 ! writing parameters to output
   write(6,'(a24,1x,es8.1)') ' defgradTolerance: ',defgradTolerance
   write(6,'(a24,1x,i8)') ' iJacoStiffness: ',iJacoStiffness
-  write(6,'(a24,1x,i8)') ' integrator: ',numerics_integrator
   write(6,'(a24,1x,L8)') ' use ping pong scheme: ',usepingpong
   write(6,'(a24,1x,es8.1,/)')' unitlength: ',numerics_unitlength

@@ -266,7 +263,6 @@ subroutine numerics_init
   write(6,'(a24,1x,es8.1)') ' err_curl_tolRel: ',err_curl_tolRel
   write(6,'(a24,1x,es8.1)') ' polarAlpha: ',polarAlpha
   write(6,'(a24,1x,es8.1)') ' polarBeta: ',polarBeta
-  write(6,'(a24,1x,a)') ' PETSc_options: ',trim(petsc_options)
 #endif

 !--------------------------------------------------------------------------------------------------
@@ -274,16 +270,17 @@ subroutine numerics_init
 #ifdef FEM
   write(6,'(a24,1x,i8)') ' integrationOrder: ',integrationOrder
   write(6,'(a24,1x,i8)') ' structOrder: ',structOrder
-  write(6,'(a24,1x,a)') ' PETSc_options: ',trim(petsc_options)
   write(6,'(a24,1x,L8)') ' B-Bar stabilisation: ',BBarStabilisation
 #endif

+#ifdef PETSC
+  write(6,'(a24,1x,a)') ' PETSc_options: ',trim(petsc_options)
+#endif
+
 !--------------------------------------------------------------------------------------------------
 ! sanity checks
   if (defgradTolerance <= 0.0_pReal) call IO_error(301,ext_msg='defgradTolerance')
   if (iJacoStiffness < 1) call IO_error(301,ext_msg='iJacoStiffness')
-  if (numerics_integrator <= 0 .or. numerics_integrator >= 6) &
-    call IO_error(301,ext_msg='integrator')
   if (numerics_unitlength <= 0.0_pReal) call IO_error(301,ext_msg='unitlength')
   if (residualStiffness < 0.0_pReal) call IO_error(301,ext_msg='residualStiffness')
   if (itmax <= 1) call IO_error(301,ext_msg='itmax')
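Practical effect of the petsc_options reshuffle above: the key is now parsed once under #ifdef PETSC instead of separately inside the Grid and FEM blocks, so a numerics.config line such as "petsc_options -snes_type newtonls -ksp_type fgmres" (the option values here are purely illustrative, not part of this commit) is picked up by every PETSc-based build, and its value is echoed from the new PETSc-only output block.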
@@ -53,8 +53,7 @@ module prec
     logical :: &
       nonlocal = .false.
     real(pReal), pointer, dimension(:,:) :: &
-      slipRate, & !< slip rate
-      accumulatedSlip !< accumulated plastic slip
+      slipRate !< slip rate
   end type

   type :: tSourceState
@@ -118,7 +118,7 @@ end subroutine quaternions_init
-!> construct a quaternion from a 4-vector
+!> @brief construct a quaternion from a 4-vector
 type(quaternion) pure function init__(array)

@@ -133,7 +133,7 @@ end function init__
-!> assign a quaternion
+!> @brief assign a quaternion
 elemental pure subroutine assign_quat__(self,other)

@@ -146,7 +146,7 @@ end subroutine assign_quat__
-!> assign a 4-vector
+!> @brief assign a 4-vector
 pure subroutine assign_vec__(self,other)

@@ -162,7 +162,7 @@ end subroutine assign_vec__
-!> add a quaternion
+!> @brief add a quaternion
 type(quaternion) elemental pure function add__(self,other)

@@ -175,7 +175,7 @@ end function add__
-!> return (unary positive operator)
+!> @brief return (unary positive operator)
 type(quaternion) elemental pure function pos__(self)

@@ -187,7 +187,7 @@ end function pos__
-!> subtract a quaternion
+!> @brief subtract a quaternion
 type(quaternion) elemental pure function sub__(self,other)

@@ -200,7 +200,7 @@ end function sub__
-!> negate (unary negative operator)
+!> @brief negate (unary negative operator)
 type(quaternion) elemental pure function neg__(self)

@@ -212,7 +212,7 @@ end function neg__
-!> multiply with a quaternion
+!> @brief multiply with a quaternion
 type(quaternion) elemental pure function mul_quat__(self,other)

@@ -227,7 +227,7 @@ end function mul_quat__
-!> multiply with a scalar
+!> @brief multiply with a scalar
 type(quaternion) elemental pure function mul_scal__(self,scal)

@@ -240,7 +240,7 @@ end function mul_scal__
-!> divide by a quaternion
+!> @brief divide by a quaternion
 type(quaternion) elemental pure function div_quat__(self,other)

@@ -252,7 +252,7 @@ end function div_quat__
-!> divide by a scalar
+!> @brief divide by a scalar
 type(quaternion) elemental pure function div_scal__(self,scal)

@@ -265,7 +265,7 @@ end function div_scal__
-!> test equality
+!> @brief test equality
 logical elemental pure function eq__(self,other)

@@ -278,7 +278,7 @@ end function eq__
-!> test inequality
+!> @brief test inequality
 logical elemental pure function neq__(self,other)

@@ -290,7 +290,7 @@ end function neq__
-!> raise to the power of a quaternion
+!> @brief raise to the power of a quaternion
 type(quaternion) elemental pure function pow_quat__(self,expon)

@@ -303,7 +303,7 @@ end function pow_quat__
-!> raise to the power of a scalar
+!> @brief raise to the power of a scalar
 type(quaternion) elemental pure function pow_scal__(self,expon)

@@ -316,7 +316,7 @@ end function pow_scal__
-!> take exponential
+!> @brief take exponential
 type(quaternion) elemental pure function exp__(a)

@@ -336,7 +336,7 @@ end function exp__
-!> take logarithm
+!> @brief take logarithm
 type(quaternion) elemental pure function log__(a)

@@ -356,7 +356,7 @@ end function log__
-!> return norm
+!> @brief return norm
 real(pReal) elemental pure function abs__(self)

@@ -368,7 +368,7 @@ end function abs__
-!> calculate dot product
+!> @brief calculate dot product
 real(pReal) elemental pure function dot_product__(a,b)

@@ -380,7 +380,7 @@ end function dot_product__
-!> take conjugate complex
+!> @brief take conjugate complex
 type(quaternion) elemental pure function conjg__(self)

@@ -392,7 +392,7 @@ end function conjg__
-!> homomorph
+!> @brief homomorph
 type(quaternion) elemental pure function homomorphed(self)

@@ -404,7 +404,7 @@ end function homomorphed
-!> return as plain array
+!> @brief return as plain array
 pure function asArray(self)

@@ -417,7 +417,7 @@ end function asArray
-!> real part (scalar)
+!> @brief real part (scalar)
 pure function real__(self)

@@ -430,7 +430,7 @@ end function real__
-!> imaginary part (3-vector)
+!> @brief imaginary part (3-vector)
 pure function aimag__(self)

@@ -443,7 +443,7 @@ end function aimag__
-!> inverse
+!> @brief inverse
 type(quaternion) elemental pure function inverse(self)
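For reference while reading the operator docstrings above, these are the textbook Hamilton-algebra definitions the wrapped operators correspond to (standard formulas, not quoted from quaternions.f90):

$$pq = \bigl(p_0 q_0 - \mathbf{p}\cdot\mathbf{q},\; p_0\mathbf{q} + q_0\mathbf{p} + \mathbf{p}\times\mathbf{q}\bigr), \qquad \bar q = (q_0, -\mathbf{q}), \qquad |q| = \sqrt{q_0^2 + \mathbf{q}\cdot\mathbf{q}}, \qquad q^{-1} = \bar q/|q|^2.$$

Only the Doxygen/Sphinx tags change in these hunks; the arithmetic itself is untouched.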
@@ -50,7 +50,6 @@ module rotations
   use prec
   use IO
   use math
-  use Lambert
   use quaternions

   implicit none

@@ -81,6 +80,18 @@ module rotations
     procedure, public :: standardize
   end type rotation

+  real(pReal), parameter :: &
+    SPI = sqrt(PI), &
+    PREF = sqrt(6.0_pReal/PI), &
+    A = PI**(5.0_pReal/6.0_pReal)/6.0_pReal**(1.0_pReal/6.0_pReal), &
+    AP = PI**(2.0_pReal/3.0_pReal), &
+    SC = A/AP, &
+    BETA = A/2.0_pReal, &
+    R1 = (3.0_pReal*PI/4.0_pReal)**(1.0_pReal/3.0_pReal), &
+    R2 = sqrt(2.0_pReal), &
+    PI12 = PI/12.0_pReal, &
+    PREK = R1 * 2.0_pReal**(1.0_pReal/4.0_pReal)/BETA
+
   public :: &
     rotations_init, &
     eu2om
@@ -432,18 +443,17 @@ pure function qu2eu(qu) result(eu)
   real(pReal), intent(in), dimension(4) :: qu
   real(pReal), dimension(3) :: eu

-  real(pReal) :: q12, q03, chi, chiInv
+  real(pReal) :: q12, q03, chi

   q03 = qu(1)**2+qu(4)**2
   q12 = qu(2)**2+qu(3)**2
   chi = sqrt(q03*q12)

-  degenerated: if (dEq0(chi)) then
-    eu = merge([atan2(-P*2.0_pReal*qu(1)*qu(4),qu(1)**2-qu(4)**2), 0.0_pReal, 0.0_pReal], &
-               [atan2( 2.0_pReal*qu(2)*qu(3),qu(2)**2-qu(3)**2), PI, 0.0_pReal], &
-               dEq0(q12))
+  degenerated: if (dEq0(q12)) then
+    eu = [atan2(-P*2.0_pReal*qu(1)*qu(4),qu(1)**2-qu(4)**2), 0.0_pReal, 0.0_pReal]
+  elseif (dEq0(q03)) then
+    eu = [atan2( 2.0_pReal*qu(2)*qu(3),qu(2)**2-qu(3)**2), PI, 0.0_pReal]
   else degenerated
-    chiInv = 1.0_pReal/chi
     eu = [atan2((-P*qu(1)*qu(3)+qu(2)*qu(4))*chi, (-P*qu(1)*qu(2)-qu(3)*qu(4))*chi ), &
           atan2( 2.0_pReal*chi, q03-q12 ), &
           atan2(( P*qu(1)*qu(3)+qu(2)*qu(4))*chi, (-P*qu(1)*qu(2)+qu(3)*qu(4))*chi )]
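The reworked degenerate handling above splits the special cases by which pair of quaternion components vanishes, instead of funneling both through a single merge. A transliterated sketch in Python (illustration only; P is the module's quaternion sign convention, whose value is not visible in this hunk, so P = 1 below is an assumption):

    import math

    P = 1.0  # assumed convention constant, not taken from the hunk

    def qu2eu(qu, tol=1.0e-12):
        """Unit quaternion (q0, q1, q2, q3) to Bunge Euler angles, mirroring the new branch structure."""
        q0, q1, q2, q3 = qu
        q03 = q0**2 + q3**2
        q12 = q1**2 + q2**2
        chi = math.sqrt(q03*q12)

        if q12 < tol:                                      # rotation about z only
            eu = [math.atan2(-P*2.0*q0*q3, q0**2 - q3**2), 0.0, 0.0]
        elif q03 < tol:                                    # 180 degree rotation about an axis in the x-y plane
            eu = [math.atan2(2.0*q1*q2, q1**2 - q2**2), math.pi, 0.0]
        else:                                              # general case
            eu = [math.atan2((-P*q0*q2 + q1*q3)*chi, (-P*q0*q1 - q2*q3)*chi),
                  math.atan2(2.0*chi, q03 - q12),
                  math.atan2(( P*q0*q2 + q1*q3)*chi, (-P*q0*q1 + q2*q3)*chi)]
        return [e + 2.0*math.pi if e < 0.0 else e for e in eu]  # wrap into [0, 2*pi), the usual Bunge range

    print(qu2eu([math.cos(0.3), 0.0, 0.0, math.sin(0.3)]))      # pure z rotation hits the first branch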
@@ -517,7 +527,7 @@ pure function qu2ho(qu) result(ho)

   omega = 2.0 * acos(math_clip(qu(1),-1.0_pReal,1.0_pReal))

-  if (dEq0(omega)) then
+  if (dEq0(omega,tol=1.e-5_pReal)) then
     ho = [ 0.0_pReal, 0.0_pReal, 0.0_pReal ]
   else
     ho = qu(2:4)
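The widened tolerance matters because omega = 2*acos(qu(1)) magnifies rounding error near the identity rotation, where qu(1) is close to 1 and the vector part is nearly zero. A small Python illustration of the cutoff (the exact default tolerance of dEq0 is not shown here, so a machine-precision stand-in is used):

    import math

    for eps in (0.0, 1e-16, 1e-12, 1e-10):
        q0 = 1.0 - eps                           # first quaternion component, near identity
        omega = 2.0*math.acos(min(1.0, q0))      # rotation angle, as in qu2ho
        tight = abs(omega) <= 1e-15              # stand-in for the much tighter default tolerance
        wide  = abs(omega) <= 1.0e-5             # behaviour of dEq0(omega, tol=1.e-5_pReal)
        print(f"eps={eps:.0e}  omega={omega:.3e}  tight={tight}  wide={wide}")

For qu(1) = 1 - 1e-12 the angle comes out near 3e-6: clearly a numerical identity rotation, caught by the new tolerance but not by a near-machine-precision one, which would otherwise send an almost-zero vector part into the normalising branch.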
@@ -917,7 +927,7 @@ function ax2cu(ax) result(cu)
   real(pReal), intent(in), dimension(4) :: ax
   real(pReal), dimension(3) :: cu

   cu = ho2cu(ax2ho(ax))

 end function ax2cu

@@ -1121,16 +1131,56 @@ pure function ho2ro(ho) result(ro)
 end function ho2ro


-!---------------------------------------------------------------------------------------------------
+!--------------------------------------------------------------------------
 !> @author Marc De Graef, Carnegie Mellon University
+!> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH
 !> @brief convert homochoric to cubochoric
-!---------------------------------------------------------------------------------------------------
+!--------------------------------------------------------------------------
 pure function ho2cu(ho) result(cu)

   real(pReal), intent(in), dimension(3) :: ho
-  real(pReal), dimension(3) :: cu
+  real(pReal), dimension(3) :: cu, xyz1, xyz3
+  real(pReal), dimension(2) :: Tinv, xyz2
+  real(pReal) :: rs, qxy, q2, sq2, q, tt
+  integer, dimension(3,2) :: p

-  cu = Lambert_BallToCube(ho)
+  rs = norm2(ho)
+  if (rs > R1+1.e-6_pReal) then
+    cu = IEEE_value(cu,IEEE_positive_inf)
+    return
+  endif
+
+  center: if (all(dEq0(ho))) then
+    cu = 0.0_pReal
+  else center
+    p = GetPyramidOrder(ho)
+    xyz3 = ho(p(:,1))
+
+    ! inverse M_3
+    xyz2 = xyz3(1:2) * sqrt( 2.0*rs/(rs+abs(xyz3(3))) )
+
+    ! inverse M_2
+    qxy = sum(xyz2**2)
+
+    special: if (dEq0(qxy)) then
+      Tinv = 0.0_pReal
+    else special
+      q2 = qxy + maxval(abs(xyz2))**2
+      sq2 = sqrt(q2)
+      q = (beta/R2/R1) * sqrt(q2*qxy/(q2-maxval(abs(xyz2))*sq2))
+      tt = (minval(abs(xyz2))**2+maxval(abs(xyz2))*sq2)/R2/qxy
+      Tinv = q * sign(1.0_pReal,xyz2) * merge([ 1.0_pReal, acos(math_clip(tt,-1.0_pReal,1.0_pReal))/PI12], &
+                                              [ acos(math_clip(tt,-1.0_pReal,1.0_pReal))/PI12, 1.0_pReal], &
+                                              abs(xyz2(2)) <= abs(xyz2(1)))
+    endif special
+
+    ! inverse M_1
+    xyz1 = [ Tinv(1), Tinv(2), sign(1.0_pReal,xyz3(3)) * rs / pref ] /sc
+
+    ! reverse the coordinates back to order according to the original pyramid number
+    cu = xyz1(p(:,2))
+
+  endif center
+
 end function ho2cu

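Reading off the z-axis special cases of the two inlined mappings gives a quick consistency check: cu2ho scales the third component by PREF*SC, ho2cu divides by it, and the face centre of the cube at AP/2 must land on the surface of the homochoric ball at R1. A minimal Python check (illustration only, derived from the special cases shown above):

    import math

    PI   = math.pi
    AP   = PI**(2.0/3.0)
    A    = PI**(5.0/6.0)/6.0**(1.0/6.0)
    SC   = A/AP
    PREF = math.sqrt(6.0/PI)
    R1   = (3.0*PI/4.0)**(1.0/3.0)

    cu_z = AP/2.0                              # face centre of the cubochoric cube
    ho_z = PREF*SC*cu_z                        # z-axis special case of cu2ho
    assert math.isclose(ho_z, R1)              # lands exactly on the ball surface
    assert math.isclose(ho_z/(PREF*SC), cu_z)  # z-axis special case of ho2cu inverts it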
@@ -1205,20 +1255,88 @@ pure function cu2ro(cu) result(ro)
 end function cu2ro


-!---------------------------------------------------------------------------------------------------
+!--------------------------------------------------------------------------
 !> @author Marc De Graef, Carnegie Mellon University
-!> @brief convert cubochoric to homochoric
-!---------------------------------------------------------------------------------------------------
+!> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH
+!> @brief map from 3D cubic grid to 3D ball
+!--------------------------------------------------------------------------
 pure function cu2ho(cu) result(ho)

   real(pReal), intent(in), dimension(3) :: cu
-  real(pReal), dimension(3) :: ho
+  real(pReal), dimension(3) :: ho, LamXYZ, XYZ
+  real(pReal), dimension(2) :: T
+  real(pReal) :: c, s, q
+  real(pReal), parameter :: eps = 1.0e-8_pReal
+  integer, dimension(3,2) :: p
+  integer, dimension(2) :: order

-  ho = Lambert_CubeToBall(cu)
+  if (maxval(abs(cu)) > AP/2.0+eps) then
+    ho = IEEE_value(cu,IEEE_positive_inf)
+    return
+  end if
+
+  ! transform to the sphere grid via the curved square, and intercept the zero point
+  center: if (all(dEq0(cu))) then
+    ho = 0.0_pReal
+  else center
+    ! get pyramide and scale by grid parameter ratio
+    p = GetPyramidOrder(cu)
+    XYZ = cu(p(:,1)) * sc
+
+    ! intercept all the points along the z-axis
+    special: if (all(dEq0(XYZ(1:2)))) then
+      LamXYZ = [ 0.0_pReal, 0.0_pReal, pref * XYZ(3) ]
+    else special
+      order = merge( [2,1], [1,2], abs(XYZ(2)) <= abs(XYZ(1)))     ! order of absolute values of XYZ
+      q = PI12 * XYZ(order(1))/XYZ(order(2))                       ! smaller by larger
+      c = cos(q)
+      s = sin(q)
+      q = prek * XYZ(order(2))/ sqrt(R2-c)
+      T = [ (R2*c - 1.0), R2 * s] * q
+
+      ! transform to sphere grid (inverse Lambert)
+      ! [note that there is no need to worry about dividing by zero, since XYZ(3) can not become zero]
+      c = sum(T**2)
+      s = Pi * c/(24.0*XYZ(3)**2)
+      c = sPi * c / sqrt(24.0_pReal) / XYZ(3)
+      q = sqrt( 1.0 - s )
+      LamXYZ = [ T(order(2)) * q, T(order(1)) * q, pref * XYZ(3) - c ]
+    endif special
+
+    ! reverse the coordinates back to order according to the original pyramid number
+    ho = LamXYZ(p(:,2))
+
+  endif center
+
 end function cu2ho


+!--------------------------------------------------------------------------
+!> @author Marc De Graef, Carnegie Mellon University
+!> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH
+!> @brief determine to which pyramid a point in a cubic grid belongs
+!--------------------------------------------------------------------------
+pure function GetPyramidOrder(xyz)
+
+  real(pReal),intent(in),dimension(3) :: xyz
+  integer, dimension(3,2) :: GetPyramidOrder
+
+  if      (((abs(xyz(1)) <=  xyz(3)).and.(abs(xyz(2)) <=  xyz(3))) .or. &
+           ((abs(xyz(1)) <= -xyz(3)).and.(abs(xyz(2)) <= -xyz(3)))) then
+    GetPyramidOrder = reshape([[1,2,3],[1,2,3]],[3,2])
+  else if (((abs(xyz(3)) <=  xyz(1)).and.(abs(xyz(2)) <=  xyz(1))) .or. &
+           ((abs(xyz(3)) <= -xyz(1)).and.(abs(xyz(2)) <= -xyz(1)))) then
+    GetPyramidOrder = reshape([[2,3,1],[3,1,2]],[3,2])
+  else if (((abs(xyz(1)) <=  xyz(2)).and.(abs(xyz(3)) <=  xyz(2))) .or. &
+           ((abs(xyz(1)) <= -xyz(2)).and.(abs(xyz(3)) <= -xyz(2)))) then
+    GetPyramidOrder = reshape([[3,1,2],[2,3,1]],[3,2])
+  else
+    GetPyramidOrder = -1                                           ! should be impossible, but might simplify debugging
+  end if
+
+end function GetPyramidOrder
+
+
 !--------------------------------------------------------------------------------------------------
 !> @brief check correctness of some rotations functions
 !--------------------------------------------------------------------------------------------------
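GetPyramidOrder is what lets cu2ho and ho2cu treat only the +/- z pyramids explicitly and handle the other two axis pairs by permutation: column 1 rotates the dominant axis onto z, column 2 undoes it. A Python transliteration with a round-trip check (illustration only, 0-based indices):

    import numpy as np

    def get_pyramid_order(xyz):
        """Permutation pair for the pyramid containing xyz, mirroring the Fortran above."""
        x, y, z = (float(v) for v in xyz)
        if (abs(x) <= z and abs(y) <= z) or (abs(x) <= -z and abs(y) <= -z):
            return np.array([[0, 1, 2], [0, 1, 2]]).T    # z-pyramids: no reordering
        if (abs(z) <= x and abs(y) <= x) or (abs(z) <= -x and abs(y) <= -x):
            return np.array([[1, 2, 0], [2, 0, 1]]).T    # x-pyramids
        if (abs(x) <= y and abs(z) <= y) or (abs(x) <= -y and abs(z) <= -y):
            return np.array([[2, 0, 1], [1, 2, 0]]).T    # y-pyramids
        return np.full((3, 2), -1)                       # should be unreachable

    xyz = np.array([0.7, 0.1, -0.2])                     # dominant +x direction
    p = get_pyramid_order(xyz)
    reordered = xyz[p[:, 0]]                             # forward permutation (dominant axis onto z)
    assert np.allclose(reordered[p[:, 1]], xyz)          # column 2 undoes column 1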
@@ -107,7 +107,7 @@ subroutine source_damage_anisoBrittle_init
   if (any(prm%critDisp < 0.0_pReal)) extmsg = trim(extmsg)//' anisobrittle_critDisp'

   NipcMyPhase = count(material_phaseAt==p) * discretization_nIP
-  call material_allocateSourceState(p,sourceOffset,NipcMyPhase,1,1,0)
+  call material_allocateState(sourceState(p)%p(sourceOffset),NipcMyPhase,1,1,0)
   sourceState(p)%p(sourceOffset)%atol = config%getFloat('anisobrittle_atol',defaultVal=1.0e-3_pReal)
   if(any(sourceState(p)%p(sourceOffset)%atol < 0.0_pReal)) extmsg = trim(extmsg)//' anisobrittle_atol'

@@ -89,7 +89,7 @@ subroutine source_damage_anisoDuctile_init
   if (any(prm%critPlasticStrain < 0.0_pReal)) extmsg = trim(extmsg)//' anisoductile_criticalplasticstrain'

   NipcMyPhase=count(material_phaseAt==p) * discretization_nIP
-  call material_allocateSourceState(p,sourceOffset,NipcMyPhase,1,1,0)
+  call material_allocateState(sourceState(p)%p(sourceOffset),NipcMyPhase,1,1,0)
   sourceState(p)%p(sourceOffset)%atol = config%getFloat('anisoductile_atol',defaultVal=1.0e-3_pReal)
   if(any(sourceState(p)%p(sourceOffset)%atol < 0.0_pReal)) extmsg = trim(extmsg)//' anisoductile_atol'

@@ -83,7 +83,7 @@ subroutine source_damage_isoBrittle_init
   if (prm%critStrainEnergy <= 0.0_pReal) extmsg = trim(extmsg)//' isobrittle_criticalstrainenergy'

   NipcMyPhase = count(material_phaseAt==p) * discretization_nIP
-  call material_allocateSourceState(p,sourceOffset,NipcMyPhase,1,1,1)
+  call material_allocateState(sourceState(p)%p(sourceOffset),NipcMyPhase,1,1,1)
   sourceState(p)%p(sourceOffset)%atol = config%getFloat('isobrittle_atol',defaultVal=1.0e-3_pReal)
   if(any(sourceState(p)%p(sourceOffset)%atol < 0.0_pReal)) extmsg = trim(extmsg)//' isobrittle_atol'

@@ -82,7 +82,7 @@ subroutine source_damage_isoDuctile_init
   if (prm%critPlasticStrain <= 0.0_pReal) extmsg = trim(extmsg)//' isoductile_criticalplasticstrain'

   NipcMyPhase=count(material_phaseAt==p) * discretization_nIP
-  call material_allocateSourceState(p,sourceOffset,NipcMyPhase,1,1,0)
+  call material_allocateState(sourceState(p)%p(sourceOffset),NipcMyPhase,1,1,0)
   sourceState(p)%p(sourceOffset)%atol = config%getFloat('isoductile_atol',defaultVal=1.0e-3_pReal)
   if(any(sourceState(p)%p(sourceOffset)%atol < 0.0_pReal)) extmsg = trim(extmsg)//' isoductile_atol'

@@ -67,7 +67,7 @@ subroutine source_thermal_dissipation_init
   prm%kappa = config%getFloat('dissipation_coldworkcoeff')

   NipcMyPhase = count(material_phaseAt==p) * discretization_nIP
-  call material_allocateSourceState(p,sourceOffset,NipcMyPhase,0,0,0)
+  call material_allocateState(sourceState(p)%p(sourceOffset),NipcMyPhase,0,0,0)

   end associate
   enddo

@@ -74,7 +74,7 @@ subroutine source_thermal_externalheat_init
   prm%heat_rate = config%getFloats('externalheat_rate',requiredSize = size(prm%time))

   NipcMyPhase = count(material_phaseAt==p) * discretization_nIP
-  call material_allocateSourceState(p,sourceOffset,NipcMyPhase,1,1,0)
+  call material_allocateState(sourceState(p)%p(sourceOffset),NipcMyPhase,1,1,0)

   end associate
   enddo

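The six hunks above all make the same change: the allocator no longer receives a phase index and a source offset, the caller resolves sourceState(p)%p(sourceOffset) itself and hands over the object. A schematic Python sketch of that pattern (all names invented, not DAMASK code):

    from dataclasses import dataclass, field
    import numpy as np

    @dataclass
    class State:
        sizeState:    int = 0
        sizeDotState: int = 0
        state:    np.ndarray = field(default_factory=lambda: np.empty((0, 0)))
        dotState: np.ndarray = field(default_factory=lambda: np.empty((0, 0)))

    def allocate_state(st: State, n_instances: int, size_state: int, size_dot: int) -> None:
        """Size one state object; knows nothing about phases or source offsets."""
        st.sizeState, st.sizeDotState = size_state, size_dot
        st.state    = np.zeros((size_state, n_instances))
        st.dotState = np.zeros((size_dot, n_instances))

    # before: allocator digs into the container itself via (phase, offset)
    # after:  caller selects the object, allocator stays generic
    source_state = {1: [State()]}                      # container indexed by phase, then source offset
    allocate_state(source_state[1][0], n_instances=8, size_state=1, size_dot=1)

Keeping the allocation routine ignorant of how source states are stored is presumably what allows the specialised material_allocateSourceState to be replaced by the more general material_allocateState.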