Merge branch 'development' into YAML-Prerequisites

Martin Diehl 2020-05-07 23:11:38 +02:00
commit 4f8c7ea2d5
67 changed files with 1011 additions and 2053 deletions


@@ -203,7 +203,6 @@ Post_OrientationConversion:
   stage: postprocessing
   script:
     - OrientationConversion/test.py
-    - OrientationConversion/test2.py
   except:
     - master
     - release

@@ -1 +1 @@
-Subproject commit 232a094c715bcbbd1c6652c4dc4a4a50d402b82f
+Subproject commit c595994cd8880acadf50b5dedb79156d04d35b91


@@ -1 +1 @@
-v2.0.3-2311-gf1afc159
+v2.0.3-2412-g0d03c469


@@ -16,10 +16,6 @@ if not os.path.isdir(binDir):
 #define ToDo list
 processing_subDirs = ['pre',
                       'post',
-                      'misc',
-                     ]
-processing_extensions = ['.py',
-                         '.sh',
                      ]
 
 sys.stdout.write('\nsymbolic linking...\n')
@@ -31,7 +27,7 @@ for subDir in processing_subDirs:
   for theFile in os.listdir(theDir):
     theName,theExt = os.path.splitext(theFile)
-    if theExt in processing_extensions:                              # only consider files with proper extensions
+    if theExt in ['.py']:
      src      = os.path.abspath(os.path.join(theDir,theFile))
      sym_link = os.path.abspath(os.path.join(binDir,theName))


@@ -1,30 +0,0 @@
#!/usr/bin/env python3
import os
import sys
from io import StringIO
from optparse import OptionParser
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
#--------------------------------------------------------------------------------------------------
# MAIN
#--------------------------------------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog [angfile[s]]', description = """
Convert TSL/EDAX *.ang file to ASCIItable
""", version = scriptID)
(options, filenames) = parser.parse_args()
if filenames == []: filenames = [None]
for name in filenames:
damask.util.report(scriptName,name)
table = damask.Table.from_ang(StringIO(''.join(sys.stdin.read())) if name is None else name)
table.to_ASCII(sys.stdout if name is None else os.path.splitext(name)[0]+'.txt')


@@ -33,7 +33,7 @@ for filename in options.filenames:
   results = damask.Result(filename)
 
   if not results.structured: continue
-  coords = damask.grid_filters.cell_coord0(results.grid,results.size,results.origin)
+  coords = damask.grid_filters.cell_coord0(results.grid,results.size,results.origin).reshape(-1,3,order='F')
 
   N_digits = int(np.floor(np.log10(int(results.increments[-1][3:]))))+1
   N_digits = 5 # hack to keep test intact


@@ -1,49 +0,0 @@
#!/usr/bin/env python3
import os
import sys
from io import StringIO
from optparse import OptionParser
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
Add column containing Cauchy stress based on deformation gradient and first Piola--Kirchhoff stress.
""", version = scriptID)
parser.add_option('-f','--defgrad',
dest = 'defgrad',
type = 'string', metavar = 'string',
help = 'heading of columns containing deformation gradient [%default]')
parser.add_option('-p','--stress',
dest = 'stress',
type = 'string', metavar = 'string',
help = 'heading of columns containing first Piola--Kirchhoff stress [%default]')
parser.set_defaults(defgrad = 'f',
stress = 'p',
)
(options,filenames) = parser.parse_args()
if filenames == []: filenames = [None]
for name in filenames:
damask.util.report(scriptName,name)
table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
table.add('Cauchy',
damask.mechanics.Cauchy(table.get(options.stress ).reshape(-1,3,3),
table.get(options.defgrad).reshape(-1,3,3)).reshape(-1,9),
scriptID+' '+' '.join(sys.argv[1:]))
table.to_ASCII(sys.stdout if name is None else name)


@@ -16,8 +16,8 @@ scriptID = ' '.join([scriptName,damask.version])
 def volTetrahedron(coords):
   """
   Return the volume of the tetrahedron with given vertices or sides.
 
-  Ifvertices are given they must be in a NumPy array with shape (4,3): the
+  If vertices are given they must be in a NumPy array with shape (4,3): the
   position vectors of the 4 vertices in 3 dimensions; if the six sides are
   given, they must be an array of length 6. If both are given, the sides
   will be used in the calculation.
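Aside (not part of this commit): for the vertex form described in the docstring above, the volume follows from the determinant of the three edge vectors that meet at one vertex. A minimal NumPy sketch of that relation; the side-length form that volTetrahedron also supports lies outside this hunk:

import numpy as np

def tet_volume_from_vertices(verts):
    """Volume of a tetrahedron from its four vertices given as a (4,3) array."""
    v = np.asarray(verts, dtype=float)
    edges = v[1:] - v[0]                     # three edge vectors emanating from vertex 0
    return abs(np.linalg.det(edges))/6.0     # |det| of the edge matrix divided by 6

print(tet_volume_from_vertices([[0,0,0],[1,0,0],[0,1,0],[0,0,1]]))   # unit right tetrahedron: 1/6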
@@ -62,19 +62,18 @@ def volTetrahedron(coords):
 def volumeMismatch(size,F,nodes):
   """
   Calculates the volume mismatch.
 
   volume mismatch is defined as the difference between volume of reconstructed
   (compatible) cube and determinant of deformation gradient at Fourier point.
   """
   coords = np.empty([8,3])
-  vMismatch = np.empty(grid[::-1])
-  volInitial = size.prod()/grid.prod()
+  vMismatch = np.empty(F.shape[:3])
 
 #--------------------------------------------------------------------------------------------------
 # calculate actual volume and volume resulting from deformation gradient
-  for k in range(grid[2]):
+  for k in range(grid[0]):
     for j in range(grid[1]):
-      for i in range(grid[0]):
+      for i in range(grid[2]):
         coords[0,0:3] = nodes[k, j, i ,0:3]
         coords[1,0:3] = nodes[k ,j, i+1,0:3]
         coords[2,0:3] = nodes[k ,j+1,i+1,0:3]
@@ -91,47 +90,45 @@ def volumeMismatch(size,F,nodes):
                + abs(volTetrahedron([coords[6,0:3],coords[4,0:3],coords[1,0:3],coords[5,0:3]])) \
                + abs(volTetrahedron([coords[6,0:3],coords[4,0:3],coords[1,0:3],coords[0,0:3]]))) \
               /np.linalg.det(F[k,j,i,0:3,0:3])
-  return vMismatch/volInitial
+  return vMismatch/(size.prod()/grid.prod())
 
 
 def shapeMismatch(size,F,nodes,centres):
   """
   Routine to calculate the shape mismatch.
 
   shape mismatch is defined as difference between the vectors from the central point to
   the corners of reconstructed (combatible) volume element and the vectors calculated by deforming
   the initial volume element with the current deformation gradient.
   """
-  coordsInitial = np.empty([8,3])
-  sMismatch = np.empty(grid[::-1])
+  sMismatch = np.empty(F.shape[:3])
 
 #--------------------------------------------------------------------------------------------------
 # initial positions
-  coordsInitial[0,0:3] = [-size[0]/grid[0],-size[1]/grid[1],-size[2]/grid[2]]
-  coordsInitial[1,0:3] = [+size[0]/grid[0],-size[1]/grid[1],-size[2]/grid[2]]
-  coordsInitial[2,0:3] = [+size[0]/grid[0],+size[1]/grid[1],-size[2]/grid[2]]
-  coordsInitial[3,0:3] = [-size[0]/grid[0],+size[1]/grid[1],-size[2]/grid[2]]
-  coordsInitial[4,0:3] = [-size[0]/grid[0],-size[1]/grid[1],+size[2]/grid[2]]
-  coordsInitial[5,0:3] = [+size[0]/grid[0],-size[1]/grid[1],+size[2]/grid[2]]
-  coordsInitial[6,0:3] = [+size[0]/grid[0],+size[1]/grid[1],+size[2]/grid[2]]
-  coordsInitial[7,0:3] = [-size[0]/grid[0],+size[1]/grid[1],+size[2]/grid[2]]
-  coordsInitial = coordsInitial/2.0
+  delta = size/grid*.5
+  coordsInitial = np.vstack((delta * np.array((-1,-1,-1)),
+                             delta * np.array((+1,-1,-1)),
+                             delta * np.array((+1,+1,-1)),
+                             delta * np.array((-1,+1,-1)),
+                             delta * np.array((-1,-1,+1)),
+                             delta * np.array((+1,-1,+1)),
+                             delta * np.array((+1,+1,+1)),
+                             delta * np.array((-1,+1,+1))))
 
 #--------------------------------------------------------------------------------------------------
 # compare deformed original and deformed positions to actual positions
-  for k in range(grid[2]):
+  for k in range(grid[0]):
     for j in range(grid[1]):
-      for i in range(grid[0]):
+      for i in range(grid[2]):
         sMismatch[k,j,i] = \
           + np.linalg.norm(nodes[k,  j,  i  ,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[0,0:3]))\
-          + np.linalg.norm(nodes[k,  j,  i+1,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[1,0:3]))\
-          + np.linalg.norm(nodes[k,  j+1,i+1,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[2,0:3]))\
+          + np.linalg.norm(nodes[k+1,j,  i  ,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[1,0:3]))\
+          + np.linalg.norm(nodes[k+1,j+1,i  ,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[2,0:3]))\
           + np.linalg.norm(nodes[k,  j+1,i  ,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[3,0:3]))\
-          + np.linalg.norm(nodes[k+1,j,  i  ,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[4,0:3]))\
+          + np.linalg.norm(nodes[k,  j,  i+1,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[4,0:3]))\
          + np.linalg.norm(nodes[k+1,j,  i+1,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[5,0:3]))\
          + np.linalg.norm(nodes[k+1,j+1,i+1,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[6,0:3]))\
-          + np.linalg.norm(nodes[k+1,j+1,i  ,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[7,0:3]))
+          + np.linalg.norm(nodes[k,  j+1,i+1,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[7,0:3]))
 
   return sMismatch
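Aside (not part of this commit): the rewritten shapeMismatch builds the eight initial corner offsets as delta times every (±1,±1,±1) sign combination instead of assigning them one by one. A standalone check of that equivalence, with arbitrary sample values for size and grid:

import numpy as np

size = np.array([1.0, 2.0, 3.0])                 # sample box size (illustration only)
grid = np.array([4, 5, 6])                       # sample grid (illustration only)

old = np.empty([8,3])                            # former construction: corner by corner, halved at the end
old[0] = [-size[0]/grid[0],-size[1]/grid[1],-size[2]/grid[2]]
old[1] = [+size[0]/grid[0],-size[1]/grid[1],-size[2]/grid[2]]
old[2] = [+size[0]/grid[0],+size[1]/grid[1],-size[2]/grid[2]]
old[3] = [-size[0]/grid[0],+size[1]/grid[1],-size[2]/grid[2]]
old[4] = [-size[0]/grid[0],-size[1]/grid[1],+size[2]/grid[2]]
old[5] = [+size[0]/grid[0],-size[1]/grid[1],+size[2]/grid[2]]
old[6] = [+size[0]/grid[0],+size[1]/grid[1],+size[2]/grid[2]]
old[7] = [-size[0]/grid[0],+size[1]/grid[1],+size[2]/grid[2]]
old /= 2.0

delta = size/grid*.5                             # new construction: half cell size times all sign combinations
new = delta * np.array([(-1,-1,-1),(+1,-1,-1),(+1,+1,-1),(-1,+1,-1),
                        (-1,-1,+1),(+1,-1,+1),(+1,+1,+1),(-1,+1,+1)])

assert np.allclose(old, new)                     # identical corner offsets, same ordering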
@@ -174,24 +171,24 @@ if filenames == []: filenames = [None]
 for name in filenames:
   damask.util.report(scriptName,name)
 
   table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
   grid,size,origin = damask.grid_filters.cell_coord0_gridSizeOrigin(table.get(options.pos))
 
-  F = table.get(options.defgrad).reshape(grid[2],grid[1],grid[0],3,3)
+  F = table.get(options.defgrad).reshape(tuple(grid)+(-1,),order='F').reshape(tuple(grid)+(3,3))
   nodes = damask.grid_filters.node_coord(size,F)
 
   if options.shape:
     centers = damask.grid_filters.cell_coord(size,F)
-    shapeMismatch = shapeMismatch( size,table.get(options.defgrad).reshape(grid[2],grid[1],grid[0],3,3),nodes,centers)
+    shapeMismatch = shapeMismatch(size,F,nodes,centers)
     table.add('shapeMismatch(({}))'.format(options.defgrad),
-              shapeMismatch.reshape(-1,1),
+              shapeMismatch.reshape(-1,1,order='F'),
               scriptID+' '+' '.join(sys.argv[1:]))
 
   if options.volume:
-    volumeMismatch = volumeMismatch(size,table.get(options.defgrad).reshape(grid[2],grid[1],grid[0],3,3),nodes)
+    volumeMismatch = volumeMismatch(size,F,nodes)
     table.add('volMismatch(({}))'.format(options.defgrad),
-              volumeMismatch.reshape(-1,1),
+              volumeMismatch.reshape(-1,1,order='F'),
               scriptID+' '+' '.join(sys.argv[1:]))
 
   table.to_ASCII(sys.stdout if name is None else name)


@@ -49,9 +49,10 @@ for name in filenames:
   for label in options.labels:
     field = table.get(label)
     shape = (3,) if np.prod(field.shape)//np.prod(grid) == 3 else (3,3)    # vector or tensor
-    field = field.reshape(np.append(grid[::-1],shape))
+    field = field.reshape(tuple(grid)+(-1,),order='F').reshape(tuple(grid)+shape)
+    curl = damask.grid_filters.curl(size,field)
     table.add('curlFFT({})'.format(label),
-              damask.grid_filters.curl(size[::-1],field).reshape(-1,np.prod(shape)),
+              curl.reshape(tuple(grid)+(-1,)).reshape(-1,np.prod(shape),order='F'),
               scriptID+' '+' '.join(sys.argv[1:]))
 
   table.to_ASCII(sys.stdout if name is None else name)
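Aside (not part of this commit): the pattern introduced here and in the sibling scripts, reshape(tuple(grid)+(-1,),order='F') before calling a grid filter and a matching order='F' reshape afterwards, treats the flat per-cell table rows as x-fastest data. A minimal round-trip sketch of that convention:

import numpy as np

grid = (2, 3, 4)                                    # cells in x, y, z (illustration only)
flat = np.arange(np.prod(grid)*3).reshape(-1,3)     # a vector field, one row per cell

field = flat.reshape(grid+(-1,), order='F')         # (2,3,4,3): x varies fastest, components stay last
# ... a grid operation such as damask.grid_filters.curl(size,field) would act on this view ...
back  = field.reshape(-1,3, order='F')              # back to one row per cell, same cell order

assert np.array_equal(back, flat)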


@@ -1,45 +0,0 @@
#!/usr/bin/env python3
import os
import sys
from io import StringIO
from optparse import OptionParser
import numpy as np
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
Add column(s) containing determinant of requested tensor column(s).
""", version = scriptID)
parser.add_option('-t','--tensor',
dest = 'tensor',
action = 'extend', metavar = '<string LIST>',
help = 'heading of columns containing tensor field values')
(options,filenames) = parser.parse_args()
if filenames == []: filenames = [None]
if options.tensor is None:
parser.error('no data column specified.')
for name in filenames:
damask.util.report(scriptName,name)
table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
for tensor in options.tensor:
table.add('det({})'.format(tensor),
np.linalg.det(table.get(tensor).reshape(-1,3,3)),
scriptID+' '+' '.join(sys.argv[1:]))
table.to_ASCII(sys.stdout if name is None else name)


@@ -1,51 +0,0 @@
#!/usr/bin/env python3
import os
import sys
from io import StringIO
from optparse import OptionParser
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(2)]', description = """
Add column(s) containing deviator of requested tensor column(s).
""", version = scriptID)
parser.add_option('-t','--tensor',
dest = 'tensor',
action = 'extend', metavar='<string LIST>',
help = 'heading of columns containing tensor field values')
parser.add_option('-s','--spherical',
dest = 'spherical',
action = 'store_true',
help = 'report spherical part of tensor (hydrostatic component, pressure)')
(options,filenames) = parser.parse_args()
if filenames == []: filenames = [None]
if options.tensor is None:
parser.error('no data column specified...')
for name in filenames:
damask.util.report(scriptName,name)
table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
for tensor in options.tensor:
table.add('dev({})'.format(tensor),
damask.mechanics.deviatoric_part(table.get(tensor).reshape(-1,3,3)).reshape(-1,9),
scriptID+' '+' '.join(sys.argv[1:]))
if options.spherical:
table.add('sph({})'.format(tensor),
damask.mechanics.spherical_part(table.get(tensor).reshape(-1,3,3)),
scriptID+' '+' '.join(sys.argv[1:]))
table.to_ASCII(sys.stdout if name is None else name)


@@ -5,8 +5,6 @@ import sys
 from io import StringIO
 from optparse import OptionParser
 
-import numpy as np
-
 import damask
@@ -51,23 +49,23 @@ for name in filenames:
   table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
   grid,size,origin = damask.grid_filters.cell_coord0_gridSizeOrigin(table.get(options.pos))
 
-  F = table.get(options.f).reshape(np.append(grid[::-1],(3,3)))
+  F = table.get(options.f).reshape(tuple(grid)+(-1,),order='F').reshape(tuple(grid)+(3,3))
   if options.nodal:
-    table = damask.Table(damask.grid_filters.node_coord0(grid[::-1],size[::-1]).reshape(-1,3),
+    table = damask.Table(damask.grid_filters.node_coord0(grid,size).reshape(-1,3,order='F'),
                          {'pos':(3,)})
     table.add('avg({}).{}'.format(options.f,options.pos),
-              damask.grid_filters.node_displacement_avg(size[::-1],F).reshape(-1,3),
+              damask.grid_filters.node_displacement_avg(size,F).reshape(-1,3,order='F'),
               scriptID+' '+' '.join(sys.argv[1:]))
     table.add('fluct({}).{}'.format(options.f,options.pos),
-              damask.grid_filters.node_displacement_fluct(size[::-1],F).reshape(-1,3),
+              damask.grid_filters.node_displacement_fluct(size,F).reshape(-1,3,order='F'),
               scriptID+' '+' '.join(sys.argv[1:]))
     table.to_ASCII(sys.stdout if name is None else os.path.splitext(name)[0]+'_nodal.txt')
   else:
     table.add('avg({}).{}'.format(options.f,options.pos),
-              damask.grid_filters.cell_displacement_avg(size[::-1],F).reshape(-1,3),
+              damask.grid_filters.cell_displacement_avg(size,F).reshape(-1,3,order='F'),
               scriptID+' '+' '.join(sys.argv[1:]))
     table.add('fluct({}).{}'.format(options.f,options.pos),
-              damask.grid_filters.cell_displacement_fluct(size[::-1],F).reshape(-1,3),
+              damask.grid_filters.cell_displacement_fluct(size,F).reshape(-1,3,order='F'),
               scriptID+' '+' '.join(sys.argv[1:]))
     table.to_ASCII(sys.stdout if name is None else name)


@@ -49,9 +49,10 @@ for name in filenames:
   for label in options.labels:
     field = table.get(label)
     shape = (3,) if np.prod(field.shape)//np.prod(grid) == 3 else (3,3)    # vector or tensor
-    field = field.reshape(np.append(grid[::-1],shape))
+    field = field.reshape(tuple(grid)+(-1,),order='F').reshape(tuple(grid)+shape)
+    div = damask.grid_filters.divergence(size,field)
     table.add('divFFT({})'.format(label),
-              damask.grid_filters.divergence(size[::-1],field).reshape(-1,np.prod(shape)//3),
+              div.reshape(tuple(grid)+(-1,)).reshape(-1,np.prod(shape)//3,order='F'),
               scriptID+' '+' '.join(sys.argv[1:]))
 
   table.to_ASCII(sys.stdout if name is None else name)


@@ -49,9 +49,10 @@ for name in filenames:
   for label in options.labels:
     field = table.get(label)
     shape = (1,) if np.prod(field.shape)//np.prod(grid) == 1 else (3,)     # scalar or vector
-    field = field.reshape(np.append(grid[::-1],shape))
+    field = field.reshape(tuple(grid)+(-1,),order='F')
+    grad = damask.grid_filters.gradient(size,field)
     table.add('gradFFT({})'.format(label),
-              damask.grid_filters.gradient(size[::-1],field).reshape(-1,np.prod(shape)*3),
+              grad.reshape(tuple(grid)+(-1,)).reshape(-1,np.prod(shape)*3,order='F'),
               scriptID+' '+' '.join(sys.argv[1:]))
 
   table.to_ASCII(sys.stdout if name is None else name)


@@ -1,41 +0,0 @@
#!/usr/bin/env python3
import os
import sys
from io import StringIO
from optparse import OptionParser
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
Add info lines to ASCIItable header.
""", version = scriptID)
parser.add_option('-i',
'--info',
dest = 'info', action = 'extend', metavar = '<string LIST>',
help = 'items to add')
(options,filenames) = parser.parse_args()
if filenames == []: filenames = [None]
if options.info is None:
parser.error('no info specified.')
for name in filenames:
damask.util.report(scriptName,name)
table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
table.comments += options.info
table.to_ASCII(sys.stdout if name is None else name)


@@ -1,56 +0,0 @@
#!/usr/bin/env python3
import os
import sys
from io import StringIO
from optparse import OptionParser
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
Add vonMises equivalent values for symmetric part of requested strains and/or stresses.
""", version = scriptID)
parser.add_option('-e','--strain',
dest = 'strain',
action = 'extend', metavar = '<string LIST>',
help = 'heading(s) of columns containing strain tensors')
parser.add_option('-s','--stress',
dest = 'stress',
action = 'extend', metavar = '<string LIST>',
help = 'heading(s) of columns containing stress tensors')
parser.set_defaults(strain = [],
stress = [],
)
(options,filenames) = parser.parse_args()
if options.stress is [] and options.strain is []:
parser.error('no data column specified...')
if filenames == []: filenames = [None]
for name in filenames:
damask.util.report(scriptName,name)
table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
for strain in options.strain:
table.add('Mises({})'.format(strain),
damask.mechanics.Mises_strain(damask.mechanics.symmetric(table.get(strain).reshape(-1,3,3))),
scriptID+' '+' '.join(sys.argv[1:]))
for stress in options.stress:
table.add('Mises({})'.format(stress),
damask.mechanics.Mises_stress(damask.mechanics.symmetric(table.get(stress).reshape(-1,3,3))),
scriptID+' '+' '.join(sys.argv[1:]))
table.to_ASCII(sys.stdout if name is None else name)


@@ -172,7 +172,7 @@ for name in filenames:
     elif inputtype == 'matrix':
       d = representations['matrix'][1]
-      o = damask.Rotation.fromMatrix(list(map(float,table.data[column:column+d])))
+      o = damask.Rotation.fromMatrix(np.array(list(map(float,table.data[column:column+d]))).reshape(3,3))
 
     elif inputtype == 'frame':
       M = np.array(list(map(float,table.data[column[0]:column[0]+3] + \


@@ -214,7 +214,7 @@ for name in filenames:
   outputAlive = True
   while outputAlive and table.data_read():                                 # read next data line of ASCII table
-    o = damask.Rotation(list(map(float,table.data[column:column+4])))
+    o = damask.Rotation(np.array(list(map(float,table.data[column:column+4]))))
 
     table.data_append( np.abs( np.sum(slip_direction * (o * force)  ,axis=1) \
                              * np.sum(slip_normal    * (o * normal) ,axis=1)))


@@ -1,44 +0,0 @@
#!/usr/bin/env python3
import os
import sys
from io import StringIO
from optparse import OptionParser
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
Append data of ASCIItable(s) column-wise.
""", version = scriptID)
parser.add_option('-a', '--add','--table',
dest = 'table',
action = 'extend', metavar = '<string LIST>',
help = 'tables to add')
(options,filenames) = parser.parse_args()
if filenames == []: filenames = [None]
if options.table is None:
parser.error('no table specified.')
for name in filenames:
damask.util.report(scriptName,name)
table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
for addTable in options.table:
table2 = damask.Table.from_ASCII(addTable)
table2.data = table2.data[:table.data.shape[0]]
table.join(table2)
table.to_ASCII(sys.stdout if name is None else name)


@@ -61,7 +61,7 @@ if any(shift != 0): prefix += 'shift{:+}{:+}{:+}_'.format(*shift)
 for name in filenames:
   damask.util.report(scriptName,name)
 
   table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
 
   if (options.grid is None or options.size is None):
@@ -87,11 +87,11 @@ for name in filenames:
                      origin = list(-(packing//2)) + [0])\
                      [::packing[0],::packing[1],::packing[2],:].reshape((packedGrid.prod(),-1),order = 'F')
 
   table = damask.Table(averagedDown,table.shapes,table.comments)
 
   coords = damask.grid_filters.cell_coord0(packedGrid,size,shift/packedGrid*size+origin)
-  table.set(options.pos, coords.reshape(-1,3))
+  table.set(options.pos, coords.reshape(-1,3,order='F'))
 
   outname = os.path.join(os.path.dirname(name),prefix+os.path.basename(name))


@@ -59,13 +59,13 @@ for name in filenames:
   packing = np.array(options.packing,'i')
   outSize = grid*packing
 
-  data = table.data.values.reshape(tuple(grid)+(-1,))
-  blownUp = ndimage.interpolation.zoom(data,tuple(packing)+(1,),order=0,mode='nearest').reshape(outSize.prod(),-1)
+  data = table.data.values.reshape(tuple(grid)+(-1,),order='F')
+  blownUp = ndimage.interpolation.zoom(data,tuple(packing)+(1,),order=0,mode='nearest').reshape(outSize.prod(),-1,order='F')
 
   table = damask.Table(blownUp,table.shapes,table.comments)
 
   coords = damask.grid_filters.cell_coord0(outSize,size,origin)
-  table.set(options.pos,coords.reshape(-1,3))
+  table.set(options.pos,coords.reshape(-1,3,order='F'))
   table.set('elem',np.arange(1,outSize.prod()+1))
 
   outname = os.path.join(os.path.dirname(name),prefix+os.path.basename(name))
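Aside (not part of this commit): the core of this script is nearest-neighbour zooming of cell data, which simply repeats each cell value packing times per direction. A minimal sketch using the equivalent scipy.ndimage.zoom call:

import numpy as np
from scipy import ndimage

grid    = (2, 2, 1)                                           # illustration only
packing = (2, 2, 1)                                           # refinement factor per direction

data    = np.arange(np.prod(grid),dtype=float).reshape(grid+(-1,),order='F')   # one scalar per cell
blownUp = ndimage.zoom(data,packing+(1,),order=0,mode='nearest')               # order=0: repeat values

print(blownUp.shape)                                          # (4, 4, 1, 1)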


@@ -1,43 +0,0 @@
#!/usr/bin/env python3
import os
import sys
from io import StringIO
from optparse import OptionParser
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
Append data of ASCIItable(s) row-wise.
""", version = scriptID)
parser.add_option('-a', '--add','--table',
dest = 'table',
action = 'extend', metavar = '<string LIST>',
help = 'tables to add')
(options,filenames) = parser.parse_args()
if filenames == []: filenames = [None]
if options.table is None:
parser.error('no table specified.')
for name in filenames:
damask.util.report(scriptName,name)
table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
for growTable in options.table:
table2 = damask.Table.from_ASCII(growTable)
table.append(table2)
table.to_ASCII(sys.stdout if name is None else name)


@@ -2,6 +2,7 @@
 
 import os
 import sys
+from io import StringIO
 from optparse import OptionParser
 
 import numpy as np
@@ -41,73 +42,20 @@ parser.set_defaults(label = [],
                    )
 
 (options,filenames) = parser.parse_args()
-
-if len(options.label) == 0:
-  parser.error('no labels specified.')
-
-# --- loop over input files -------------------------------------------------------------------------
-
 if filenames == []: filenames = [None]
 
 for name in filenames:
-  try:
-    table = damask.ASCIItable(name = name)
-  except IOError:
-    continue
   damask.util.report(scriptName,name)
 
-# ------------------------------------------ read header ------------------------------------------
-
-  table.head_read()
-
-# ------------------------------------------ process labels ---------------------------------------
-
-  errors  = []
-  remarks = []
-  columns = []
-  dims    = []
-
-  indices    = table.label_index    (options.label)
-  dimensions = table.label_dimension(options.label)
-  for i,index in enumerate(indices):
-    if index == -1: remarks.append('label "{}" not present...'.format(options.label[i]))
-    else:
-      columns.append(index)
-      dims.append(dimensions[i])
-
-  if remarks != []: damask.util.croak(remarks)
-  if errors  != []:
-    damask.util.croak(errors)
-    table.close(dismiss = True)
-    continue
-
-# ------------------------------------------ assemble header ---------------------------------------
-
-  randomSeed = int(os.urandom(4).hex(), 16) if options.randomSeed is None else options.randomSeed   # random seed per file
-  np.random.seed(randomSeed)
-
-  table.info_append([scriptID + '\t' + ' '.join(sys.argv[1:]),
-                     'random seed {}'.format(randomSeed),
-                    ])
-  table.head_write()
-
-# ------------------------------------------ process data ------------------------------------------
-
-  table.data_readArray()                                                                            # read all data at once
-  for col,dim in zip(columns,dims):
-    if options.unique:
-      s = set(map(tuple,table.data[:,col:col+dim]))                                                 # generate set of (unique) values
-      uniques = np.array(map(np.array,s))                                                           # translate set to np.array
-
-      shuffler = dict(zip(s,np.random.permutation(len(s))))                                         # random permutation
-      table.data[:,col:col+dim] = uniques[np.array(map(lambda x: shuffler[tuple(x)],
-                                                       table.data[:,col:col+dim]))]                 # fill table with mapped uniques
-    else:
-      np.random.shuffle(table.data[:,col:col+dim])                                                  # independently shuffle every row
-
-# ------------------------------------------ output result -----------------------------------------
-
-  table.data_writeArray()
-
-# ------------------------------------------ output finalization -----------------------------------
-
-  table.close()                                                                                     # close ASCII tables
+  table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
+
+  randomSeed = int(os.urandom(4).hex(), 16) if options.randomSeed is None else options.randomSeed   # random seed per file
+  rng = np.random.default_rng(randomSeed)
+
+  for label in options.label:
+    data = table.get(label)
+    uniques,inverse = np.unique(data,return_inverse=True,axis=0) if options.unique else (data,np.arange(len(data)))
+    rng.shuffle(uniques)
+    table.set(label,uniques[inverse], scriptID+' '+' '.join(sys.argv[1:]))
+
+  table.to_ASCII(sys.stdout if name is None else name)
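Aside (not part of this commit): the replacement loop shuffles the distinct values and maps them back through the inverse index, so rows that were equal before stay equal after the permutation, while the non-unique branch shuffles entries independently. The idiom in isolation, on a plain 1-D array; the script passes axis=0 to np.unique to handle multi-component columns:

import numpy as np

rng  = np.random.default_rng(20200507)            # any fixed seed
data = np.array([1., 2., 1., 3., 2.])

uniques,inverse = np.unique(data,return_inverse=True)
rng.shuffle(uniques)                              # permute the distinct values in place
shuffled = uniques[inverse]                       # equal entries remain equal after shuffling

print(shuffled)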


@@ -1,50 +0,0 @@
#!/usr/bin/env python3
import os
import sys
from io import StringIO
from optparse import OptionParser
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
Rename scalar, vectorial, and/or tensorial data header labels.
""", version = scriptID)
parser.add_option('-l','--label',
dest = 'label',
action = 'extend', metavar='<string LIST>',
help = 'column(s) to rename')
parser.add_option('-s','--substitute',
dest = 'substitute',
action = 'extend', metavar='<string LIST>',
help = 'new column label(s)')
parser.set_defaults(label = [],
substitute = [],
)
(options,filenames) = parser.parse_args()
if filenames == []: filenames = [None]
if len(options.label) != len(options.substitute):
parser.error('number of column labels and substitutes do not match.')
for name in filenames:
damask.util.report(scriptName,name)
table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
for label,substitute in zip(options.label,options.substitute):
table.rename(label,substitute,scriptID+' '+' '.join(sys.argv[1:]))
table.to_ASCII(sys.stdout if name is None else name)


@@ -1,49 +0,0 @@
#!/usr/bin/env python3
import os
import sys
from io import StringIO
from optparse import OptionParser
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
Uniformly scale column values by given factor.
""", version = scriptID)
parser.add_option('-l','--label',
dest = 'labels',
action = 'extend', metavar = '<string LIST>',
help ='column(s) to scale')
parser.add_option('-f','--factor',
dest = 'factor',
action = 'extend', metavar='<float LIST>',
help = 'factor(s) per column')
parser.set_defaults(label = [],
factor = [])
(options,filenames) = parser.parse_args()
if filenames == []: filenames = [None]
if len(options.labels) != len(options.factor):
parser.error('number of column labels and factors do not match.')
for name in filenames:
damask.util.report(scriptName,name)
table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
for label,factor in zip(options.labels,options.factor):
table.set(label,table.get(label)*float(factor),scriptID+' '+' '.join(sys.argv[1:]))
table.to_ASCII(sys.stdout if name is None else name)


@@ -1,49 +0,0 @@
#!/usr/bin/env python3
import os
import sys
from io import StringIO
from optparse import OptionParser
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
Uniformly shift column values by given offset.
""", version = scriptID)
parser.add_option('-l','--label',
dest = 'labels',
action = 'extend', metavar = '<string LIST>',
help ='column(s) to shift')
parser.add_option('-o','--offset',
dest = 'offset',
action = 'extend', metavar='<float LIST>',
help = 'offset(s) per column')
parser.set_defaults(label = [],
offset = [])
(options,filenames) = parser.parse_args()
if filenames == []: filenames = [None]
if len(options.labels) != len(options.offset):
parser.error('number of column labels and offsets do not match.')
for name in filenames:
damask.util.report(scriptName,name)
table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
for label,offset in zip(options.labels,options.offset):
table.set(label,table.get(label)+float(offset),scriptID+' '+' '.join(sys.argv[1:]))
table.to_ASCII(sys.stdout if name is None else name)


@@ -1,50 +0,0 @@
#!/usr/bin/env python3
import os
import sys
from io import StringIO
from optparse import OptionParser
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
Sort rows by given (or all) column label(s).
Examples:
With coordinates in columns "x", "y", and "z"; sorting with x slowest and z fastest varying index: --label x,y,z.
""", version = scriptID)
parser.add_option('-l','--label',
dest = 'labels',
action = 'extend', metavar = '<string LIST>',
help = 'list of column labels (a,b,c,...)')
parser.add_option('-r','--reverse',
dest = 'reverse',
action = 'store_true',
help = 'sort in reverse')
parser.set_defaults(reverse = False,
)
(options,filenames) = parser.parse_args()
if filenames == []: filenames = [None]
if options.labels is None:
parser.error('no labels specified.')
for name in filenames:
damask.util.report(scriptName,name)
table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
table.sort_by(options.labels,not options.reverse)
table.to_ASCII(sys.stdout if name is None else name)


@@ -1,42 +0,0 @@
#!/usr/bin/env python3
import os
import sys
from io import StringIO
from optparse import OptionParser
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
#--------------------------------------------------------------------------------------------------
# MAIN
#--------------------------------------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [geomfile(s)]', description = """
Smooth microstructure by selecting most frequent index within given stencil at each location.
""", version=scriptID)
parser.add_option('-s','--stencil',
dest = 'stencil',
type = 'int', metavar = 'int',
help = 'size of smoothing stencil [%default]')
parser.set_defaults(stencil = 3)
(options, filenames) = parser.parse_args()
if filenames == []: filenames = [None]
for name in filenames:
damask.util.report(scriptName,name)
geom = damask.Geom.from_file(StringIO(''.join(sys.stdin.read())) if name is None else name)
damask.util.croak(geom.clean(options.stencil))
geom.add_comments(scriptID + ' ' + ' '.join(sys.argv[1:]))
geom.to_file(sys.stdout if name is None else name,pack=False)


@@ -1,66 +0,0 @@
#!/usr/bin/env python3
import os
import sys
from optparse import OptionParser
import numpy as np
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [geomfile(s)]', description = """
Generate homogeneous geometry.
""", version = scriptID)
parser.add_option('-g','--grid',
dest = 'grid',
type = 'int', nargs = 3, metavar = ' '.join(['int']*3),
help = 'a,b,c grid of hexahedral box %default')
parser.add_option('-s', '--size',
dest = 'size',
type = 'float', nargs = 3, metavar = ' '.join(['float']*3),
help = 'x,y,z of geometry size')
parser.add_option('-o','--origin',
dest = 'origin',
type = 'float', nargs = 3, metavar = ' '.join(['float']*3),
help = 'x,y,z of geometry origin %default')
parser.add_option('--homogenization',
dest = 'homogenization',
type = 'int', metavar = 'int',
help = 'homogenization index [%default]')
parser.add_option('-f','--fill',
dest = 'fill',
type = 'float', metavar = 'int',
help = 'microstructure index [%default]')
parser.set_defaults(grid = (16,16,16),
origin = (0.,0.,0.),
homogenization = 1,
fill = 1,
)
(options, filename) = parser.parse_args()
name = None if filename == [] else filename[0]
damask.util.report(scriptName,name)
dtype = float if np.isnan(options.fill) or int(options.fill) != options.fill else int
geom = damask.Geom(microstructure=np.full(options.grid,options.fill,dtype=dtype),
size=options.size,
origin=options.origin,
homogenization=options.homogenization,
comments=scriptID + ' ' + ' '.join(sys.argv[1:]))
damask.util.croak(geom)
geom.to_file(sys.stdout if name is None else name,pack=False)


@@ -24,22 +24,22 @@ def findClosestSeed(seeds, weights, point):
 def Laguerre_tessellation(grid, size, seeds, weights, origin = np.zeros(3), periodic = True, cpus = 2):
 
   if periodic:
-    weights_p = np.tile(weights,27).flatten(order='F')                                              # Laguerre weights (1,2,3,1,2,3,...,1,2,3)
+    weights_p = np.tile(weights.squeeze(),27)                                                       # Laguerre weights (1,2,3,1,2,3,...,1,2,3)
     seeds_p = np.vstack((seeds  -np.array([size[0],0.,0.]),seeds,  seeds  +np.array([size[0],0.,0.])))
     seeds_p = np.vstack((seeds_p-np.array([0.,size[1],0.]),seeds_p,seeds_p+np.array([0.,size[1],0.])))
     seeds_p = np.vstack((seeds_p-np.array([0.,0.,size[2]]),seeds_p,seeds_p+np.array([0.,0.,size[2]])))
-    coords  = damask.grid_filters.cell_coord0(grid*3,size*3,-origin-size).reshape(-1,3,order='F')
+    coords  = damask.grid_filters.cell_coord0(grid*3,size*3,-origin-size).reshape(-1,3)
   else:
-    weights_p = weights.flatten()
+    weights_p = weights.squeeze()
     seeds_p   = seeds
-    coords    = damask.grid_filters.cell_coord0(grid,size,-origin).reshape(-1,3,order='F')
+    coords    = damask.grid_filters.cell_coord0(grid,size,-origin).reshape(-1,3)
 
   if cpus > 1:
     pool = multiprocessing.Pool(processes = cpus)
     result = pool.map_async(partial(findClosestSeed,seeds_p,weights_p), [coord for coord in coords])
     pool.close()
     pool.join()
-    closest_seed = np.array(result.get())
+    closest_seed = np.array(result.get()).reshape(-1,3)
   else:
     closest_seed= np.array([findClosestSeed(seeds_p,weights_p,coord) for coord in coords])
@@ -52,7 +52,7 @@ def Laguerre_tessellation(grid, size, seeds, weights, origin = np.zeros(3), periodic = True, cpus = 2):
 def Voronoi_tessellation(grid, size, seeds, origin = np.zeros(3), periodic = True):
 
-  coords = damask.grid_filters.cell_coord0(grid,size,-origin).reshape(-1,3,order='F')
+  coords = damask.grid_filters.cell_coord0(grid,size,-origin).reshape(-1,3)
   KDTree = spatial.cKDTree(seeds,boxsize=size) if periodic else spatial.cKDTree(seeds)
   devNull,closest_seed = KDTree.query(coords)
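Aside (not part of this commit): Voronoi_tessellation relies on scipy's cKDTree, whose boxsize argument makes the nearest-seed query periodic, while the Laguerre branch replicates the seeds 27 times by hand since a k-d tree offers no weighted distance. A minimal sketch of the periodic nearest-seed assignment:

import numpy as np
from scipy import spatial

size   = np.ones(3)                                           # unit box (illustration only)
rng    = np.random.default_rng()
seeds  = rng.random((20,3))*size                              # seed points inside the box
points = rng.random((1000,3))*size                            # query points, e.g. cell centers

tree = spatial.cKDTree(seeds,boxsize=size)                    # periodic wrap-around in all directions
distances,closest_seed = tree.query(points)                   # index of the nearest seed per point

distances_np,closest_np = spatial.cKDTree(seeds).query(points)   # non-periodic variant for comparison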


@@ -1,47 +0,0 @@
#!/usr/bin/env python3
import os
import sys
from io import StringIO
from optparse import OptionParser
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
#--------------------------------------------------------------------------------------------------
# MAIN
#--------------------------------------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [geomfile(s)]', description = """
Mirror along given directions.
""", version=scriptID)
validDirections = ['x','y','z']
parser.add_option('-d','--direction',
dest = 'directions',
action = 'extend', metavar = '<string LIST>',
help = "directions in which to mirror {{{}}}".format(','.join(validDirections)))
parser.add_option( '--reflect',
dest = 'reflect',
action = 'store_true',
help = 'reflect (include) outermost layers')
parser.set_defaults(reflect = False)
(options, filenames) = parser.parse_args()
if filenames == []: filenames = [None]
for name in filenames:
damask.util.report(scriptName,name)
geom = damask.Geom.from_file(StringIO(''.join(sys.stdin.read())) if name is None else name)
damask.util.croak(geom.mirror(options.directions,options.reflect))
geom.add_comments(scriptID + ' ' + ' '.join(sys.argv[1:]))
geom.to_file(sys.stdout if name is None else name,pack=False)


@@ -1,37 +0,0 @@
#!/usr/bin/env python3
import os
import sys
from io import StringIO
from optparse import OptionParser
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
#--------------------------------------------------------------------------------------------------
# MAIN
#--------------------------------------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog [geomfile(s)]', description = """
Pack ranges to "a to b" and/or multiples to "n of x".
""", version = scriptID)
(options, filenames) = parser.parse_args()
if filenames == []: filenames = [None]
for name in filenames:
damask.util.report(scriptName,name)
geom = damask.Geom.from_file(StringIO(''.join(sys.stdin.read())) if name is None else name)
damask.util.croak(geom)
geom.add_comments(scriptID + ' ' + ' '.join(sys.argv[1:]))
geom.to_file(sys.stdout if name is None else name,pack=True)


@@ -1,60 +0,0 @@
#!/usr/bin/env python3
import os
import sys
from io import StringIO
from optparse import OptionParser
import numpy as np
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
#--------------------------------------------------------------------------------------------------
# MAIN
#--------------------------------------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [geomfile(s)]', description = """
Scales independently in x, y, and z direction in terms of grid and/or size.
Either absolute values or relative factors (like "0.25x") can be used.
""", version = scriptID)
parser.add_option('-g', '--grid',
dest = 'grid',
type = 'string', nargs = 3, metavar = 'string string string',
help = 'a,b,c grid of hexahedral box')
parser.add_option('-s', '--size',
dest = 'size',
type = 'string', nargs = 3, metavar = 'string string string',
help = 'x,y,z size of hexahedral box')
(options, filenames) = parser.parse_args()
if filenames == []: filenames = [None]
for name in filenames:
damask.util.report(scriptName,name)
geom = damask.Geom.from_file(StringIO(''.join(sys.stdin.read())) if name is None else name)
grid = geom.get_grid()
size = geom.get_size()
new_grid = grid if options.grid is None else \
np.array([int(o*float(n.lower().replace('x',''))) if n.lower().endswith('x') \
else int(n) for o,n in zip(grid,options.grid)],dtype=int)
new_size = size if options.size is None else \
np.array([o*float(n.lower().replace('x','')) if n.lower().endswith('x') \
else float(n) for o,n in zip(size,options.size)],dtype=float)
geom.scale(new_grid)
damask.util.croak(geom.update(microstructure = None,size = new_size))
geom.add_comments(scriptID + ' ' + ' '.join(sys.argv[1:]))
geom.to_file(sys.stdout if name is None else name,pack=False)


@@ -1,45 +0,0 @@
#!/usr/bin/env python3
import os
import sys
from io import StringIO
from optparse import OptionParser
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
#--------------------------------------------------------------------------------------------------
# MAIN
#--------------------------------------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog [geomfile(s)]', description = """
Translate geom description into ASCIItable containing position and microstructure.
""", version = scriptID)
(options, filenames) = parser.parse_args()
if filenames == []: filenames = [None]
for name in filenames:
damask.util.report(scriptName,name)
geom = damask.Geom.from_file(StringIO(''.join(sys.stdin.read())) if name is None else name)
damask.util.croak(geom)
coord0 = damask.grid_filters.cell_coord0(geom.grid,geom.size,geom.origin).reshape(-1,3)
comments = geom.comments \
+ [scriptID + ' ' + ' '.join(sys.argv[1:]),
'grid\ta {}\tb {}\tc {}'.format(*geom.grid),
'size\tx {}\ty {}\tz {}'.format(*geom.size),
'origin\tx {}\ty {}\tz {}'.format(*geom.origin),
'homogenization\t{}'.format(geom.homogenization)]
table = damask.Table(coord0,{'pos':(3,)},comments)
table.add('microstructure',geom.microstructure.reshape((-1,1),order='F'))
table.to_ASCII(sys.stdout if name is None else os.path.splitext(name)[0]+'.txt')


@@ -1,37 +0,0 @@
#!/usr/bin/env python3
import os
import sys
from optparse import OptionParser
from io import StringIO
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
#--------------------------------------------------------------------------------------------------
# MAIN
#--------------------------------------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog [geomfile(s)]', description = """
Unpack ranges "a to b" and/or "n of x" multiples (exclusively in one line).
""", version = scriptID)
(options, filenames) = parser.parse_args()
if filenames == []: filenames = [None]
for name in filenames:
damask.util.report(scriptName,name)
geom = damask.Geom.from_file(StringIO(''.join(sys.stdin.read())) if name is None else name)
damask.util.croak(geom)
geom.add_comments(scriptID + ' ' + ' '.join(sys.argv[1:]))
geom.to_file(sys.stdout if name is None else name,pack=False)


@@ -45,7 +45,7 @@ options.blacklist = [int(i) for i in options.blacklist]
 for name in filenames:
   damask.util.report(scriptName,name)
 
   geom = damask.Geom.from_file(StringIO(''.join(sys.stdin.read())) if name is None else name)
 
   microstructure = geom.get_microstructure().reshape((-1,1),order='F')
@@ -53,9 +53,9 @@ for name in filenames:
                           np.full(geom.grid.prod(),True,dtype=bool),
                           np.in1d(microstructure,options.blacklist,invert=True) if options.blacklist else \
                           np.full(geom.grid.prod(),True,dtype=bool))
 
-  seeds = damask.grid_filters.cell_coord0(geom.grid,geom.size).reshape(-1,3)
+  seeds = damask.grid_filters.cell_coord0(geom.grid,geom.size).reshape(-1,3,order='F')
 
   comments = geom.comments \
            + [scriptID + ' ' + ' '.join(sys.argv[1:]),
              'grid\ta {}\tb {}\tc {}'.format(*geom.grid),


@@ -128,7 +128,7 @@ for name in filenames:
   if not options.selective:
-    coords = damask.grid_filters.cell_coord0(grid,size).reshape(-1,3)
+    coords = damask.grid_filters.cell_coord0(grid,size).reshape(-1,3,order='F')
     seeds  = coords[np.random.choice(np.prod(grid), options.N, replace=False)] \
            + np.broadcast_to(size/grid,(options.N,3))*(np.random.rand(options.N,3)*.5-.25)          # wobble without leaving grid
   else:


@@ -1,2 +1,5 @@
 [run]
 omit = tests/*
+    damask/_asciitable.py
+    damask/_test.py
+    damask/config/*


@@ -322,11 +322,10 @@ class Geom:
         if i != grid.prod():
             raise TypeError('Invalid file: expected {} entries, found {}'.format(grid.prod(),i))
 
-        microstructure = microstructure.reshape(grid,order='F')
-        if not np.any(np.mod(microstructure.flatten(),1) != 0.0):                                   # no float present
+        if not np.any(np.mod(microstructure,1) != 0.0):                                             # no float present
             microstructure = microstructure.astype('int')
 
-        return Geom(microstructure.reshape(grid),size,origin,homogenization,comments)
+        return Geom(microstructure.reshape(grid,order='F'),size,origin,homogenization,comments)
 
 
     @staticmethod
@@ -352,16 +351,15 @@ class Geom:
         """
         if periodic:
-            weights_p = np.tile(weights,27).flatten(order='F')                                      # Laguerre weights (1,2,3,1,2,3,...,1,2,3)
+            weights_p = np.tile(weights,27)                                                         # Laguerre weights (1,2,3,1,2,3,...,1,2,3)
             seeds_p = np.vstack((seeds  -np.array([size[0],0.,0.]),seeds,  seeds  +np.array([size[0],0.,0.])))
             seeds_p = np.vstack((seeds_p-np.array([0.,size[1],0.]),seeds_p,seeds_p+np.array([0.,size[1],0.])))
             seeds_p = np.vstack((seeds_p-np.array([0.,0.,size[2]]),seeds_p,seeds_p+np.array([0.,0.,size[2]])))
-            coords  = grid_filters.cell_coord0(grid*3,size*3,-size).reshape(-1,3,order='F')
+            coords  = grid_filters.cell_coord0(grid*3,size*3,-size).reshape(-1,3)
         else:
-            weights_p = weights.flatten()
+            weights_p = weights
             seeds_p   = seeds
-            coords    = grid_filters.cell_coord0(grid,size).reshape(-1,3,order='F')
+            coords    = grid_filters.cell_coord0(grid,size).reshape(-1,3)
 
         pool = multiprocessing.Pool(processes = int(Environment().options['DAMASK_NUM_THREADS']))
         result = pool.map_async(partial(Geom._find_closest_seed,seeds_p,weights_p), [coord for coord in coords])
@@ -396,7 +394,7 @@ class Geom:
             perform a periodic tessellation. Defaults to True.
 
         """
-        coords = grid_filters.cell_coord0(grid,size).reshape(-1,3,order='F')
+        coords = grid_filters.cell_coord0(grid,size).reshape(-1,3)
         KDTree = spatial.cKDTree(seeds,boxsize=size) if periodic else spatial.cKDTree(seeds)
         devNull,microstructure = KDTree.query(coords)

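The tessellation hunks above keep two paths: Laguerre tessellation with 27 explicitly tiled periodic seed images evaluated by a multiprocessing pool, and plain Voronoi tessellation where periodicity is delegated to scipy's cKDTree through its boxsize argument. A standalone sketch of the Voronoi path under the new x-fastest convention; the helper voronoi_ids and the toy inputs are illustrative, not the Geom API:

import numpy as np
from scipy import spatial

def voronoi_ids(grid, size, seeds, periodic=True):
    """Assign each cell center the index of its closest seed."""
    grid, size = np.asarray(grid), np.asarray(size)
    delta  = size/grid
    coords = np.stack(np.meshgrid(*[np.linspace(d/2, s - d/2, g)
                                    for d, s, g in zip(delta, size, grid)],
                                  indexing='ij'), axis=-1).reshape(-1, 3, order='F')
    tree = spatial.cKDTree(seeds, boxsize=size) if periodic \
           else spatial.cKDTree(seeds)                      # boxsize wraps distances periodically
    _, ids = tree.query(coords)
    return ids.reshape(tuple(grid), order='F')              # microstructure indices, x fastest

rng   = np.random.default_rng()
seeds = rng.random((10, 3))                                 # seeds must lie inside [0, size)
micro = voronoi_ids((32, 32, 32), (1., 1., 1.), seeds)
assert micro.shape == (32, 32, 32) and micro.max() < 10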
View File

@ -38,6 +38,9 @@ class Orientation:
else: else:
self.rotation = Rotation.fromQuaternion(rotation) # assume quaternion self.rotation = Rotation.fromQuaternion(rotation) # assume quaternion
if self.rotation.quaternion.shape != (4,):
raise NotImplementedError('Support for multiple rotations missing')
def disorientation(self, def disorientation(self,
other, other,
SST = True, SST = True,

View File

@ -111,7 +111,7 @@ class Result:
select from 'set', 'add', and 'del' select from 'set', 'add', and 'del'
what : str what : str
attribute to change (must be from self.selection) attribute to change (must be from self.selection)
datasets : list of str or Boolean datasets : list of str or bool
name of datasets as list, supports ? and * wildcards. name of datasets as list, supports ? and * wildcards.
True is equivalent to [*], False is equivalent to [] True is equivalent to [*], False is equivalent to []
@ -203,7 +203,7 @@ class Result:
---------- ----------
what : str what : str
attribute to change (must be from self.selection) attribute to change (must be from self.selection)
datasets : list of str or Boolean datasets : list of str or bool
name of datasets as list, supports ? and * wildcards. name of datasets as list, supports ? and * wildcards.
True is equivalent to [*], False is equivalent to [] True is equivalent to [*], False is equivalent to []
@ -219,7 +219,7 @@ class Result:
---------- ----------
what : str what : str
attribute to change (must be from self.selection) attribute to change (must be from self.selection)
datasets : list of str or Boolean datasets : list of str or bool
name of datasets as list, supports ? and * wildcards. name of datasets as list, supports ? and * wildcards.
True is equivalent to [*], False is equivalent to [] True is equivalent to [*], False is equivalent to []
@ -235,7 +235,7 @@ class Result:
---------- ----------
what : str what : str
attribute to change (must be from self.selection) attribute to change (must be from self.selection)
datasets : list of str or Boolean datasets : list of str or bool
name of datasets as list, supports ? and * wildcards. name of datasets as list, supports ? and * wildcards.
True is equivalent to [*], False is equivalent to [] True is equivalent to [*], False is equivalent to []
@ -262,10 +262,10 @@ class Result:
datasets : iterable or str datasets : iterable or str
component : int component : int
homogenization component to consider for constituent data homogenization component to consider for constituent data
tagged : Boolean tagged : bool
tag Table.column name with '#component' tag Table.column name with '#component'
defaults to False defaults to False
split : Boolean split : bool
split Table by increment and return dictionary of Tables split Table by increment and return dictionary of Tables
defaults to True defaults to True
@ -326,7 +326,7 @@ class Result:
Parameters Parameters
---------- ----------
datasets : iterable or str or Boolean datasets : iterable or str or bool
Examples Examples
-------- --------
@ -460,11 +460,19 @@ class Result:
def cell_coordinates(self): def cell_coordinates(self):
"""Return initial coordinates of the cell centers.""" """Return initial coordinates of the cell centers."""
if self.structured: if self.structured:
return grid_filters.cell_coord0(self.grid,self.size,self.origin).reshape(-1,3) return grid_filters.cell_coord0(self.grid,self.size,self.origin).reshape(-1,3,order='F')
else: else:
with h5py.File(self.fname,'r') as f: with h5py.File(self.fname,'r') as f:
return f['geometry/x_c'][()] return f['geometry/x_c'][()]
def node_coordinates(self):
"""Return initial coordinates of the cell centers."""
if self.structured:
return grid_filters.node_coord0(self.grid,self.size,self.origin).reshape(-1,3,order='F')
else:
with h5py.File(self.fname,'r') as f:
return f['geometry/x_n'][()]
@staticmethod @staticmethod
def _add_absolute(x): def _add_absolute(x):

View File

@ -1,6 +1,7 @@
import numpy as np import numpy as np
from ._Lambert import ball_to_cube, cube_to_ball from ._Lambert import ball_to_cube, cube_to_ball
from . import mechanics
_P = -1 _P = -1
@ -61,6 +62,8 @@ class Rotation:
def __repr__(self): def __repr__(self):
"""Orientation displayed as unit quaternion, rotation matrix, and Bunge-Euler angles.""" """Orientation displayed as unit quaternion, rotation matrix, and Bunge-Euler angles."""
if self.quaternion.shape != (4,):
raise NotImplementedError('Support for multiple rotations missing')
return '\n'.join([ return '\n'.join([
'Quaternion: (real={:.3f}, imag=<{:+.3f}, {:+.3f}, {:+.3f}>)'.format(*(self.quaternion)), 'Quaternion: (real={:.3f}, imag=<{:+.3f}, {:+.3f}, {:+.3f}>)'.format(*(self.quaternion)),
'Matrix:\n{}'.format(self.asMatrix()), 'Matrix:\n{}'.format(self.asMatrix()),
@ -83,6 +86,8 @@ class Rotation:
consider rotation of (3,3,3,3)-matrix consider rotation of (3,3,3,3)-matrix
""" """
if self.quaternion.shape != (4,):
raise NotImplementedError('Support for multiple rotations missing')
if isinstance(other, Rotation): # rotate a rotation if isinstance(other, Rotation): # rotate a rotation
self_q = self.quaternion[0] self_q = self.quaternion[0]
self_p = self.quaternion[1:] self_p = self.quaternion[1:]
@ -107,7 +112,7 @@ class Rotation:
elif other.shape == (3,3,): # rotate a single (3x3)-matrix elif other.shape == (3,3,): # rotate a single (3x3)-matrix
return np.dot(self.asMatrix(),np.dot(other,self.asMatrix().T)) return np.dot(self.asMatrix(),np.dot(other,self.asMatrix().T))
elif other.shape == (3,3,3,3,): elif other.shape == (3,3,3,3,):
raise NotImplementedError raise NotImplementedError('Support for rotation of 4th order tensors missing')
else: else:
return NotImplemented return NotImplemented
else: else:
@ -116,7 +121,7 @@ class Rotation:
def inverse(self): def inverse(self):
"""In-place inverse rotation/backward rotation.""" """In-place inverse rotation/backward rotation."""
self.quaternion[1:] *= -1 self.quaternion[...,1:] *= -1
return self return self
def inversed(self): def inversed(self):
@ -125,12 +130,12 @@ class Rotation:
def standardize(self): def standardize(self):
"""In-place quaternion representation with positive q.""" """In-place quaternion representation with positive real part."""
if self.quaternion[0] < 0.0: self.quaternion*=-1 self.quaternion[self.quaternion[...,0] < 0.0] *= -1
return self return self
def standardized(self): def standardized(self):
"""Quaternion representation with positive q.""" """Quaternion representation with positive real part."""
return self.copy().standardize() return self.copy().standardize()
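The vectorized standardize above uses a boolean mask to select whole quaternions with negative real part and flips their sign in place; q and -q encode the same rotation, so the representation is unchanged. A two-line numpy illustration, independent of the Rotation class:

import numpy as np

qu = np.array([[ 0.8,  0.1, -0.2,  0.55],   # real part already positive: untouched
               [-0.5,  0.5,  0.5,  0.5 ]])  # antipodal representation: gets flipped
qu[qu[..., 0] < 0.0] *= -1                  # flip whole quaternions, not single components
assert np.all(qu[..., 0] >= 0.0)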
@ -157,15 +162,17 @@ class Rotation:
Rotation from which the average is rotated. Rotation from which the average is rotated.
""" """
if self.quaternion.shape != (4,) or other.quaternion.shape != (4,):
raise NotImplementedError('Support for multiple rotations missing')
return Rotation.fromAverage([self,other]) return Rotation.fromAverage([self,other])
################################################################################################ ################################################################################################
# convert to different orientation representations (numpy arrays) # convert to different orientation representations (numpy arrays)
def asQuaternion(self): def as_quaternion(self):
""" """
Unit quaternion [q, p_1, p_2, p_3] unless quaternion == True: damask.quaternion object. Unit quaternion [q, p_1, p_2, p_3].
Parameters Parameters
---------- ----------
@ -175,8 +182,8 @@ class Rotation:
""" """
return self.quaternion return self.quaternion
def asEulers(self, def as_Eulers(self,
degrees = False): degrees = False):
""" """
Bunge-Euler angles: (φ_1, ϕ, φ_2). Bunge-Euler angles: (φ_1, ϕ, φ_2).
@ -190,9 +197,9 @@ class Rotation:
if degrees: eu = np.degrees(eu) if degrees: eu = np.degrees(eu)
return eu return eu
def asAxisAngle(self, def as_axis_angle(self,
degrees = False, degrees = False,
pair = False): pair = False):
""" """
Axis angle representation [n_1, n_2, n_3, ω] unless pair == True: ([n_1, n_2, n_3], ω). Axis angle representation [n_1, n_2, n_3, ω] unless pair == True: ([n_1, n_2, n_3], ω).
@ -205,15 +212,15 @@ class Rotation:
""" """
ax = Rotation.qu2ax(self.quaternion) ax = Rotation.qu2ax(self.quaternion)
if degrees: ax[3] = np.degrees(ax[3]) if degrees: ax[...,3] = np.degrees(ax[...,3])
return (ax[:3],ax[3]) if pair else ax return (ax[...,:3],ax[...,3]) if pair else ax
def asMatrix(self): def as_matrix(self):
"""Rotation matrix.""" """Rotation matrix."""
return Rotation.qu2om(self.quaternion) return Rotation.qu2om(self.quaternion)
def asRodrigues(self, def as_Rodrigues(self,
vector = False): vector = False):
""" """
Rodrigues-Frank vector representation [n_1, n_2, n_3, tan(ω/2)] unless vector == True: [n_1, n_2, n_3] * tan(ω/2). Rodrigues-Frank vector representation [n_1, n_2, n_3, tan(ω/2)] unless vector == True: [n_1, n_2, n_3] * tan(ω/2).
@ -224,9 +231,9 @@ class Rotation:
""" """
ro = Rotation.qu2ro(self.quaternion) ro = Rotation.qu2ro(self.quaternion)
return ro[:3]*ro[3] if vector else ro return ro[...,:3]*ro[...,3] if vector else ro
def asHomochoric(self): def as_homochoric(self):
"""Homochoric vector: (h_1, h_2, h_3).""" """Homochoric vector: (h_1, h_2, h_3)."""
return Rotation.qu2ho(self.quaternion) return Rotation.qu2ho(self.quaternion)
@ -234,7 +241,7 @@ class Rotation:
"""Cubochoric vector: (c_1, c_2, c_3).""" """Cubochoric vector: (c_1, c_2, c_3)."""
return Rotation.qu2cu(self.quaternion) return Rotation.qu2cu(self.quaternion)
def asM(self): def M(self): # ToDo not sure about the name: as_M or M? we do not have a from_M
""" """
Intermediate representation supporting quaternion averaging. Intermediate representation supporting quaternion averaging.
@ -244,114 +251,133 @@ class Rotation:
https://doi.org/10.2514/1.28949 https://doi.org/10.2514/1.28949
""" """
return np.outer(self.quaternion,self.quaternion) return np.einsum('...i,...j',self.quaternion,self.quaternion)
# for compatibility (old names do not follow convention)
asM = M
asQuaternion = as_quaternion
asEulers = as_Eulers
asAxisAngle = as_axis_angle
asMatrix = as_matrix
asRodrigues = as_Rodrigues
asHomochoric = as_homochoric
################################################################################################ ################################################################################################
# static constructors. The input data needs to follow the convention, options allow to # Static constructors. The input data needs to follow the conventions, options allow to
# relax these convections # relax the conventions.
@staticmethod @staticmethod
def fromQuaternion(quaternion, def from_quaternion(quaternion,
acceptHomomorph = False, acceptHomomorph = False,
P = -1): P = -1):
qu = quaternion if isinstance(quaternion,np.ndarray) and quaternion.dtype == np.dtype(float) \ qu = np.array(quaternion,dtype=float)
else np.array(quaternion,dtype=float) if qu.shape[:-2:-1] != (4,):
if P > 0: qu[1:4] *= -1 # convert from P=1 to P=-1 raise ValueError('Invalid shape.')
if qu[0] < 0.0:
if acceptHomomorph: if P > 0: qu[...,1:4] *= -1 # convert from P=1 to P=-1
qu *= -1. if acceptHomomorph:
else: qu[qu[...,0] < 0.0] *= -1
raise ValueError('Quaternion has negative first component: {}.'.format(qu[0])) else:
if not np.isclose(np.linalg.norm(qu), 1.0): if np.any(qu[...,0] < 0.0):
raise ValueError('Quaternion is not of unit length: {} {} {} {}.'.format(*qu)) raise ValueError('Quaternion with negative first (real) component.')
if not np.all(np.isclose(np.linalg.norm(qu,axis=-1), 1.0)):
raise ValueError('Quaternion is not of unit length.')
return Rotation(qu) return Rotation(qu)
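All new constructors share the guard qu.shape[:-2:-1] != (4,): the reversed slice yields the trailing axis length as a one-element tuple, or an empty tuple for 0-d input, so a single quaternion of shape (4,) and any stack of shape (...,4) pass while everything else raises. A quick check of the idiom; the helper exists only for illustration:

import numpy as np

def trailing_axis(a):
    """Length of the last axis as a 1-tuple, or () for 0-d input."""
    return np.asarray(a).shape[:-2:-1]

assert trailing_axis(np.zeros(4))         == (4,)   # single quaternion: accepted
assert trailing_axis(np.zeros((7, 5, 4))) == (4,)   # arbitrary stack of quaternions: accepted
assert trailing_axis(np.zeros(()))        == ()     # 0-d input: fails the == (4,) test
assert trailing_axis(np.zeros((4, 3)))    == (3,)   # wrong trailing length: rejected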
@staticmethod @staticmethod
def fromEulers(eulers, def from_Eulers(eulers,
degrees = False): degrees = False):
eu = np.array(eulers,dtype=float)
if eu.shape[:-2:-1] != (3,):
raise ValueError('Invalid shape.')
eu = eulers if isinstance(eulers, np.ndarray) and eulers.dtype == np.dtype(float) \
else np.array(eulers,dtype=float)
eu = np.radians(eu) if degrees else eu eu = np.radians(eu) if degrees else eu
if np.any(eu < 0.0) or np.any(eu > 2.0*np.pi) or eu[1] > np.pi: if np.any(eu < 0.0) or np.any(eu > 2.0*np.pi) or np.any(eu[...,1] > np.pi): # ToDo: No separate check for PHI
raise ValueError('Euler angles outside of [0..2π],[0..π],[0..2π]: {} {} {}.'.format(*eu)) raise ValueError('Euler angles outside of [0..2π],[0..π],[0..2π].')
return Rotation(Rotation.eu2qu(eu)) return Rotation(Rotation.eu2qu(eu))
@staticmethod @staticmethod
def fromAxisAngle(angleAxis, def from_axis_angle(axis_angle,
degrees = False, degrees = False,
normalise = False, normalise = False,
P = -1): P = -1):
ax = angleAxis if isinstance(angleAxis, np.ndarray) and angleAxis.dtype == np.dtype(float) \ ax = np.array(axis_angle,dtype=float)
else np.array(angleAxis,dtype=float) if ax.shape[:-2:-1] != (4,):
if P > 0: ax[0:3] *= -1 # convert from P=1 to P=-1 raise ValueError('Invalid shape.')
if degrees: ax[ 3] = np.radians(ax[3])
if normalise: ax[0:3] /= np.linalg.norm(ax[0:3]) if P > 0: ax[...,0:3] *= -1 # convert from P=1 to P=-1
if ax[3] < 0.0 or ax[3] > np.pi: if degrees: ax[..., 3] = np.radians(ax[...,3])
raise ValueError('Axis angle rotation angle outside of [0..π]: {}.'.format(ax[3])) if normalise: ax[...,0:3] /= np.linalg.norm(ax[...,0:3],axis=-1)
if not np.isclose(np.linalg.norm(ax[0:3]), 1.0): if np.any(ax[...,3] < 0.0) or np.any(ax[...,3] > np.pi):
raise ValueError('Axis angle rotation axis is not of unit length: {} {} {}.'.format(*ax[0:3])) raise ValueError('Axis angle rotation angle outside of [0..π].')
if not np.all(np.isclose(np.linalg.norm(ax[...,0:3],axis=-1), 1.0)):
raise ValueError('Axis angle rotation axis is not of unit length.')
return Rotation(Rotation.ax2qu(ax)) return Rotation(Rotation.ax2qu(ax))
@staticmethod @staticmethod
def fromBasis(basis, def from_basis(basis,
orthonormal = True, orthonormal = True,
reciprocal = False, reciprocal = False):
):
om = np.array(basis,dtype=float)
if om.shape[:-3:-1] != (3,3):
raise ValueError('Invalid shape.')
om = basis if isinstance(basis, np.ndarray) else np.array(basis).reshape(3,3)
if reciprocal: if reciprocal:
om = np.linalg.inv(om.T/np.pi) # transform reciprocal basis set om = np.linalg.inv(mechanics.transpose(om)/np.pi) # transform reciprocal basis set
orthonormal = False # contains stretch orthonormal = False # contains stretch
if not orthonormal: if not orthonormal:
(U,S,Vh) = np.linalg.svd(om) # singular value decomposition (U,S,Vh) = np.linalg.svd(om) # singular value decomposition
om = np.dot(U,Vh) om = np.einsum('...ij,...jl->...il',U,Vh)
if not np.isclose(np.linalg.det(om),1.0): if not np.all(np.isclose(np.linalg.det(om),1.0)):
raise ValueError('matrix is not a proper rotation: {}.'.format(om)) raise ValueError('Orientation matrix has determinant ≠ 1.')
if not np.isclose(np.dot(om[0],om[1]), 0.0) \ if not np.all(np.isclose(np.einsum('...i,...i',om[...,0],om[...,1]), 0.0)) \
or not np.isclose(np.dot(om[1],om[2]), 0.0) \ or not np.all(np.isclose(np.einsum('...i,...i',om[...,1],om[...,2]), 0.0)) \
or not np.isclose(np.dot(om[2],om[0]), 0.0): or not np.all(np.isclose(np.einsum('...i,...i',om[...,2],om[...,0]), 0.0)):
raise ValueError('matrix is not orthogonal: {}.'.format(om)) raise ValueError('Orientation matrix is not orthogonal.')
return Rotation(Rotation.om2qu(om)) return Rotation(Rotation.om2qu(om))
@staticmethod @staticmethod
def fromMatrix(om, def from_matrix(om):
):
return Rotation.fromBasis(om) return Rotation.from_basis(om)
@staticmethod @staticmethod
def fromRodrigues(rodrigues, def from_Rodrigues(rodrigues,
normalise = False, normalise = False,
P = -1): P = -1):
ro = rodrigues if isinstance(rodrigues, np.ndarray) and rodrigues.dtype == np.dtype(float) \ ro = np.array(rodrigues,dtype=float)
else np.array(rodrigues,dtype=float) if ro.shape[:-2:-1] != (4,):
if P > 0: ro[0:3] *= -1 # convert from P=1 to P=-1 raise ValueError('Invalid shape.')
if normalise: ro[0:3] /= np.linalg.norm(ro[0:3])
if not np.isclose(np.linalg.norm(ro[0:3]), 1.0): if P > 0: ro[...,0:3] *= -1 # convert from P=1 to P=-1
raise ValueError('Rodrigues rotation axis is not of unit length: {} {} {}.'.format(*ro[0:3])) if normalise: ro[...,0:3] /= np.linalg.norm(ro[...,0:3],axis=-1)
if ro[3] < 0.0: if np.any(ro[...,3] < 0.0):
raise ValueError('Rodrigues rotation angle not positive: {}.'.format(ro[3])) raise ValueError('Rodrigues vector rotation angle not positive.')
if not np.all(np.isclose(np.linalg.norm(ro[...,0:3],axis=-1), 1.0)):
raise ValueError('Rodrigues vector rotation axis is not of unit length.')
return Rotation(Rotation.ro2qu(ro)) return Rotation(Rotation.ro2qu(ro))
@staticmethod @staticmethod
def fromHomochoric(homochoric, def from_homochoric(homochoric,
P = -1): P = -1):
ho = np.array(homochoric,dtype=float)
if ho.shape[:-2:-1] != (3,):
raise ValueError('Invalid shape.')
ho = homochoric if isinstance(homochoric, np.ndarray) and homochoric.dtype == np.dtype(float) \
else np.array(homochoric,dtype=float)
if P > 0: ho *= -1 # convert from P=1 to P=-1 if P > 0: ho *= -1 # convert from P=1 to P=-1
if np.linalg.norm(ho) > (3.*np.pi/4.)**(1./3.)+1e-9: if np.any(np.linalg.norm(ho,axis=-1) > (3.*np.pi/4.)**(1./3.)+1e-9):
raise ValueError('Coordinate outside of the sphere: {} {} {}.'.format(ho)) raise ValueError('Homochoric coordinate outside of the sphere.')
return Rotation(Rotation.ho2qu(ho)) return Rotation(Rotation.ho2qu(ho))
@ -359,11 +385,12 @@ class Rotation:
def fromCubochoric(cubochoric, def fromCubochoric(cubochoric,
P = -1): P = -1):
cu = cubochoric if isinstance(cubochoric, np.ndarray) and cubochoric.dtype == np.dtype(float) \ cu = np.array(cubochoric,dtype=float)
else np.array(cubochoric,dtype=float) if cu.shape[:-2:-1] != (3,):
raise ValueError('Invalid shape.')
if np.abs(np.max(cu))>np.pi**(2./3.) * 0.5+1e-9: if np.abs(np.max(cu))>np.pi**(2./3.) * 0.5+1e-9:
raise ValueError('Coordinate outside of the cube: {} {} {}.'.format(*cu)) raise ValueError('Cubochoric coordinate outside of the cube: {} {} {}.'.format(*cu))
ho = Rotation.cu2ho(cu) ho = Rotation.cu2ho(cu)
if P > 0: ho *= -1 # convert from P=1 to P=-1 if P > 0: ho *= -1 # convert from P=1 to P=-1
@ -403,17 +430,34 @@ class Rotation:
return Rotation.fromQuaternion(np.real(vec.T[eig.argmax()]),acceptHomomorph = True) return Rotation.fromQuaternion(np.real(vec.T[eig.argmax()]),acceptHomomorph = True)
@staticmethod @staticmethod
def fromRandom(): def from_random(shape=None):
r = np.random.random(3) if shape is None:
A = np.sqrt(r[2]) r = np.random.random(3)
B = np.sqrt(1.0-r[2]) elif hasattr(shape, '__iter__'):
return Rotation(np.array([np.cos(2.0*np.pi*r[0])*A, r = np.random.random(tuple(shape)+(3,))
np.sin(2.0*np.pi*r[1])*B, else:
np.cos(2.0*np.pi*r[1])*B, r = np.random.random((shape,3))
np.sin(2.0*np.pi*r[0])*A])).standardize()
A = np.sqrt(r[...,2])
B = np.sqrt(1.0-r[...,2])
q = np.stack([np.cos(2.0*np.pi*r[...,0])*A,
np.sin(2.0*np.pi*r[...,1])*B,
np.cos(2.0*np.pi*r[...,1])*B,
np.sin(2.0*np.pi*r[...,0])*A],axis=-1)
return Rotation(q.reshape(r.shape[:-1]+(4,)) if shape is not None else q).standardize()
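from_random vectorizes the standard three-uniform-numbers construction of uniformly distributed unit quaternions, often attributed to Shoemake: with A = sqrt(r_2) and B = sqrt(1 - r_2) the trigonometric identity guarantees unit norm, and standardize() flips antipodal representatives. A self-contained sketch of the same sampling; the helper name is chosen here for illustration:

import numpy as np

def random_quaternions(shape):
    """Uniformly distributed unit quaternions for a given leading shape (tuple)."""
    r = np.random.random(tuple(shape)+(3,))
    A, B = np.sqrt(r[..., 2]), np.sqrt(1.0-r[..., 2])
    q = np.stack([np.cos(2.0*np.pi*r[..., 0])*A,
                  np.sin(2.0*np.pi*r[..., 1])*B,
                  np.cos(2.0*np.pi*r[..., 1])*B,
                  np.sin(2.0*np.pi*r[..., 0])*A], axis=-1)
    q[q[..., 0] < 0.0] *= -1                             # same standardization as above
    return q

q = random_quaternions((1000,))
assert np.allclose(np.linalg.norm(q, axis=-1), 1.0)      # r_2 + (1 - r_2) = 1 guarantees unit norm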
# for compatibility (old names do not follow convention)
fromQuaternion = from_quaternion
fromEulers = from_Eulers
fromAxisAngle = from_axis_angle
fromBasis = from_basis
fromMatrix = from_matrix
fromRodrigues = from_Rodrigues
fromHomochoric = from_homochoric
fromRandom = from_random
#################################################################################################### ####################################################################################################
# Code below available according to the following conditions on https://github.com/MarDiehl/3Drotations # Code below available according to the following conditions on https://github.com/MarDiehl/3Drotations
@ -808,12 +852,11 @@ class Rotation:
c = np.cos(ax[3]*0.5) c = np.cos(ax[3]*0.5)
s = np.sin(ax[3]*0.5) s = np.sin(ax[3]*0.5)
qu = np.array([ c, ax[0]*s, ax[1]*s, ax[2]*s ]) qu = np.array([ c, ax[0]*s, ax[1]*s, ax[2]*s ])
return qu
else: else:
c = np.cos(ax[...,3:4]*.5) c = np.cos(ax[...,3:4]*.5)
s = np.sin(ax[...,3:4]*.5) s = np.sin(ax[...,3:4]*.5)
qu = np.where(np.abs(ax[...,3:4])<1.e-6,[1.0, 0.0, 0.0, 0.0],np.block([c, ax[...,:3]*s])) qu = np.where(np.abs(ax[...,3:4])<1.e-6,[1.0, 0.0, 0.0, 0.0],np.block([c, ax[...,:3]*s]))
return qu return qu
@staticmethod @staticmethod
def ax2om(ax): def ax2om(ax):
@ -859,7 +902,7 @@ class Rotation:
# 180 degree case # 180 degree case
ro += [np.inf] if np.isclose(ax[3],np.pi,atol=1.0e-15,rtol=0.0) else \ ro += [np.inf] if np.isclose(ax[3],np.pi,atol=1.0e-15,rtol=0.0) else \
[np.tan(ax[3]*0.5)] [np.tan(ax[3]*0.5)]
return np.array(ro) ro = np.array(ro)
else: else:
ro = np.block([ax[...,:3], ro = np.block([ax[...,:3],
np.where(np.isclose(ax[...,3:4],np.pi,atol=1.e-15,rtol=.0), np.where(np.isclose(ax[...,3:4],np.pi,atol=1.e-15,rtol=.0),
@ -867,7 +910,7 @@ class Rotation:
np.tan(ax[...,3:4]*0.5)) np.tan(ax[...,3:4]*0.5))
]) ])
ro[np.abs(ax[...,3])<1.e-6] = [.0,.0,_P,.0] ro[np.abs(ax[...,3])<1.e-6] = [.0,.0,_P,.0]
return ro return ro
@staticmethod @staticmethod
def ax2ho(ax): def ax2ho(ax):
@ -875,11 +918,10 @@ class Rotation:
if len(ax.shape) == 1: if len(ax.shape) == 1:
f = (0.75 * ( ax[3] - np.sin(ax[3]) ))**(1.0/3.0) f = (0.75 * ( ax[3] - np.sin(ax[3]) ))**(1.0/3.0)
ho = ax[0:3] * f ho = ax[0:3] * f
return ho
else: else:
f = (0.75 * ( ax[...,3:4] - np.sin(ax[...,3:4]) ))**(1.0/3.0) f = (0.75 * ( ax[...,3:4] - np.sin(ax[...,3:4]) ))**(1.0/3.0)
ho = ax[...,:3] * f ho = ax[...,:3] * f
return ho return ho
@staticmethod @staticmethod
def ax2cu(ax): def ax2cu(ax):
@ -936,7 +978,6 @@ class Rotation:
f = np.where(np.isfinite(ro[...,3:4]),2.0*np.arctan(ro[...,3:4]) -np.sin(2.0*np.arctan(ro[...,3:4])),np.pi) f = np.where(np.isfinite(ro[...,3:4]),2.0*np.arctan(ro[...,3:4]) -np.sin(2.0*np.arctan(ro[...,3:4])),np.pi)
ho = np.where(np.broadcast_to(np.sum(ro[...,0:3]**2.0,axis=-1,keepdims=True) < 1.e-6,ro[...,0:3].shape), ho = np.where(np.broadcast_to(np.sum(ro[...,0:3]**2.0,axis=-1,keepdims=True) < 1.e-6,ro[...,0:3].shape),
np.zeros(3), ro[...,0:3]* (0.75*f)**(1.0/3.0)) np.zeros(3), ro[...,0:3]* (0.75*f)**(1.0/3.0))
return ho return ho
@staticmethod @staticmethod
@ -1010,7 +1051,7 @@ class Rotation:
if len(ho.shape) == 1: if len(ho.shape) == 1:
return ball_to_cube(ho) return ball_to_cube(ho)
else: else:
raise NotImplementedError raise NotImplementedError('Support for multiple rotations missing')
#---------- Cubochoric ---------- #---------- Cubochoric ----------
@ -1045,4 +1086,4 @@ class Rotation:
if len(cu.shape) == 1: if len(cu.shape) == 1:
return cube_to_ball(cu) return cube_to_ball(cu)
else: else:
raise NotImplementedError raise NotImplementedError('Support for multiple rotations missing')

View File

@ -1,3 +1,17 @@
"""
Filters for operations on regular grids.
Notes
-----
The grids are defined as (x,y,z,...) where x is fastest and z is slowest.
This convention is consistent with the geom file format.
When converting to/from a plain list (e.g. storage in ASCII table),
the following operations are required for tensorial data:
D3 = D1.reshape(grid+(-1,),order='F').reshape(grid+(3,3))
D1 = D3.reshape(grid+(-1,)).reshape(-1,9,order='F')
"""
from scipy import spatial as _spatial from scipy import spatial as _spatial
import numpy as _np import numpy as _np
@ -7,8 +21,12 @@ def _ks(size,grid,first_order=False):
Parameters Parameters
---------- ----------
size : numpy.ndarray size : numpy.ndarray of shape (3)
physical size of the periodic field. physical size of the periodic field.
grid : numpy.ndarray of shape (3)
number of grid points.
first_order : bool, optional
correction for first order derivatives, defaults to False.
""" """
k_sk = _np.where(_np.arange(grid[0])>grid[0]//2,_np.arange(grid[0])-grid[0],_np.arange(grid[0]))/size[0] k_sk = _np.where(_np.arange(grid[0])>grid[0]//2,_np.arange(grid[0])-grid[0],_np.arange(grid[0]))/size[0]
@ -19,8 +37,7 @@ def _ks(size,grid,first_order=False):
k_si = _np.arange(grid[2]//2+1)/size[2] k_si = _np.arange(grid[2]//2+1)/size[2]
kk, kj, ki = _np.meshgrid(k_sk,k_sj,k_si,indexing = 'ij') return _np.stack(_np.meshgrid(k_sk,k_sj,k_si,indexing = 'ij'), axis=-1)
return _np.concatenate((ki[:,:,:,None],kj[:,:,:,None],kk[:,:,:,None]),axis = 3)
def curl(size,field): def curl(size,field):
@ -29,8 +46,10 @@ def curl(size,field):
Parameters Parameters
---------- ----------
size : numpy.ndarray size : numpy.ndarray of shape (3)
physical size of the periodic field. physical size of the periodic field.
field : numpy.ndarray of shape (:,:,:,3) or (:,:,:,3,3)
periodic field of which the curl is calculated.
""" """
n = _np.prod(field.shape[3:]) n = _np.prod(field.shape[3:])
@ -53,8 +72,10 @@ def divergence(size,field):
Parameters Parameters
---------- ----------
size : numpy.ndarray size : numpy.ndarray of shape (3)
physical size of the periodic field. physical size of the periodic field.
field : numpy.ndarray of shape (:,:,:,3) or (:,:,:,3,3)
periodic field of which the divergence is calculated.
""" """
n = _np.prod(field.shape[3:]) n = _np.prod(field.shape[3:])
@ -69,12 +90,14 @@ def divergence(size,field):
def gradient(size,field): def gradient(size,field):
""" """
Calculate gradient of a vector or scalar field in Fourier space. Calculate gradient of a scalar or vector field in Fourier space.
Parameters Parameters
---------- ----------
size : numpy.ndarray size : numpy.ndarray of shape (3)
physical size of the periodic field. physical size of the periodic field.
field : numpy.ndarray of shape (:,:,:,1) or (:,:,:,3)
periodic field of which the gradient is calculated.
""" """
n = _np.prod(field.shape[3:]) n = _np.prod(field.shape[3:])
@ -93,9 +116,9 @@ def cell_coord0(grid,size,origin=_np.zeros(3)):
Parameters Parameters
---------- ----------
grid : numpy.ndarray grid : numpy.ndarray of shape (3)
number of grid points. number of grid points.
size : numpy.ndarray size : numpy.ndarray of shape (3)
physical size of the periodic field. physical size of the periodic field.
origin : numpy.ndarray, optional origin : numpy.ndarray, optional
physical origin of the periodic field. Defaults to [0.0,0.0,0.0]. physical origin of the periodic field. Defaults to [0.0,0.0,0.0].
@ -103,7 +126,11 @@ def cell_coord0(grid,size,origin=_np.zeros(3)):
""" """
start = origin + size/grid*.5 start = origin + size/grid*.5
end = origin + size - size/grid*.5 end = origin + size - size/grid*.5
return _np.mgrid[start[0]:end[0]:grid[0]*1j,start[1]:end[1]:grid[1]*1j,start[2]:end[2]:grid[2]*1j].T
return _np.stack(_np.meshgrid(_np.linspace(start[0],end[0],grid[0]),
_np.linspace(start[1],end[1],grid[1]),
_np.linspace(start[2],end[2],grid[2]),indexing = 'ij'),
axis = -1)
def cell_displacement_fluct(size,F): def cell_displacement_fluct(size,F):
@ -112,7 +139,7 @@ def cell_displacement_fluct(size,F):
Parameters Parameters
---------- ----------
size : numpy.ndarray size : numpy.ndarray of shape (3)
physical size of the periodic field. physical size of the periodic field.
F : numpy.ndarray F : numpy.ndarray
deformation gradient field. deformation gradient field.
@ -139,14 +166,14 @@ def cell_displacement_avg(size,F):
Parameters Parameters
---------- ----------
size : numpy.ndarray size : numpy.ndarray of shape (3)
physical size of the periodic field. physical size of the periodic field.
F : numpy.ndarray F : numpy.ndarray
deformation gradient field. deformation gradient field.
""" """
F_avg = _np.average(F,axis=(0,1,2)) F_avg = _np.average(F,axis=(0,1,2))
return _np.einsum('ml,ijkl->ijkm',F_avg - _np.eye(3),cell_coord0(F.shape[:3][::-1],size)) return _np.einsum('ml,ijkl->ijkm',F_avg - _np.eye(3),cell_coord0(F.shape[:3],size))
def cell_displacement(size,F): def cell_displacement(size,F):
@ -155,7 +182,7 @@ def cell_displacement(size,F):
Parameters Parameters
---------- ----------
size : numpy.ndarray size : numpy.ndarray of shape (3)
physical size of the periodic field. physical size of the periodic field.
F : numpy.ndarray F : numpy.ndarray
deformation gradient field. deformation gradient field.
@ -170,25 +197,25 @@ def cell_coord(size,F,origin=_np.zeros(3)):
Parameters Parameters
---------- ----------
size : numpy.ndarray size : numpy.ndarray of shape (3)
physical size of the periodic field. physical size of the periodic field.
F : numpy.ndarray F : numpy.ndarray
deformation gradient field. deformation gradient field.
origin : numpy.ndarray, optional origin : numpy.ndarray of shape (3), optional
physical origin of the periodic field. Defaults to [0.0,0.0,0.0]. physical origin of the periodic field. Defaults to [0.0,0.0,0.0].
""" """
return cell_coord0(F.shape[:3][::-1],size,origin) + cell_displacement(size,F) return cell_coord0(F.shape[:3],size,origin) + cell_displacement(size,F)
def cell_coord0_gridSizeOrigin(coord0,ordered=True): def cell_coord0_gridSizeOrigin(coord0,ordered=True):
""" """
Return grid 'DNA', i.e. grid, size, and origin from array of cell positions. Return grid 'DNA', i.e. grid, size, and origin from 1D array of cell positions.
Parameters Parameters
---------- ----------
coord0 : numpy.ndarray coord0 : numpy.ndarray of shape (:,3)
array of undeformed cell coordinates. undeformed cell coordinates.
ordered : bool, optional ordered : bool, optional
expect coord0 data to be ordered (x fast, z slow). expect coord0 data to be ordered (x fast, z slow).
@ -211,13 +238,13 @@ def cell_coord0_gridSizeOrigin(coord0,ordered=True):
start = origin + delta*.5 start = origin + delta*.5
end = origin - delta*.5 + size end = origin - delta*.5 + size
if not _np.allclose(coords[0],_np.linspace(start[0],end[0],grid[0])) and \ if not (_np.allclose(coords[0],_np.linspace(start[0],end[0],grid[0])) and \
_np.allclose(coords[1],_np.linspace(start[1],end[1],grid[1])) and \ _np.allclose(coords[1],_np.linspace(start[1],end[1],grid[1])) and \
_np.allclose(coords[2],_np.linspace(start[2],end[2],grid[2])): _np.allclose(coords[2],_np.linspace(start[2],end[2],grid[2]))):
raise ValueError('Regular grid spacing violated.') raise ValueError('Regular grid spacing violated.')
if ordered and not _np.allclose(coord0.reshape(tuple(grid[::-1])+(3,)),cell_coord0(grid,size,origin)): if ordered and not _np.allclose(coord0.reshape(tuple(grid)+(3,),order='F'),cell_coord0(grid,size,origin)):
raise ValueError('Input data is not a regular grid.') raise ValueError('Input data is not ordered (x fast, z slow).')
return (grid,size,origin) return (grid,size,origin)
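cell_coord0_gridSizeOrigin now expects the flat, x-fastest point list produced by reshape(-1,3,order='F') and, with ordered=True, verifies that ordering explicitly. A usage sketch mirroring the parametrized test further down in this diff; it assumes a damask installation that already contains these changes, and the numbers are arbitrary:

import numpy as np
from damask import grid_filters

grid   = np.array([8, 5, 4])
size   = np.array([1.0, 0.5, 0.25])
origin = np.array([0.1, 0.0, -0.2])

coord0 = grid_filters.cell_coord0(grid, size, origin)             # shape tuple(grid)+(3,)
g, s, o = grid_filters.cell_coord0_gridSizeOrigin(coord0.reshape(-1, 3, order='F'))

assert np.allclose(g, grid) and np.allclose(s, size) and np.allclose(o, origin)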
@ -241,17 +268,18 @@ def node_coord0(grid,size,origin=_np.zeros(3)):
Parameters Parameters
---------- ----------
grid : numpy.ndarray grid : numpy.ndarray of shape (3)
number of grid points. number of grid points.
size : numpy.ndarray size : numpy.ndarray of shape (3)
physical size of the periodic field. physical size of the periodic field.
origin : numpy.ndarray, optional origin : numpy.ndarray of shape (3), optional
physical origin of the periodic field. Defaults to [0.0,0.0,0.0]. physical origin of the periodic field. Defaults to [0.0,0.0,0.0].
""" """
return _np.mgrid[origin[0]:size[0]+origin[0]:(grid[0]+1)*1j, return _np.stack(_np.meshgrid(_np.linspace(origin[0],size[0]+origin[0],grid[0]+1),
origin[1]:size[1]+origin[1]:(grid[1]+1)*1j, _np.linspace(origin[1],size[1]+origin[1],grid[1]+1),
origin[2]:size[2]+origin[2]:(grid[2]+1)*1j].T _np.linspace(origin[2],size[2]+origin[2],grid[2]+1),indexing = 'ij'),
axis = -1)
def node_displacement_fluct(size,F): def node_displacement_fluct(size,F):
@ -260,7 +288,7 @@ def node_displacement_fluct(size,F):
Parameters Parameters
---------- ----------
size : numpy.ndarray size : numpy.ndarray of shape (3)
physical size of the periodic field. physical size of the periodic field.
F : numpy.ndarray F : numpy.ndarray
deformation gradient field. deformation gradient field.
@ -275,14 +303,14 @@ def node_displacement_avg(size,F):
Parameters Parameters
---------- ----------
size : numpy.ndarray size : numpy.ndarray of shape (3)
physical size of the periodic field. physical size of the periodic field.
F : numpy.ndarray F : numpy.ndarray
deformation gradient field. deformation gradient field.
""" """
F_avg = _np.average(F,axis=(0,1,2)) F_avg = _np.average(F,axis=(0,1,2))
return _np.einsum('ml,ijkl->ijkm',F_avg - _np.eye(3),node_coord0(F.shape[:3][::-1],size)) return _np.einsum('ml,ijkl->ijkm',F_avg - _np.eye(3),node_coord0(F.shape[:3],size))
def node_displacement(size,F): def node_displacement(size,F):
@ -291,7 +319,7 @@ def node_displacement(size,F):
Parameters Parameters
---------- ----------
size : numpy.ndarray size : numpy.ndarray of shape (3)
physical size of the periodic field. physical size of the periodic field.
F : numpy.ndarray F : numpy.ndarray
deformation gradient field. deformation gradient field.
@ -306,15 +334,15 @@ def node_coord(size,F,origin=_np.zeros(3)):
Parameters Parameters
---------- ----------
size : numpy.ndarray size : numpy.ndarray of shape (3)
physical size of the periodic field. physical size of the periodic field.
F : numpy.ndarray F : numpy.ndarray
deformation gradient field. deformation gradient field.
origin : numpy.ndarray, optional origin : numpy.ndarray of shape (3), optional
physical origin of the periodic field. Defaults to [0.0,0.0,0.0]. physical origin of the periodic field. Defaults to [0.0,0.0,0.0].
""" """
return node_coord0(F.shape[:3][::-1],size,origin) + node_displacement(size,F) return node_coord0(F.shape[:3],size,origin) + node_displacement(size,F)
def cell_2_node(cell_data): def cell_2_node(cell_data):
@ -335,14 +363,14 @@ def node_2_cell(node_data):
return c[:-1,:-1,:-1] return c[:-1,:-1,:-1]
def node_coord0_gridSizeOrigin(coord0,ordered=False): def node_coord0_gridSizeOrigin(coord0,ordered=True):
""" """
Return grid 'DNA', i.e. grid, size, and origin from array of nodal positions. Return grid 'DNA', i.e. grid, size, and origin from 1D array of nodal positions.
Parameters Parameters
---------- ----------
coord0 : numpy.ndarray coord0 : numpy.ndarray of shape (:,3)
array of undeformed nodal coordinates. undeformed nodal coordinates.
ordered : bool, optional ordered : bool, optional
expect coord0 data to be ordered (x fast, z slow). expect coord0 data to be ordered (x fast, z slow).
@ -357,13 +385,13 @@ def node_coord0_gridSizeOrigin(coord0,ordered=False):
if (grid+1).prod() != len(coord0): if (grid+1).prod() != len(coord0):
raise ValueError('Data count {} does not match grid {}.'.format(len(coord0),grid)) raise ValueError('Data count {} does not match grid {}.'.format(len(coord0),grid))
if not _np.allclose(coords[0],_np.linspace(mincorner[0],maxcorner[0],grid[0]+1)) and \ if not (_np.allclose(coords[0],_np.linspace(mincorner[0],maxcorner[0],grid[0]+1)) and \
_np.allclose(coords[1],_np.linspace(mincorner[1],maxcorner[1],grid[1]+1)) and \ _np.allclose(coords[1],_np.linspace(mincorner[1],maxcorner[1],grid[1]+1)) and \
_np.allclose(coords[2],_np.linspace(mincorner[2],maxcorner[2],grid[2]+1)): _np.allclose(coords[2],_np.linspace(mincorner[2],maxcorner[2],grid[2]+1))):
raise ValueError('Regular grid spacing violated.') raise ValueError('Regular grid spacing violated.')
if ordered and not _np.allclose(coord0.reshape(tuple((grid+1)[::-1])+(3,)),node_coord0(grid,size,origin)): if ordered and not _np.allclose(coord0.reshape(tuple(grid+1)+(3,),order='F'),node_coord0(grid,size,origin)):
raise ValueError('Input data is not a regular grid.') raise ValueError('Input data is not ordered (x fast, z slow).')
return (grid,size,origin) return (grid,size,origin)
@ -374,15 +402,15 @@ def regrid(size,F,new_grid):
Parameters Parameters
---------- ----------
size : numpy.ndarray size : numpy.ndarray of shape (3)
physical size physical size
F : numpy.ndarray F : numpy.ndarray of shape (:,:,:,3,3)
deformation gradient field deformation gradient field
new_grid : numpy.ndarray new_grid : numpy.ndarray of shape (3)
new grid for undeformed coordinates new grid for undeformed coordinates
""" """
c = cell_coord0(F.shape[:3][::-1],size) \ c = cell_coord0(F.shape[:3],size) \
+ cell_displacement_avg(size,F) \ + cell_displacement_avg(size,F) \
+ cell_displacement_fluct(size,F) + cell_displacement_fluct(size,F)

View File

@ -135,16 +135,16 @@ def PK2(P,F):
Parameters Parameters
---------- ----------
P : numpy.ndarray of shape (:,3,3) or (3,3) P : numpy.ndarray of shape (...,3,3) or (3,3)
First Piola-Kirchhoff stress. First Piola-Kirchhoff stress.
F : numpy.ndarray of shape (:,3,3) or (3,3) F : numpy.ndarray of shape (...,3,3) or (3,3)
Deformation gradient. Deformation gradient.
""" """
if _np.shape(F) == _np.shape(P) == (3,3): if _np.shape(F) == _np.shape(P) == (3,3):
S = _np.dot(_np.linalg.inv(F),P) S = _np.dot(_np.linalg.inv(F),P)
else: else:
S = _np.einsum('ijk,ikl->ijl',_np.linalg.inv(F),P) S = _np.einsum('...jk,...kl->...jl',_np.linalg.inv(F),P)
return symmetric(S) return symmetric(S)
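The mechanics routines now broadcast over arbitrary leading axes by replacing the explicit 'ijk,ikl' contraction with an ellipsis einsum and transpose(T,(0,2,1)) with a swap of the last two axes. A minimal reimplementation sketch of the PK2 path, not the library function itself, together with the no-deformation identity the updated tests rely on:

import numpy as np

def PK2_sketch(P, F):
    """Second Piola-Kirchhoff stress S = sym(F^-1 P), broadcasting over leading axes."""
    S = np.einsum('...jk,...kl->...jl', np.linalg.inv(F), P)
    return 0.5*(S + np.swapaxes(S, -2, -1))

P = np.random.rand(7, 2, 3, 3)                                    # any leading shape
F = np.broadcast_to(np.eye(3), (7, 2, 3, 3))                      # no deformation
assert np.allclose(PK2_sketch(P, F), 0.5*(P + np.swapaxes(P, -2, -1)))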
@ -241,7 +241,7 @@ def symmetric(T):
Parameters Parameters
---------- ----------
T : numpy.ndarray of shape (:,3,3) or (3,3) T : numpy.ndarray of shape (...,3,3) or (3,3)
Tensor of which the symmetrized values are computed. Tensor of which the symmetrized values are computed.
""" """
@ -254,12 +254,12 @@ def transpose(T):
Parameters Parameters
---------- ----------
T : numpy.ndarray of shape (:,3,3) or (3,3) T : numpy.ndarray of shape (...,3,3) or (3,3)
Tensor of which the transpose is computed. Tensor of which the transpose is computed.
""" """
return T.T if _np.shape(T) == (3,3) else \ return T.T if _np.shape(T) == (3,3) else \
_np.transpose(T,(0,2,1)) _np.swapaxes(T,axis2=-2,axis1=-1)
def _polar_decomposition(T,requested): def _polar_decomposition(T,requested):

View File

@ -157,6 +157,30 @@ class TestRotation:
print(m,o,rot.asQuaternion()) print(m,o,rot.asQuaternion())
assert ok and o.max() < np.pi**(2./3.)*0.5+1.e-9 assert ok and o.max() < np.pi**(2./3.)*0.5+1.e-9
@pytest.mark.parametrize('function',[Rotation.from_quaternion,
Rotation.from_Eulers,
Rotation.from_axis_angle,
Rotation.from_matrix,
Rotation.from_Rodrigues,
Rotation.from_homochoric])
def test_invalid_shape(self,function):
invalid_shape = np.random.random(np.random.randint(8,32,(3)))
with pytest.raises(ValueError):
function(invalid_shape)
@pytest.mark.parametrize('function,invalid',[(Rotation.from_quaternion, np.array([-1,0,0,0])),
(Rotation.from_quaternion, np.array([1,1,1,0])),
(Rotation.from_Eulers, np.array([1,4,0])),
(Rotation.from_axis_angle, np.array([1,0,0,4])),
(Rotation.from_axis_angle, np.array([1,1,0,1])),
(Rotation.from_matrix, np.random.rand(3,3)),
(Rotation.from_Rodrigues, np.array([1,0,0,-1])),
(Rotation.from_Rodrigues, np.array([1,1,0,1])),
(Rotation.from_homochoric, np.array([2,2,2])) ])
def test_invalid(self,function,invalid):
with pytest.raises(ValueError):
function(invalid)
@pytest.mark.parametrize('conversion',[Rotation.qu2om, @pytest.mark.parametrize('conversion',[Rotation.qu2om,
Rotation.qu2eu, Rotation.qu2eu,
Rotation.qu2ax, Rotation.qu2ax,

View File

@ -4,18 +4,18 @@ import numpy as np
from damask import grid_filters from damask import grid_filters
class TestGridFilters: class TestGridFilters:
def test_cell_coord0(self): def test_cell_coord0(self):
size = np.random.random(3) size = np.random.random(3)
grid = np.random.randint(8,32,(3)) grid = np.random.randint(8,32,(3))
coord = grid_filters.cell_coord0(grid,size) coord = grid_filters.cell_coord0(grid,size)
assert np.allclose(coord[0,0,0],size/grid*.5) and coord.shape == tuple(grid[::-1]) + (3,) assert np.allclose(coord[0,0,0],size/grid*.5) and coord.shape == tuple(grid) + (3,)
def test_node_coord0(self): def test_node_coord0(self):
size = np.random.random(3) size = np.random.random(3)
grid = np.random.randint(8,32,(3)) grid = np.random.randint(8,32,(3))
coord = grid_filters.node_coord0(grid,size) coord = grid_filters.node_coord0(grid,size)
assert np.allclose(coord[-1,-1,-1],size) and coord.shape == tuple(grid[::-1]+1) + (3,) assert np.allclose(coord[-1,-1,-1],size) and coord.shape == tuple(grid+1) + (3,)
def test_coord0(self): def test_coord0(self):
size = np.random.random(3) size = np.random.random(3)
@ -31,7 +31,7 @@ class TestGridFilters:
size = np.random.random(3) size = np.random.random(3)
origin = np.random.random(3) origin = np.random.random(3)
coord0 = eval('grid_filters.{}_coord0(grid,size,origin)'.format(mode)) # noqa coord0 = eval('grid_filters.{}_coord0(grid,size,origin)'.format(mode)) # noqa
_grid,_size,_origin = eval('grid_filters.{}_coord0_gridSizeOrigin(coord0.reshape(-1,3))'.format(mode)) _grid,_size,_origin = eval('grid_filters.{}_coord0_gridSizeOrigin(coord0.reshape(-1,3,order="F"))'.format(mode))
assert np.allclose(grid,_grid) and np.allclose(size,_size) and np.allclose(origin,_origin) assert np.allclose(grid,_grid) and np.allclose(size,_size) and np.allclose(origin,_origin)
def test_displacement_fluct_equivalence(self): def test_displacement_fluct_equivalence(self):
@ -57,9 +57,9 @@ class TestGridFilters:
shifted = eval('grid_filters.{}_coord0(grid,size,origin)'.format(mode)) shifted = eval('grid_filters.{}_coord0(grid,size,origin)'.format(mode))
unshifted = eval('grid_filters.{}_coord0(grid,size)'.format(mode)) unshifted = eval('grid_filters.{}_coord0(grid,size)'.format(mode))
if mode == 'cell': if mode == 'cell':
assert np.allclose(shifted,unshifted+np.broadcast_to(origin,tuple(grid[::-1]) +(3,))) assert np.allclose(shifted,unshifted+np.broadcast_to(origin,tuple(grid) +(3,)))
elif mode == 'node': elif mode == 'node':
assert np.allclose(shifted,unshifted+np.broadcast_to(origin,tuple(grid[::-1]+1)+(3,))) assert np.allclose(shifted,unshifted+np.broadcast_to(origin,tuple(grid+1)+(3,)))
@pytest.mark.parametrize('function',[grid_filters.cell_displacement_avg, @pytest.mark.parametrize('function',[grid_filters.cell_displacement_avg,
grid_filters.node_displacement_avg]) grid_filters.node_displacement_avg])
@ -80,8 +80,43 @@ class TestGridFilters:
F = np.broadcast_to(np.random.random((3,3)), tuple(grid)+(3,3)) F = np.broadcast_to(np.random.random((3,3)), tuple(grid)+(3,3))
assert np.allclose(function(size,F),0.0) assert np.allclose(function(size,F),0.0)
@pytest.mark.parametrize('function',[grid_filters.coord0_check,
grid_filters.node_coord0_gridSizeOrigin,
grid_filters.cell_coord0_gridSizeOrigin])
def test_invalid_coordinates(self,function):
invalid_coordinates = np.random.random((np.random.randint(12,52),3))
with pytest.raises(ValueError):
function(invalid_coordinates)
@pytest.mark.parametrize('function',[grid_filters.node_coord0_gridSizeOrigin,
grid_filters.cell_coord0_gridSizeOrigin])
def test_uneven_spaced_coordinates(self,function):
start = np.random.random(3)
end = np.random.random(3)*10. + start
grid = np.random.randint(8,32,(3))
uneven = np.stack(np.meshgrid(np.logspace(start[0],end[0],grid[0]),
np.logspace(start[1],end[1],grid[1]),
np.logspace(start[2],end[2],grid[2]),indexing = 'ij'),
axis = -1).reshape((grid.prod(),3),order='F')
with pytest.raises(ValueError):
function(uneven)
@pytest.mark.parametrize('mode',[True,False])
@pytest.mark.parametrize('function',[grid_filters.node_coord0_gridSizeOrigin,
grid_filters.cell_coord0_gridSizeOrigin])
def test_unordered_coordinates(self,function,mode):
origin = np.random.random(3)
size = np.random.random(3)*10.+origin
grid = np.random.randint(8,32,(3))
unordered = grid_filters.node_coord0(grid,size,origin).reshape(-1,3)
if mode:
with pytest.raises(ValueError):
function(unordered,mode)
else:
function(unordered,mode)
def test_regrid(self): def test_regrid(self):
size = np.random.random(3) size = np.random.random(3)
grid = np.random.randint(8,32,(3)) grid = np.random.randint(8,32,(3))
F = np.broadcast_to(np.eye(3), tuple(grid[::-1])+(3,3)) F = np.broadcast_to(np.eye(3), tuple(grid)+(3,3))
assert all(grid_filters.regrid(size,F,grid) == np.arange(grid.prod())) assert all(grid_filters.regrid(size,F,grid) == np.arange(grid.prod()))

View File

@ -1,4 +1,6 @@
import pytest
import numpy as np import numpy as np
from damask import mechanics from damask import mechanics
class TestMechanics: class TestMechanics:
@ -7,127 +9,77 @@ class TestMechanics:
c = np.random.randint(n) c = np.random.randint(n)
def test_vectorize_Cauchy(self): @pytest.mark.parametrize('function',[mechanics.deviatoric_part,
P = np.random.random((self.n,3,3)) mechanics.eigenvalues,
F = np.random.random((self.n,3,3)) mechanics.eigenvectors,
assert np.allclose(mechanics.Cauchy(P,F)[self.c], mechanics.left_stretch,
mechanics.Cauchy(P[self.c],F[self.c])) mechanics.maximum_shear,
mechanics.Mises_strain,
mechanics.Mises_stress,
mechanics.right_stretch,
mechanics.rotational_part,
mechanics.spherical_part,
mechanics.symmetric,
mechanics.transpose,
])
def test_vectorize_1_arg(self,function):
epsilon = np.random.rand(self.n,3,3)
assert np.allclose(function(epsilon)[self.c],function(epsilon[self.c]))
def test_vectorize_deviatoric_part(self): @pytest.mark.parametrize('function',[mechanics.Cauchy,
x = np.random.random((self.n,3,3)) mechanics.PK2,
assert np.allclose(mechanics.deviatoric_part(x)[self.c], ])
mechanics.deviatoric_part(x[self.c])) def test_vectorize_2_arg(self,function):
P = np.random.rand(self.n,3,3)
def test_vectorize_eigenvalues(self): F = np.random.rand(self.n,3,3)
x = np.random.random((self.n,3,3)) assert np.allclose(function(P,F)[self.c],function(P[self.c],F[self.c]))
assert np.allclose(mechanics.eigenvalues(x)[self.c],
mechanics.eigenvalues(x[self.c]))
def test_vectorize_eigenvectors(self):
x = np.random.random((self.n,3,3))
assert np.allclose(mechanics.eigenvectors(x)[self.c],
mechanics.eigenvectors(x[self.c]))
def test_vectorize_left_stretch(self):
x = np.random.random((self.n,3,3))
assert np.allclose(mechanics.left_stretch(x)[self.c],
mechanics.left_stretch(x[self.c]))
def test_vectorize_maximum_shear(self):
x = np.random.random((self.n,3,3))
assert np.allclose(mechanics.maximum_shear(x)[self.c],
mechanics.maximum_shear(x[self.c]))
def test_vectorize_Mises_strain(self):
epsilon = np.random.random((self.n,3,3))
assert np.allclose(mechanics.Mises_strain(epsilon)[self.c],
mechanics.Mises_strain(epsilon[self.c]))
def test_vectorize_Mises_stress(self):
sigma = np.random.random((self.n,3,3))
assert np.allclose(mechanics.Mises_stress(sigma)[self.c],
mechanics.Mises_stress(sigma[self.c]))
def test_vectorize_PK2(self):
F = np.random.random((self.n,3,3))
P = np.random.random((self.n,3,3))
assert np.allclose(mechanics.PK2(P,F)[self.c],
mechanics.PK2(P[self.c],F[self.c]))
def test_vectorize_right_stretch(self):
x = np.random.random((self.n,3,3))
assert np.allclose(mechanics.right_stretch(x)[self.c],
mechanics.right_stretch(x[self.c]))
def test_vectorize_rotational_part(self):
x = np.random.random((self.n,3,3))
assert np.allclose(mechanics.rotational_part(x)[self.c],
mechanics.rotational_part(x[self.c]))
def test_vectorize_spherical_part(self):
x = np.random.random((self.n,3,3))
assert np.allclose(mechanics.spherical_part(x,True)[self.c],
mechanics.spherical_part(x[self.c],True))
def test_vectorize_strain_tensor(self): def test_vectorize_strain_tensor(self):
F = np.random.random((self.n,3,3)) F = np.random.rand(self.n,3,3)
t = ['V','U'][np.random.randint(0,2)] t = ['V','U'][np.random.randint(0,2)]
m = np.random.random()*10. -5.0 m = np.random.random()*10. -5.0
assert np.allclose(mechanics.strain_tensor(F,t,m)[self.c], assert np.allclose(mechanics.strain_tensor(F,t,m)[self.c],
mechanics.strain_tensor(F[self.c],t,m)) mechanics.strain_tensor(F[self.c],t,m))
def test_vectorize_symmetric(self): @pytest.mark.parametrize('function',[mechanics.Cauchy,
x = np.random.random((self.n,3,3)) mechanics.PK2,
assert np.allclose(mechanics.symmetric(x)[self.c], ])
mechanics.symmetric(x[self.c])) def test_stress_measures(self,function):
"""Ensure that all stress measures are equivalent for no deformation."""
def test_vectorize_transpose(self): P = np.random.rand(self.n,3,3)
x = np.random.random((self.n,3,3)) assert np.allclose(function(P,np.broadcast_to(np.eye(3),(self.n,3,3))),mechanics.symmetric(P))
assert np.allclose(mechanics.transpose(x)[self.c],
mechanics.transpose(x[self.c]))
def test_Cauchy(self):
"""Ensure Cauchy stress is symmetrized 1. Piola-Kirchhoff stress for no deformation."""
P = np.random.random((self.n,3,3))
assert np.allclose(mechanics.Cauchy(P,np.broadcast_to(np.eye(3),(self.n,3,3))),
mechanics.symmetric(P))
def test_deviatoric_part(self):
I_n = np.broadcast_to(np.eye(3),(self.n,3,3))
r = np.logical_not(I_n)*np.random.rand(self.n,3,3)
assert np.allclose(mechanics.deviatoric_part(I_n+r),r)
def test_polar_decomposition(self): def test_polar_decomposition(self):
"""F = RU = VR.""" """F = RU = VR."""
F = np.broadcast_to(np.eye(3),[self.n,3,3])*np.random.random((self.n,3,3)) F = np.broadcast_to(np.eye(3),[self.n,3,3])*np.random.rand(self.n,3,3)
R = mechanics.rotational_part(F) R = mechanics.rotational_part(F)
V = mechanics.left_stretch(F) V = mechanics.left_stretch(F)
U = mechanics.right_stretch(F) U = mechanics.right_stretch(F)
assert np.allclose(np.matmul(R,U), assert np.allclose(np.matmul(R,U),
np.matmul(V,R)) np.matmul(V,R))
def test_PK2(self):
"""Ensure 2. Piola-Kirchhoff stress is symmetrized 1. Piola-Kirchhoff stress for no deformation."""
P = np.random.random((self.n,3,3))
assert np.allclose(mechanics.PK2(P,np.broadcast_to(np.eye(3),(self.n,3,3))),
mechanics.symmetric(P))
def test_strain_tensor_no_rotation(self): def test_strain_tensor_no_rotation(self):
"""Ensure that left and right stretch give same results for no rotation.""" """Ensure that left and right stretch give same results for no rotation."""
F = np.broadcast_to(np.eye(3),[self.n,3,3])*np.random.random((self.n,3,3)) F = np.broadcast_to(np.eye(3),[self.n,3,3])*np.random.rand(self.n,3,3)
m = np.random.random()*20.0-10.0 m = np.random.random()*20.0-10.0
assert np.allclose(mechanics.strain_tensor(F,'U',m), assert np.allclose(mechanics.strain_tensor(F,'U',m),
mechanics.strain_tensor(F,'V',m)) mechanics.strain_tensor(F,'V',m))
def test_strain_tensor_rotation_equivalence(self): def test_strain_tensor_rotation_equivalence(self):
"""Ensure that left and right strain differ only by a rotation.""" """Ensure that left and right strain differ only by a rotation."""
F = np.broadcast_to(np.eye(3),[self.n,3,3]) + (np.random.random((self.n,3,3))*0.5 - 0.25) F = np.broadcast_to(np.eye(3),[self.n,3,3]) + (np.random.rand(self.n,3,3)*0.5 - 0.25)
m = np.random.random()*5.0-2.5 m = np.random.random()*5.0-2.5
assert np.allclose(np.linalg.det(mechanics.strain_tensor(F,'U',m)), assert np.allclose(np.linalg.det(mechanics.strain_tensor(F,'U',m)),
np.linalg.det(mechanics.strain_tensor(F,'V',m))) np.linalg.det(mechanics.strain_tensor(F,'V',m)))
def test_strain_tensor_rotation(self): def test_strain_tensor_rotation(self):
"""Ensure that pure rotation results in no strain.""" """Ensure that pure rotation results in no strain."""
F = mechanics.rotational_part(np.random.random((self.n,3,3))) F = mechanics.rotational_part(np.random.rand(self.n,3,3))
t = ['V','U'][np.random.randint(0,2)] t = ['V','U'][np.random.randint(0,2)]
m = np.random.random()*2.0 - 1.0 m = np.random.random()*2.0 - 1.0
assert np.allclose(mechanics.strain_tensor(F,t,m), assert np.allclose(mechanics.strain_tensor(F,t,m),
@ -139,21 +91,20 @@ class TestMechanics:
Should be +1, but random F might contain a reflection. Should be +1, but random F might contain a reflection.
""" """
x = np.random.random((self.n,3,3)) x = np.random.rand(self.n,3,3)
assert np.allclose(np.abs(np.linalg.det(mechanics.rotational_part(x))), assert np.allclose(np.abs(np.linalg.det(mechanics.rotational_part(x))),
1.0) 1.0)
def test_spherical_deviatoric_part(self): def test_spherical_deviatoric_part(self):
"""Ensure that full tensor is sum of spherical and deviatoric part.""" """Ensure that full tensor is sum of spherical and deviatoric part."""
x = np.random.random((self.n,3,3)) x = np.random.rand(self.n,3,3)
sph = mechanics.spherical_part(x,True) sph = mechanics.spherical_part(x,True)
assert np.allclose(sph + mechanics.deviatoric_part(x), assert np.allclose(sph + mechanics.deviatoric_part(x),
x) x)
def test_deviatoric_Mises(self): def test_deviatoric_Mises(self):
"""Ensure that Mises equivalent stress depends only on deviatoric part.""" """Ensure that Mises equivalent stress depends only on deviatoric part."""
x = np.random.random((self.n,3,3)) x = np.random.rand(self.n,3,3)
full = mechanics.Mises_stress(x) full = mechanics.Mises_stress(x)
dev = mechanics.Mises_stress(mechanics.deviatoric_part(x)) dev = mechanics.Mises_stress(mechanics.deviatoric_part(x))
assert np.allclose(full, assert np.allclose(full,
@ -161,7 +112,7 @@ class TestMechanics:
def test_spherical_mapping(self): def test_spherical_mapping(self):
"""Ensure that mapping to tensor is correct.""" """Ensure that mapping to tensor is correct."""
x = np.random.random((self.n,3,3)) x = np.random.rand(self.n,3,3)
tensor = mechanics.spherical_part(x,True) tensor = mechanics.spherical_part(x,True)
scalar = mechanics.spherical_part(x) scalar = mechanics.spherical_part(x)
assert np.allclose(np.linalg.det(tensor), assert np.allclose(np.linalg.det(tensor),
@ -169,35 +120,32 @@ class TestMechanics:
def test_spherical_Mises(self): def test_spherical_Mises(self):
"""Ensure that Mises equivalent strrain of spherical strain is 0.""" """Ensure that Mises equivalent strrain of spherical strain is 0."""
x = np.random.random((self.n,3,3)) x = np.random.rand(self.n,3,3)
sph = mechanics.spherical_part(x,True) sph = mechanics.spherical_part(x,True)
assert np.allclose(mechanics.Mises_strain(sph), assert np.allclose(mechanics.Mises_strain(sph),
0.0) 0.0)
def test_symmetric(self): def test_symmetric(self):
"""Ensure that a symmetric tensor is half of the sum of a tensor and its transpose.""" """Ensure that a symmetric tensor is half of the sum of a tensor and its transpose."""
x = np.random.random((self.n,3,3)) x = np.random.rand(self.n,3,3)
assert np.allclose(mechanics.symmetric(x)*2.0, assert np.allclose(mechanics.symmetric(x)*2.0,
mechanics.transpose(x)+x) mechanics.transpose(x)+x)
def test_transpose(self): def test_transpose(self):
"""Ensure that a symmetric tensor equals its transpose.""" """Ensure that a symmetric tensor equals its transpose."""
x = mechanics.symmetric(np.random.random((self.n,3,3))) x = mechanics.symmetric(np.random.rand(self.n,3,3))
assert np.allclose(mechanics.transpose(x), assert np.allclose(mechanics.transpose(x),
x) x)
def test_Mises(self): def test_Mises(self):
"""Ensure that equivalent stress is 3/2 of equivalent strain.""" """Ensure that equivalent stress is 3/2 of equivalent strain."""
x = np.random.random((self.n,3,3)) x = np.random.rand(self.n,3,3)
assert np.allclose(mechanics.Mises_stress(x)/mechanics.Mises_strain(x), assert np.allclose(mechanics.Mises_stress(x)/mechanics.Mises_strain(x),
1.5) 1.5)
def test_eigenvalues(self): def test_eigenvalues(self):
"""Ensure that the characteristic polynomial can be solved.""" """Ensure that the characteristic polynomial can be solved."""
A = mechanics.symmetric(np.random.random((self.n,3,3))) A = mechanics.symmetric(np.random.rand(self.n,3,3))
lambd = mechanics.eigenvalues(A) lambd = mechanics.eigenvalues(A)
s = np.random.randint(self.n) s = np.random.randint(self.n)
for i in range(3): for i in range(3):
@ -205,7 +153,7 @@ class TestMechanics:
def test_eigenvalues_and_vectors(self): def test_eigenvalues_and_vectors(self):
"""Ensure that eigenvalues and -vectors are the solution to the characteristic polynomial.""" """Ensure that eigenvalues and -vectors are the solution to the characteristic polynomial."""
A = mechanics.symmetric(np.random.random((self.n,3,3))) A = mechanics.symmetric(np.random.rand(self.n,3,3))
lambd = mechanics.eigenvalues(A) lambd = mechanics.eigenvalues(A)
x = mechanics.eigenvectors(A) x = mechanics.eigenvectors(A)
s = np.random.randint(self.n) s = np.random.randint(self.n)
@ -214,12 +162,12 @@ class TestMechanics:
def test_eigenvectors_RHS(self): def test_eigenvectors_RHS(self):
"""Ensure that RHS coordinate system does only change sign of determinant.""" """Ensure that RHS coordinate system does only change sign of determinant."""
A = mechanics.symmetric(np.random.random((self.n,3,3))) A = mechanics.symmetric(np.random.rand(self.n,3,3))
LRHS = np.linalg.det(mechanics.eigenvectors(A,RHS=False)) LRHS = np.linalg.det(mechanics.eigenvectors(A,RHS=False))
RHS = np.linalg.det(mechanics.eigenvectors(A,RHS=True)) RHS = np.linalg.det(mechanics.eigenvectors(A,RHS=True))
assert np.allclose(np.abs(LRHS),RHS) assert np.allclose(np.abs(LRHS),RHS)
def test_spherical_no_shear(self): def test_spherical_no_shear(self):
"""Ensure that sherical stress has max shear of 0.0.""" """Ensure that sherical stress has max shear of 0.0."""
A = mechanics.spherical_part(mechanics.symmetric(np.random.random((self.n,3,3))),True) A = mechanics.spherical_part(mechanics.symmetric(np.random.rand(self.n,3,3)),True)
assert np.allclose(mechanics.maximum_shear(A),0.0) assert np.allclose(mechanics.maximum_shear(A),0.0)
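Minimal NumPy sketch of the checked property (illustrative only): a spherical tensor has three equal eigenvalues, so the Mohr's-circle radius, i.e. the maximum shear, vanishes.

import numpy as np

A   = np.trace(np.random.rand(3,3))/3.0 * np.eye(3) # spherical tensor: all eigenvalues coincide
lam = np.linalg.eigvalsh(A)
print(np.isclose((lam.max()-lam.min())/2.0, 0.0))   # maximum shear (Mohr's circle radius) is zero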
View File
@ -74,29 +74,25 @@ subroutine CPFEM_initAll(el,ip)
integer(pInt), intent(in) :: el, & !< FE el number integer(pInt), intent(in) :: el, & !< FE el number
ip !< FE integration point number ip !< FE integration point number
!$OMP CRITICAL(init) CPFEM_init_done = .true.
if (.not. CPFEM_init_done) then call DAMASK_interface_init
call DAMASK_interface_init call prec_init
call prec_init call IO_init
call IO_init call numerics_init
call numerics_init call debug_init
call debug_init call config_init
call config_init call math_init
call math_init call rotations_init
call rotations_init call YAML_types_init
call YAML_types_init call HDF5_utilities_init
call HDF5_utilities_init call results_init
call results_init call discretization_marc_init(ip, el)
call discretization_marc_init(ip, el) call lattice_init
call lattice_init call material_init
call material_init call constitutive_init
call constitutive_init call crystallite_init
call crystallite_init call homogenization_init
call homogenization_init call CPFEM_init
call CPFEM_init
CPFEM_init_done = .true.
endif
!$OMP END CRITICAL(init)
end subroutine CPFEM_initAll end subroutine CPFEM_initAll
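The hunk above drops the OpenMP critical section and the internal done-guard from CPFEM_initAll: the flag is set up front and the check remains only at the call site (see the hypela2 hunk below). A minimal Python sketch of that call-site-guard pattern, with purely illustrative names:

_init_done = False

def _interface_init(): pass                          # stand-ins for the Fortran *_init chain
def _prec_init():      pass
def _io_init():        pass

def initialize_all():
    global _init_done
    _init_done = True                                # flag set first, as in the new CPFEM_initAll
    for step in (_interface_init, _prec_init, _io_init):
        step()

def entry_point():                                   # e.g. the solver user routine
    if not _init_done:                               # guard now lives only at the call site
        initialize_all()

entry_point()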
View File
@ -261,11 +261,10 @@ subroutine hypela2(d,g,e,de,s,t,dt,ngens,m,nn,kcus,matus,ndi,nshear,disp, &
endif endif
!$ defaultNumThreadsInt = omp_get_num_threads() ! remember number of threads set by Marc !$ defaultNumThreadsInt = omp_get_num_threads() ! remember number of threads set by Marc
!$ call omp_set_num_threads(DAMASK_NumThreadsInt) ! set number of threads for parallel execution set by DAMASK_NUM_THREADS
if (.not. CPFEM_init_done) call CPFEM_initAll(m(1),nn) if (.not. CPFEM_init_done) call CPFEM_initAll(m(1),nn)
!$ call omp_set_num_threads(DAMASK_NumThreadsInt) ! set number of threads for parallel execution set by DAMASK_NUM_THREADS
computationMode = 0 ! save initialization value, since it does not result in any calculation computationMode = 0 ! save initialization value, since it does not result in any calculation
if (lovl == 4 ) then ! jacobian requested by marc if (lovl == 4 ) then ! jacobian requested by marc
if (timinc < theDelta .and. theInc == inc .and. lastLovl /= lovl) & ! first after cutback if (timinc < theDelta .and. theInc == inc .and. lastLovl /= lovl) & ! first after cutback
View File
@ -327,7 +327,7 @@ module constitutive
constitutive_initialFi, & constitutive_initialFi, &
constitutive_SandItsTangents, & constitutive_SandItsTangents, &
constitutive_collectDotState, & constitutive_collectDotState, &
constitutive_collectDeltaState, & constitutive_deltaState, &
constitutive_results constitutive_results
contains contains
@ -709,12 +709,14 @@ end subroutine constitutive_hooke_SandItsTangents
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
!> @brief contains the constitutive equation for calculating the rate of change of microstructure !> @brief contains the constitutive equation for calculating the rate of change of microstructure
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
subroutine constitutive_collectDotState(S, FArray, Fi, FpArray, subdt, ipc, ip, el) function constitutive_collectDotState(S, FArray, Fi, FpArray, subdt, ipc, ip, el,phase,of) result(broken)
integer, intent(in) :: & integer, intent(in) :: &
ipc, & !< component-ID of integration point ipc, & !< component-ID of integration point
ip, & !< integration point ip, & !< integration point
el !< element el, & !< element
phase, &
of
real(pReal), intent(in) :: & real(pReal), intent(in) :: &
subdt !< timestep subdt !< timestep
real(pReal), intent(in), dimension(3,3,homogenization_maxNgrains,discretization_nIP,discretization_nElem) :: & real(pReal), intent(in), dimension(3,3,homogenization_maxNgrains,discretization_nIP,discretization_nElem) :: &
@ -730,16 +732,16 @@ subroutine constitutive_collectDotState(S, FArray, Fi, FpArray, subdt, ipc, ip,
ho, & !< homogenization ho, & !< homogenization
tme, & !< thermal member position tme, & !< thermal member position
i, & !< counter in source loop i, & !< counter in source loop
instance, of instance
logical :: broken
ho = material_homogenizationAt(el) ho = material_homogenizationAt(el)
tme = thermalMapping(ho)%p(ip,el) tme = thermalMapping(ho)%p(ip,el)
of = material_phasememberAt(ipc,ip,el) instance = phase_plasticityInstance(phase)
instance = phase_plasticityInstance(material_phaseAt(ipc,el))
Mp = matmul(matmul(transpose(Fi),Fi),S) Mp = matmul(matmul(transpose(Fi),Fi),S)
plasticityType: select case (phase_plasticity(material_phaseAt(ipc,el))) plasticityType: select case (phase_plasticity(phase))
case (PLASTICITY_ISOTROPIC_ID) plasticityType case (PLASTICITY_ISOTROPIC_ID) plasticityType
call plastic_isotropic_dotState (Mp,instance,of) call plastic_isotropic_dotState (Mp,instance,of)
@ -760,10 +762,11 @@ subroutine constitutive_collectDotState(S, FArray, Fi, FpArray, subdt, ipc, ip,
call plastic_nonlocal_dotState (Mp,FArray,FpArray,temperature(ho)%p(tme),subdt, & call plastic_nonlocal_dotState (Mp,FArray,FpArray,temperature(ho)%p(tme),subdt, &
instance,of,ip,el) instance,of,ip,el)
end select plasticityType end select plasticityType
broken = any(IEEE_is_NaN(plasticState(phase)%dotState(:,of)))
SourceLoop: do i = 1, phase_Nsources(material_phaseAt(ipc,el)) SourceLoop: do i = 1, phase_Nsources(phase)
sourceType: select case (phase_source(i,material_phaseAt(ipc,el))) sourceType: select case (phase_source(i,phase))
case (SOURCE_damage_anisoBrittle_ID) sourceType case (SOURCE_damage_anisoBrittle_ID) sourceType
call source_damage_anisoBrittle_dotState (S, ipc, ip, el) !< correct stress? call source_damage_anisoBrittle_dotState (S, ipc, ip, el) !< correct stress?
@ -775,25 +778,29 @@ subroutine constitutive_collectDotState(S, FArray, Fi, FpArray, subdt, ipc, ip,
call source_damage_anisoDuctile_dotState ( ipc, ip, el) call source_damage_anisoDuctile_dotState ( ipc, ip, el)
case (SOURCE_thermal_externalheat_ID) sourceType case (SOURCE_thermal_externalheat_ID) sourceType
call source_thermal_externalheat_dotState(material_phaseAt(ipc,el),of) call source_thermal_externalheat_dotState(phase,of)
end select sourceType end select sourceType
broken = broken .or. any(IEEE_is_NaN(sourceState(phase)%p(i)%dotState(:,of)))
enddo SourceLoop enddo SourceLoop
end subroutine constitutive_collectDotState end function constitutive_collectDotState
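The refactor above turns the dot-state collection into a function whose result flags NaN rates instead of silently storing them. A minimal Python sketch of that pattern, with illustrative names (not the DAMASK API):

import numpy as np

def collect_dot_state(plastic_dot, source_dots):
    """Return 'broken' = True if any rate of change contains NaN, mirroring the new function result."""
    broken = bool(np.any(np.isnan(plastic_dot)))
    for s in source_dots:                            # analogous to the SourceLoop above
        broken = broken or bool(np.any(np.isnan(s)))
    return broken

print(collect_dot_state(np.zeros(5), [np.array([1.0, np.nan])]))   # True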
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
!> @brief for constitutive models having an instantaneous change of state !> @brief for constitutive models having an instantaneous change of state
!> will return false if delta state is not needed/supported by the constitutive model !> will return false if delta state is not needed/supported by the constitutive model
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
subroutine constitutive_collectDeltaState(S, Fe, Fi, ipc, ip, el) function constitutive_deltaState(S, Fe, Fi, ipc, ip, el, phase, of) result(broken)
integer, intent(in) :: & integer, intent(in) :: &
ipc, & !< component-ID of integration point ipc, & !< component-ID of integration point
ip, & !< integration point ip, & !< integration point
el !< element el, & !< element
phase, &
of
real(pReal), intent(in), dimension(3,3) :: & real(pReal), intent(in), dimension(3,3) :: &
S, & !< 2nd Piola Kirchhoff stress S, & !< 2nd Piola Kirchhoff stress
Fe, & !< elastic deformation gradient Fe, & !< elastic deformation gradient
@ -802,35 +809,62 @@ subroutine constitutive_collectDeltaState(S, Fe, Fi, ipc, ip, el)
Mp Mp
integer :: & integer :: &
i, & i, &
instance, of instance, &
myOffset, &
mySize
logical :: &
broken
Mp = matmul(matmul(transpose(Fi),Fi),S) Mp = matmul(matmul(transpose(Fi),Fi),S)
of = material_phasememberAt(ipc,ip,el) instance = phase_plasticityInstance(phase)
instance = phase_plasticityInstance(material_phaseAt(ipc,el))
plasticityType: select case (phase_plasticity(material_phaseAt(ipc,el))) plasticityType: select case (phase_plasticity(phase))
case (PLASTICITY_KINEHARDENING_ID) plasticityType case (PLASTICITY_KINEHARDENING_ID) plasticityType
call plastic_kinehardening_deltaState(Mp,instance,of) call plastic_kinehardening_deltaState(Mp,instance,of)
broken = any(IEEE_is_NaN(plasticState(phase)%deltaState(:,of)))
case (PLASTICITY_NONLOCAL_ID) plasticityType case (PLASTICITY_NONLOCAL_ID) plasticityType
call plastic_nonlocal_deltaState(Mp,instance,of,ip,el) call plastic_nonlocal_deltaState(Mp,instance,of,ip,el)
broken = any(IEEE_is_NaN(plasticState(phase)%deltaState(:,of)))
case default
broken = .false.
end select plasticityType end select plasticityType
sourceLoop: do i = 1, phase_Nsources(material_phaseAt(ipc,el)) if(.not. broken) then
select case(phase_plasticity(phase))
case (PLASTICITY_NONLOCAL_ID,PLASTICITY_KINEHARDENING_ID)
sourceType: select case (phase_source(i,material_phaseAt(ipc,el))) myOffset = plasticState(phase)%offsetDeltaState
mySize = plasticState(phase)%sizeDeltaState
plasticState(phase)%state(myOffset + 1:myOffset + mySize,of) = &
plasticState(phase)%state(myOffset + 1:myOffset + mySize,of) + plasticState(phase)%deltaState(1:mySize,of)
end select
endif
sourceLoop: do i = 1, phase_Nsources(phase)
sourceType: select case (phase_source(i,phase))
case (SOURCE_damage_isoBrittle_ID) sourceType case (SOURCE_damage_isoBrittle_ID) sourceType
call source_damage_isoBrittle_deltaState (constitutive_homogenizedC(ipc,ip,el), Fe, & call source_damage_isoBrittle_deltaState (constitutive_homogenizedC(ipc,ip,el), Fe, &
ipc, ip, el) ipc, ip, el)
broken = broken .or. any(IEEE_is_NaN(sourceState(phase)%p(i)%deltaState(:,of)))
if(.not. broken) then
myOffset = sourceState(phase)%p(i)%offsetDeltaState
mySize = sourceState(phase)%p(i)%sizeDeltaState
sourceState(phase)%p(i)%state(myOffset + 1: myOffset + mySize,of) = &
sourceState(phase)%p(i)%state(myOffset + 1: myOffset + mySize,of) + sourceState(phase)%p(i)%deltaState(1:mySize,of)
endif
end select sourceType end select sourceType
enddo SourceLoop enddo SourceLoop
end subroutine constitutive_collectDeltaState end function constitutive_deltaState
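constitutive_deltaState now both checks the increment for NaN and, if sound, adds it to the trailing (deltaState) slice of the state array. A small Python sketch of that bookkeeping, assuming a flat state vector (names are illustrative):

import numpy as np

def apply_delta_state(state, delta, offset):
    """Add deltaState to the trailing slice of state unless it contains NaN; return 'broken'."""
    broken = bool(np.any(np.isnan(delta)))
    if not broken:
        state[offset:offset+delta.size] += delta     # deltaState occupies the latter part of state
    return broken

state = np.zeros(6)
print(apply_delta_state(state, np.ones(2), offset=4), state)   # False [0. 0. 0. 0. 1. 1.]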
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
View File
@ -209,7 +209,7 @@ module subroutine plastic_disloUCLA_init
sizeDotState = size(['rho_mob ','rho_dip ','gamma_sl']) * prm%sum_N_sl sizeDotState = size(['rho_mob ','rho_dip ','gamma_sl']) * prm%sum_N_sl
sizeState = sizeDotState sizeState = sizeDotState
call material_allocatePlasticState(p,NipcMyPhase,sizeState,sizeDotState,0) call material_allocateState(plasticState(p),NipcMyPhase,sizeState,sizeDotState,0)
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! state aliases and initialization ! state aliases and initialization
View File
@ -399,7 +399,7 @@ module subroutine plastic_dislotwin_init
+ size(['f_tr']) * prm%sum_N_tr + size(['f_tr']) * prm%sum_N_tr
sizeState = sizeDotState sizeState = sizeDotState
call material_allocatePlasticState(p,NipcMyPhase,sizeState,sizeDotState,0) call material_allocateState(plasticState(p),NipcMyPhase,sizeState,sizeDotState,0)
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! locally defined state aliases and initialization of state0 and atol ! locally defined state aliases and initialization of state0 and atol
View File
@ -117,7 +117,7 @@ module subroutine plastic_isotropic_init
sizeDotState = size(['xi ','accumulated_shear']) sizeDotState = size(['xi ','accumulated_shear'])
sizeState = sizeDotState sizeState = sizeDotState
call material_allocatePlasticState(p,NipcMyPhase,sizeState,sizeDotState,0) call material_allocateState(plasticState(p),NipcMyPhase,sizeState,sizeDotState,0)
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! state aliases and initialization ! state aliases and initialization
View File
@ -164,7 +164,7 @@ module subroutine plastic_kinehardening_init
sizeDeltaState = size(['sense ', 'chi0 ', 'gamma0' ]) * prm%sum_N_sl sizeDeltaState = size(['sense ', 'chi0 ', 'gamma0' ]) * prm%sum_N_sl
sizeState = sizeDotState + sizeDeltaState sizeState = sizeDotState + sizeDeltaState
call material_allocatePlasticState(p,NipcMyPhase,sizeState,sizeDotState,sizeDeltaState) call material_allocateState(plasticState(p),NipcMyPhase,sizeState,sizeDotState,sizeDeltaState)
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! state aliases and initialization ! state aliases and initialization
View File
@ -29,7 +29,7 @@ module subroutine plastic_none_init
if (phase_plasticity(p) /= PLASTICITY_NONE_ID) cycle if (phase_plasticity(p) /= PLASTICITY_NONE_ID) cycle
NipcMyPhase = count(material_phaseAt == p) * discretization_nIP NipcMyPhase = count(material_phaseAt == p) * discretization_nIP
call material_allocatePlasticState(p,NipcMyPhase,0,0,0) call material_allocateState(plasticState(p),NipcMyPhase,0,0,0)
enddo enddo
View File
@ -320,6 +320,7 @@ module subroutine plastic_nonlocal_init
prm%fEdgeMultiplication = config%getFloat('edgemultiplication') prm%fEdgeMultiplication = config%getFloat('edgemultiplication')
prm%shortRangeStressCorrection = config%keyExists('/shortrangestresscorrection/') prm%shortRangeStressCorrection = config%keyExists('/shortrangestresscorrection/')
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! sanity checks ! sanity checks
if (any(prm%burgers < 0.0_pReal)) extmsg = trim(extmsg)//' burgers' if (any(prm%burgers < 0.0_pReal)) extmsg = trim(extmsg)//' burgers'
@ -384,9 +385,9 @@ module subroutine plastic_nonlocal_init
'maxDipoleHeightEdge ','maxDipoleHeightScrew' ]) * prm%sum_N_sl !< other dependent state variables that are not updated by microstructure 'maxDipoleHeightEdge ','maxDipoleHeightScrew' ]) * prm%sum_N_sl !< other dependent state variables that are not updated by microstructure
sizeDeltaState = sizeDotState sizeDeltaState = sizeDotState
call material_allocatePlasticState(p,NipcMyPhase,sizeState,sizeDotState,sizeDeltaState) call material_allocateState(plasticState(p),NipcMyPhase,sizeState,sizeDotState,sizeDeltaState)
plasticState(p)%nonlocal = .true. plasticState(p)%nonlocal = config%KeyExists('/nonlocal/')
plasticState(p)%offsetDeltaState = 0 ! ToDo: state structure does not follow convention plasticState(p)%offsetDeltaState = 0 ! ToDo: state structure does not follow convention
st0%rho => plasticState(p)%state0 (0*prm%sum_N_sl+1:10*prm%sum_N_sl,:) st0%rho => plasticState(p)%state0 (0*prm%sum_N_sl+1:10*prm%sum_N_sl,:)
@ -961,39 +962,24 @@ module subroutine plastic_nonlocal_dotState(Mp, F, Fp, Temperature,timestep, &
integer :: & integer :: &
ph, & ph, &
neighbor_instance, & !< instance of my neighbor's plasticity
ns, & !< short notation for the total number of active slip systems ns, & !< short notation for the total number of active slip systems
c, & !< character of dislocation c, & !< character of dislocation
n, & !< index of my current neighbor
neighbor_el, & !< element number of my neighbor
neighbor_ip, & !< integration point of my neighbor
neighbor_n, & !< neighbor index pointing to me when looking from my neighbor
opposite_neighbor, & !< index of my opposite neighbor
opposite_ip, & !< ip of my opposite neighbor
opposite_el, & !< element index of my opposite neighbor
opposite_n, & !< neighbor index pointing to me when looking from my opposite neighbor
t, & !< type of dislocation t, & !< type of dislocation
no,& !< neighbor offset shortcut
np,& !< neighbor phase shortcut
topp, & !< type of dislocation with opposite sign to t
s !< index of my current slip system s !< index of my current slip system
real(pReal), dimension(param(instance)%sum_N_sl,10) :: & real(pReal), dimension(param(instance)%sum_N_sl,10) :: &
rho, & rho, &
rho0, & !< dislocation density at beginning of time step rho0, & !< dislocation density at beginning of time step
rhoDot, & !< density evolution rhoDot, & !< density evolution
rhoDotMultiplication, & !< density evolution by multiplication rhoDotMultiplication, & !< density evolution by multiplication
rhoDotFlux, & !< density evolution by flux
rhoDotSingle2DipoleGlide, & !< density evolution by dipole formation (by glide) rhoDotSingle2DipoleGlide, & !< density evolution by dipole formation (by glide)
rhoDotAthermalAnnihilation, & !< density evolution by athermal annihilation rhoDotAthermalAnnihilation, & !< density evolution by athermal annihilation
rhoDotThermalAnnihilation !< density evolution by thermal annihilation rhoDotThermalAnnihilation !< density evolution by thermal annihilation
real(pReal), dimension(param(instance)%sum_N_sl,8) :: & real(pReal), dimension(param(instance)%sum_N_sl,8) :: &
rhoSgl, & !< current single dislocation densities (positive/negative screw and edge without dipoles) rhoSgl, & !< current single dislocation densities (positive/negative screw and edge without dipoles)
neighbor_rhoSgl0, & !< current single dislocation densities of neighboring ip (positive/negative screw and edge without dipoles)
my_rhoSgl0 !< single dislocation densities of central ip (positive/negative screw and edge without dipoles) my_rhoSgl0 !< single dislocation densities of central ip (positive/negative screw and edge without dipoles)
real(pReal), dimension(param(instance)%sum_N_sl,4) :: & real(pReal), dimension(param(instance)%sum_N_sl,4) :: &
v, & !< current dislocation glide velocity v, & !< current dislocation glide velocity
v0, & v0, &
neighbor_v0, & !< dislocation glide velocity of neighboring ip
gdot !< shear rates gdot !< shear rates
real(pReal), dimension(param(instance)%sum_N_sl) :: & real(pReal), dimension(param(instance)%sum_N_sl) :: &
tau, & !< current resolved shear stress tau, & !< current resolved shear stress
@ -1002,23 +988,7 @@ module subroutine plastic_nonlocal_dotState(Mp, F, Fp, Temperature,timestep, &
rhoDip, & !< current dipole dislocation densities (screw and edge dipoles) rhoDip, & !< current dipole dislocation densities (screw and edge dipoles)
dLower, & !< minimum stable dipole distance for edges and screws dLower, & !< minimum stable dipole distance for edges and screws
dUpper !< current maximum stable dipole distance for edges and screws dUpper !< current maximum stable dipole distance for edges and screws
real(pReal), dimension(3,param(instance)%sum_N_sl,4) :: &
m !< direction of dislocation motion
real(pReal), dimension(3,3) :: &
my_F, & !< my total deformation gradient
neighbor_F, & !< total deformation gradient of my neighbor
my_Fe, & !< my elastic deformation gradient
neighbor_Fe, & !< elastic deformation gradient of my neighbor
Favg !< average total deformation gradient of me and my neighbor
real(pReal), dimension(3) :: &
normal_neighbor2me, & !< interface normal pointing from my neighbor to me in neighbor's lattice configuration
normal_neighbor2me_defConf, & !< interface normal pointing from my neighbor to me in shared deformed configuration
normal_me2neighbor, & !< interface normal pointing from me to my neighbor in my lattice configuration
normal_me2neighbor_defConf !< interface normal pointing from me to my neighbor in shared deformed configuration
real(pReal) :: & real(pReal) :: &
area, & !< area of the current interface
transmissivity, & !< overall transmissivity of dislocation flux to neighboring material point
lineLength, & !< dislocation line length leaving the current interface
selfDiffusion !< self diffusion selfDiffusion !< self diffusion
ph = material_phaseAt(1,el) ph = material_phaseAt(1,el)
@ -1091,6 +1061,172 @@ module subroutine plastic_nonlocal_dotState(Mp, F, Fp, Temperature,timestep, &
* sqrt(stt%rho_forest(:,of)) / prm%lambda0 / prm%burgers, 2, 4) * sqrt(stt%rho_forest(:,of)) / prm%lambda0 / prm%burgers, 2, 4)
endif isBCC endif isBCC
forall (s = 1:ns, t = 1:4) v0(s,t) = plasticState(ph)%state0(iV(s,t,instance),of)
!****************************************************************************
!*** calculate dipole formation and annihilation
!*** formation by glide
do c = 1,2
rhoDotSingle2DipoleGlide(:,2*c-1) = -2.0_pReal * dUpper(:,c) / prm%burgers &
* ( rhoSgl(:,2*c-1) * abs(gdot(:,2*c)) & ! negative mobile --> positive mobile
+ rhoSgl(:,2*c) * abs(gdot(:,2*c-1)) & ! positive mobile --> negative mobile
+ abs(rhoSgl(:,2*c+4)) * abs(gdot(:,2*c-1))) ! positive mobile --> negative immobile
rhoDotSingle2DipoleGlide(:,2*c) = -2.0_pReal * dUpper(:,c) / prm%burgers &
* ( rhoSgl(:,2*c-1) * abs(gdot(:,2*c)) & ! negative mobile --> positive mobile
+ rhoSgl(:,2*c) * abs(gdot(:,2*c-1)) & ! positive mobile --> negative mobile
+ abs(rhoSgl(:,2*c+3)) * abs(gdot(:,2*c))) ! negative mobile --> positive immobile
rhoDotSingle2DipoleGlide(:,2*c+3) = -2.0_pReal * dUpper(:,c) / prm%burgers &
* rhoSgl(:,2*c+3) * abs(gdot(:,2*c)) ! negative mobile --> positive immobile
rhoDotSingle2DipoleGlide(:,2*c+4) = -2.0_pReal * dUpper(:,c) / prm%burgers &
* rhoSgl(:,2*c+4) * abs(gdot(:,2*c-1)) ! positive mobile --> negative immobile
rhoDotSingle2DipoleGlide(:,c+8) = abs(rhoDotSingle2DipoleGlide(:,2*c+3)) &
+ abs(rhoDotSingle2DipoleGlide(:,2*c+4)) &
- rhoDotSingle2DipoleGlide(:,2*c-1) &
- rhoDotSingle2DipoleGlide(:,2*c)
enddo
!*** athermal annihilation
rhoDotAthermalAnnihilation = 0.0_pReal
forall (c=1:2) &
rhoDotAthermalAnnihilation(:,c+8) = -2.0_pReal * dLower(:,c) / prm%burgers &
* ( 2.0_pReal * (rhoSgl(:,2*c-1) * abs(gdot(:,2*c)) + rhoSgl(:,2*c) * abs(gdot(:,2*c-1))) & ! was single hitting single
+ 2.0_pReal * (abs(rhoSgl(:,2*c+3)) * abs(gdot(:,2*c)) + abs(rhoSgl(:,2*c+4)) * abs(gdot(:,2*c-1))) & ! was single hitting immobile single or was immobile single hit by single
+ rhoDip(:,c) * (abs(gdot(:,2*c-1)) + abs(gdot(:,2*c)))) ! single knocks dipole constituent
! annihilated screw dipoles leave edge jogs behind on the colinear system
if (lattice_structure(ph) == LATTICE_fcc_ID) &
forall (s = 1:ns, prm%colinearSystem(s) > 0) &
rhoDotAthermalAnnihilation(prm%colinearSystem(s),1:2) = - rhoDotAthermalAnnihilation(s,10) &
* 0.25_pReal * sqrt(stt%rho_forest(s,of)) * (dUpper(s,2) + dLower(s,2)) * prm%edgeJogFactor
!*** thermally activated annihilation of edge dipoles by climb
rhoDotThermalAnnihilation = 0.0_pReal
selfDiffusion = prm%Dsd0 * exp(-prm%selfDiffusionEnergy / (kB * Temperature))
vClimb = prm%atomicVolume * selfDiffusion * prm%mu &
/ ( kB * Temperature * PI * (1.0_pReal-prm%nu) * (dUpper(:,1) + dLower(:,1)))
forall (s = 1:ns, dUpper(s,1) > dLower(s,1)) &
rhoDotThermalAnnihilation(s,9) = max(- 4.0_pReal * rhoDip(s,1) * vClimb(s) / (dUpper(s,1) - dLower(s,1)), &
- rhoDip(s,1) / timestep - rhoDotAthermalAnnihilation(s,9) &
- rhoDotSingle2DipoleGlide(s,9)) ! make sure that we do not annihilate more dipoles than we have
rhoDot = rhoDotFlux(F,Fp,timestep, instance,of,ip,el) &
+ rhoDotMultiplication &
+ rhoDotSingle2DipoleGlide &
+ rhoDotAthermalAnnihilation &
+ rhoDotThermalAnnihilation
if ( any(rho(:,mob) + rhoDot(:,1:4) * timestep < -prm%atol_rho) &
.or. any(rho(:,dip) + rhoDot(:,9:10) * timestep < -prm%atol_rho)) then
#ifdef DEBUG
if (iand(debug_level(debug_constitutive),debug_levelExtensive) /= 0) then
write(6,'(a,i5,a,i2)') '<< CONST >> evolution rate leads to negative density at el ',el,' ip ',ip
write(6,'(a)') '<< CONST >> enforcing cutback !!!'
endif
#endif
plasticState(ph)%dotState = IEEE_value(1.0_pReal,IEEE_quiet_NaN)
else
dot%rho(:,of) = pack(rhoDot,.true.)
dot%gamma(:,of) = sum(gdot,2)
endif
end associate
end subroutine plastic_nonlocal_dotState
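The flux contribution is now obtained from a separate function (rhoDotFlux, below) and summed with the local formation/annihilation terms; a NaN result requests a cutback. A minimal NumPy sketch of that summation-and-cutback logic (illustrative names, not the DAMASK API):

import numpy as np

def total_density_rate(contributions, rho, dt, atol):
    """Sum the individual rate contributions; return NaN to request a cutback if rho would go negative."""
    rho_dot = np.sum(contributions, axis=0)
    if np.any(rho + rho_dot*dt < -atol):
        return np.full_like(rho, np.nan)             # NaN propagates upstream and enforces the cutback
    return rho_dot

rho = np.ones(4)
print(total_density_rate([np.full(4,-0.1)]*3, rho, dt=1.0, atol=1e-8))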
!---------------------------------------------------------------------------------------------------
!> @brief calculates the rate of change of microstructure
!---------------------------------------------------------------------------------------------------
function rhoDotFlux(F,Fp,timestep, instance,of,ip,el)
real(pReal), dimension(3,3,homogenization_maxNgrains,discretization_nIP,discretization_nElem), intent(in) :: &
F, & !< elastic deformation gradient
Fp !< plastic deformation gradient
real(pReal), intent(in) :: &
timestep !< substepped crystallite time increment
integer, intent(in) :: &
instance, &
of, &
ip, & !< current integration point
el !< current element number
integer :: &
ph, &
neighbor_instance, & !< instance of my neighbor's plasticity
ns, & !< short notation for the total number of active slip systems
c, & !< character of dislocation
n, & !< index of my current neighbor
neighbor_el, & !< element number of my neighbor
neighbor_ip, & !< integration point of my neighbor
neighbor_n, & !< neighbor index pointing to me when looking from my neighbor
opposite_neighbor, & !< index of my opposite neighbor
opposite_ip, & !< ip of my opposite neighbor
opposite_el, & !< element index of my opposite neighbor
opposite_n, & !< neighbor index pointing to me when looking from my opposite neighbor
t, & !< type of dislocation
no,& !< neighbor offset shortcut
np,& !< neighbor phase shortcut
topp, & !< type of dislocation with opposite sign to t
s !< index of my current slip system
real(pReal), dimension(param(instance)%sum_N_sl,10) :: &
rho, &
rho0, & !< dislocation density at beginning of time step
rhoDotFlux !< density evolution by flux
real(pReal), dimension(param(instance)%sum_N_sl,8) :: &
rhoSgl, & !< current single dislocation densities (positive/negative screw and edge without dipoles)
neighbor_rhoSgl0, & !< current single dislocation densities of neighboring ip (positive/negative screw and edge without dipoles)
my_rhoSgl0 !< single dislocation densities of central ip (positive/negative screw and edge without dipoles)
real(pReal), dimension(param(instance)%sum_N_sl,4) :: &
v, & !< current dislocation glide velocity
v0, &
neighbor_v0, & !< dislocation glide velocity of neighboring ip
gdot !< shear rates
real(pReal), dimension(3,param(instance)%sum_N_sl,4) :: &
m !< direction of dislocation motion
real(pReal), dimension(3,3) :: &
my_F, & !< my total deformation gradient
neighbor_F, & !< total deformation gradient of my neighbor
my_Fe, & !< my elastic deformation gradient
neighbor_Fe, & !< elastic deformation gradient of my neighbor
Favg !< average total deformation gradient of me and my neighbor
real(pReal), dimension(3) :: &
normal_neighbor2me, & !< interface normal pointing from my neighbor to me in neighbor's lattice configuration
normal_neighbor2me_defConf, & !< interface normal pointing from my neighbor to me in shared deformed configuration
normal_me2neighbor, & !< interface normal pointing from me to my neighbor in my lattice configuration
normal_me2neighbor_defConf !< interface normal pointing from me to my neighbor in shared deformed configuration
real(pReal) :: &
area, & !< area of the current interface
transmissivity, & !< overall transmissivity of dislocation flux to neighboring material point
lineLength !< dislocation line length leaving the current interface
ph = material_phaseAt(1,el)
associate(prm => param(instance), &
dst => microstructure(instance), &
dot => dotState(instance), &
stt => state(instance))
ns = prm%sum_N_sl
gdot = 0.0_pReal
rho = getRho(instance,of,ip,el)
rhoSgl = rho(:,sgl)
rho0 = getRho0(instance,of,ip,el)
my_rhoSgl0 = rho0(:,sgl)
forall (s = 1:ns, t = 1:4) v(s,t) = plasticState(ph)%state(iV(s,t,instance),of) !ToDo: MD: I think we should use state0 here
gdot = rhoSgl(:,1:4) * v * spread(prm%burgers,2,4)
forall (s = 1:ns, t = 1:4) v0(s,t) = plasticState(ph)%state0(iV(s,t,instance),of) forall (s = 1:ns, t = 1:4) v0(s,t) = plasticState(ph)%state0(iV(s,t,instance),of)
!**************************************************************************** !****************************************************************************
@ -1113,7 +1249,7 @@ module subroutine plastic_nonlocal_dotState(Mp, F, Fp, Temperature,timestep, &
write(6,'(a)') '<< CONST >> enforcing cutback !!!' write(6,'(a)') '<< CONST >> enforcing cutback !!!'
endif endif
#endif #endif
plasticState(ph)%dotState = IEEE_value(1.0_pReal,IEEE_quiet_NaN) ! -> return NaN and, hence, enforce cutback rhoDotFlux = IEEE_value(1.0_pReal,IEEE_quiet_NaN) ! enforce cutback
return return
endif endif
@ -1239,108 +1375,9 @@ module subroutine plastic_nonlocal_dotState(Mp, F, Fp, Temperature,timestep, &
enddo neighbors enddo neighbors
endif endif
!****************************************************************************
!*** calculate dipole formation and annihilation
!*** formation by glide
do c = 1,2
rhoDotSingle2DipoleGlide(:,2*c-1) = -2.0_pReal * dUpper(:,c) / prm%burgers &
* ( rhoSgl(:,2*c-1) * abs(gdot(:,2*c)) & ! negative mobile --> positive mobile
+ rhoSgl(:,2*c) * abs(gdot(:,2*c-1)) & ! positive mobile --> negative mobile
+ abs(rhoSgl(:,2*c+4)) * abs(gdot(:,2*c-1))) ! positive mobile --> negative immobile
rhoDotSingle2DipoleGlide(:,2*c) = -2.0_pReal * dUpper(:,c) / prm%burgers &
* ( rhoSgl(:,2*c-1) * abs(gdot(:,2*c)) & ! negative mobile --> positive mobile
+ rhoSgl(:,2*c) * abs(gdot(:,2*c-1)) & ! positive mobile --> negative mobile
+ abs(rhoSgl(:,2*c+3)) * abs(gdot(:,2*c))) ! negative mobile --> positive immobile
rhoDotSingle2DipoleGlide(:,2*c+3) = -2.0_pReal * dUpper(:,c) / prm%burgers &
* rhoSgl(:,2*c+3) * abs(gdot(:,2*c)) ! negative mobile --> positive immobile
rhoDotSingle2DipoleGlide(:,2*c+4) = -2.0_pReal * dUpper(:,c) / prm%burgers &
* rhoSgl(:,2*c+4) * abs(gdot(:,2*c-1)) ! positive mobile --> negative immobile
rhoDotSingle2DipoleGlide(:,c+8) = abs(rhoDotSingle2DipoleGlide(:,2*c+3)) &
+ abs(rhoDotSingle2DipoleGlide(:,2*c+4)) &
- rhoDotSingle2DipoleGlide(:,2*c-1) &
- rhoDotSingle2DipoleGlide(:,2*c)
enddo
!*** athermal annihilation
rhoDotAthermalAnnihilation = 0.0_pReal
forall (c=1:2) &
rhoDotAthermalAnnihilation(:,c+8) = -2.0_pReal * dLower(:,c) / prm%burgers &
* ( 2.0_pReal * (rhoSgl(:,2*c-1) * abs(gdot(:,2*c)) + rhoSgl(:,2*c) * abs(gdot(:,2*c-1))) & ! was single hitting single
+ 2.0_pReal * (abs(rhoSgl(:,2*c+3)) * abs(gdot(:,2*c)) + abs(rhoSgl(:,2*c+4)) * abs(gdot(:,2*c-1))) & ! was single hitting immobile single or was immobile single hit by single
+ rhoDip(:,c) * (abs(gdot(:,2*c-1)) + abs(gdot(:,2*c)))) ! single knocks dipole constituent
! annihilated screw dipoles leave edge jogs behind on the colinear system
if (lattice_structure(ph) == LATTICE_fcc_ID) &
forall (s = 1:ns, prm%colinearSystem(s) > 0) &
rhoDotAthermalAnnihilation(prm%colinearSystem(s),1:2) = - rhoDotAthermalAnnihilation(s,10) &
* 0.25_pReal * sqrt(stt%rho_forest(s,of)) * (dUpper(s,2) + dLower(s,2)) * prm%edgeJogFactor
!*** thermally activated annihilation of edge dipoles by climb
rhoDotThermalAnnihilation = 0.0_pReal
selfDiffusion = prm%Dsd0 * exp(-prm%selfDiffusionEnergy / (kB * Temperature))
vClimb = prm%atomicVolume * selfDiffusion * prm%mu &
/ ( kB * Temperature * PI * (1.0_pReal-prm%nu) * (dUpper(:,1) + dLower(:,1)))
forall (s = 1:ns, dUpper(s,1) > dLower(s,1)) &
rhoDotThermalAnnihilation(s,9) = max(- 4.0_pReal * rhoDip(s,1) * vClimb(s) / (dUpper(s,1) - dLower(s,1)), &
- rhoDip(s,1) / timestep - rhoDotAthermalAnnihilation(s,9) &
- rhoDotSingle2DipoleGlide(s,9)) ! make sure that we do not annihilate more dipoles than we have
rhoDot = rhoDotFlux &
+ rhoDotMultiplication &
+ rhoDotSingle2DipoleGlide &
+ rhoDotAthermalAnnihilation &
+ rhoDotThermalAnnihilation
#ifdef DEBUG
if (iand(debug_level(debug_constitutive),debug_levelExtensive) /= 0 &
.and. ((debug_e == el .and. debug_i == ip)&
.or. .not. iand(debug_level(debug_constitutive),debug_levelSelective) /= 0 )) then
write(6,'(a,/,4(12x,12(e12.5,1x),/))') '<< CONST >> dislocation multiplication', &
rhoDotMultiplication(:,1:4) * timestep
write(6,'(a,/,8(12x,12(e12.5,1x),/))') '<< CONST >> dislocation flux', &
rhoDotFlux(:,1:8) * timestep
write(6,'(a,/,10(12x,12(e12.5,1x),/))') '<< CONST >> dipole formation by glide', &
rhoDotSingle2DipoleGlide * timestep
write(6,'(a,/,10(12x,12(e12.5,1x),/))') '<< CONST >> athermal dipole annihilation', &
rhoDotAthermalAnnihilation * timestep
write(6,'(a,/,2(12x,12(e12.5,1x),/))') '<< CONST >> thermally activated dipole annihilation', &
rhoDotThermalAnnihilation(:,9:10) * timestep
write(6,'(a,/,10(12x,12(e12.5,1x),/))') '<< CONST >> total density change', &
rhoDot * timestep
write(6,'(a,/,10(12x,12(f12.5,1x),/))') '<< CONST >> relative density change', &
rhoDot(:,1:8) * timestep / (abs(stt%rho(:,sgl))+1.0e-10), &
rhoDot(:,9:10) * timestep / (stt%rho(:,dip)+1.0e-10)
write(6,*)
endif
#endif
if ( any(rho(:,mob) + rhoDot(:,1:4) * timestep < -prm%atol_rho) &
.or. any(rho(:,dip) + rhoDot(:,9:10) * timestep < -prm%atol_rho)) then
#ifdef DEBUG
if (iand(debug_level(debug_constitutive),debug_levelExtensive) /= 0) then
write(6,'(a,i5,a,i2)') '<< CONST >> evolution rate leads to negative density at el ',el,' ip ',ip
write(6,'(a)') '<< CONST >> enforcing cutback !!!'
endif
#endif
plasticState(ph)%dotState = IEEE_value(1.0_pReal,IEEE_quiet_NaN)
else
dot%rho(:,of) = pack(rhoDot,.true.)
dot%gamma(:,of) = sum(gdot,2)
endif
end associate end associate
end subroutine plastic_nonlocal_dotState end function rhoDotFlux
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
View File
@ -213,7 +213,7 @@ module subroutine plastic_phenopowerlaw_init
+ size(['xi_tw ','gamma_tw']) * prm%sum_N_tw + size(['xi_tw ','gamma_tw']) * prm%sum_N_tw
sizeState = sizeDotState sizeState = sizeDotState
call material_allocatePlasticState(p,NipcMyPhase,sizeState,sizeDotState,0) call material_allocateState(plasticState(p),NipcMyPhase,sizeState,sizeDotState,0)
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! state aliases and initialization ! state aliases and initialization
File diff suppressed because it is too large
View File
@ -11,7 +11,6 @@ module material
use results use results
use IO use IO
use debug use debug
use numerics
use rotations use rotations
use discretization use discretization
@ -174,8 +173,7 @@ module material
public :: & public :: &
material_init, & material_init, &
material_allocatePlasticState, & material_allocateState, &
material_allocateSourceState, &
ELASTICITY_HOOKE_ID ,& ELASTICITY_HOOKE_ID ,&
PLASTICITY_NONE_ID, & PLASTICITY_NONE_ID, &
PLASTICITY_ISOTROPIC_ID, & PLASTICITY_ISOTROPIC_ID, &
@ -700,63 +698,35 @@ end subroutine material_parseTexture
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
!> @brief allocates the plastic state of a phase !> @brief Allocate the components of the state structure for a given phase
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
subroutine material_allocatePlasticState(phase,NipcMyPhase,& subroutine material_allocateState(state, &
sizeState,sizeDotState,sizeDeltaState) NipcMyPhase,sizeState,sizeDotState,sizeDeltaState)
class(tState), intent(out) :: &
state
integer, intent(in) :: & integer, intent(in) :: &
phase, &
NipcMyPhase, & NipcMyPhase, &
sizeState, & sizeState, &
sizeDotState, & sizeDotState, &
sizeDeltaState sizeDeltaState
plasticState(phase)%sizeState = sizeState state%sizeState = sizeState
plasticState(phase)%sizeDotState = sizeDotState state%sizeDotState = sizeDotState
plasticState(phase)%sizeDeltaState = sizeDeltaState state%sizeDeltaState = sizeDeltaState
plasticState(phase)%offsetDeltaState = sizeState-sizeDeltaState ! deltaState occupies latter part of state by definition state%offsetDeltaState = sizeState-sizeDeltaState ! deltaState occupies latter part of state by definition
allocate(plasticState(phase)%atol (sizeState), source=0.0_pReal) allocate(state%atol (sizeState), source=0.0_pReal)
allocate(plasticState(phase)%state0 (sizeState,NipcMyPhase), source=0.0_pReal) allocate(state%state0 (sizeState,NipcMyPhase), source=0.0_pReal)
allocate(plasticState(phase)%partionedState0 (sizeState,NipcMyPhase), source=0.0_pReal) allocate(state%partionedState0(sizeState,NipcMyPhase), source=0.0_pReal)
allocate(plasticState(phase)%subState0 (sizeState,NipcMyPhase), source=0.0_pReal) allocate(state%subState0 (sizeState,NipcMyPhase), source=0.0_pReal)
allocate(plasticState(phase)%state (sizeState,NipcMyPhase), source=0.0_pReal) allocate(state%state (sizeState,NipcMyPhase), source=0.0_pReal)
allocate(plasticState(phase)%dotState (sizeDotState,NipcMyPhase),source=0.0_pReal) allocate(state%dotState (sizeDotState,NipcMyPhase), source=0.0_pReal)
allocate(plasticState(phase)%deltaState (sizeDeltaState,NipcMyPhase),source=0.0_pReal) allocate(state%deltaState(sizeDeltaState,NipcMyPhase), source=0.0_pReal)
end subroutine material_allocatePlasticState end subroutine material_allocateState
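material_allocateState now works on any tState instance instead of indexing plasticState(phase), so one allocator serves both plastic and source states. A minimal Python sketch of such a type-agnostic allocator (illustrative names only):

import numpy as np

class State:
    """Minimal stand-in for the Fortran tState container (illustrative only)."""

def allocate_state(state, n_ipc, size_state, size_dot, size_delta):
    """One allocator for any state object, analogous to material_allocateState above."""
    state.offset_delta = size_state - size_delta     # deltaState occupies the latter part of state
    state.state0       = np.zeros((size_state, n_ipc))
    state.state        = np.zeros((size_state, n_ipc))
    state.dot_state    = np.zeros((size_dot,   n_ipc))
    state.delta_state  = np.zeros((size_delta, n_ipc))
    return state

plastic = allocate_state(State(), n_ipc=8, size_state=5, size_dot=5, size_delta=0)
source  = allocate_state(State(), n_ipc=8, size_state=1, size_dot=1, size_delta=0)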
!--------------------------------------------------------------------------------------------------
!> @brief allocates the source state of a phase
!--------------------------------------------------------------------------------------------------
subroutine material_allocateSourceState(phase,of,NipcMyPhase,&
sizeState,sizeDotState,sizeDeltaState)
integer, intent(in) :: &
phase, &
of, &
NipcMyPhase, &
sizeState, sizeDotState,sizeDeltaState
sourceState(phase)%p(of)%sizeState = sizeState
sourceState(phase)%p(of)%sizeDotState = sizeDotState
sourceState(phase)%p(of)%sizeDeltaState = sizeDeltaState
sourceState(phase)%p(of)%offsetDeltaState = sizeState-sizeDeltaState ! deltaState occupies latter part of state by definition
allocate(sourceState(phase)%p(of)%atol (sizeState), source=0.0_pReal)
allocate(sourceState(phase)%p(of)%state0 (sizeState,NipcMyPhase), source=0.0_pReal)
allocate(sourceState(phase)%p(of)%partionedState0 (sizeState,NipcMyPhase), source=0.0_pReal)
allocate(sourceState(phase)%p(of)%subState0 (sizeState,NipcMyPhase), source=0.0_pReal)
allocate(sourceState(phase)%p(of)%state (sizeState,NipcMyPhase), source=0.0_pReal)
allocate(sourceState(phase)%p(of)%dotState (sizeDotState,NipcMyPhase),source=0.0_pReal)
allocate(sourceState(phase)%p(of)%deltaState (sizeDeltaState,NipcMyPhase),source=0.0_pReal)
end subroutine material_allocateSourceState
end module material end module material
View File
@ -20,8 +20,7 @@ module numerics
iJacoStiffness = 1, & !< frequency of stiffness update iJacoStiffness = 1, & !< frequency of stiffness update
randomSeed = 0, & !< fixed seeding for pseudo-random number generator, Default 0: use random seed randomSeed = 0, & !< fixed seeding for pseudo-random number generator, Default 0: use random seed
worldrank = 0, & !< MPI worldrank (/=0 for MPI simulations only) worldrank = 0, & !< MPI worldrank (/=0 for MPI simulations only)
worldsize = 1, & !< MPI worldsize (/=1 for MPI simulations only) worldsize = 1 !< MPI worldsize (/=1 for MPI simulations only)
numerics_integrator = 1 !< method used for state integration Default 1: fix-point iteration
integer(4), protected, public :: & integer(4), protected, public :: &
DAMASK_NumThreadsInt = 0 !< value stored in environment variable DAMASK_NUM_THREADS, set to zero if no OpenMP directive DAMASK_NumThreadsInt = 0 !< value stored in environment variable DAMASK_NUM_THREADS, set to zero if no OpenMP directive
real(pReal), protected, public :: & real(pReal), protected, public :: &
@ -134,8 +133,6 @@ subroutine numerics_init
defgradTolerance = IO_floatValue(line,chunkPos,2) defgradTolerance = IO_floatValue(line,chunkPos,2)
case ('ijacostiffness') case ('ijacostiffness')
iJacoStiffness = IO_intValue(line,chunkPos,2) iJacoStiffness = IO_intValue(line,chunkPos,2)
case ('integrator')
numerics_integrator = IO_intValue(line,chunkPos,2)
case ('usepingpong') case ('usepingpong')
usepingpong = IO_intValue(line,chunkPos,2) > 0 usepingpong = IO_intValue(line,chunkPos,2) > 0
case ('unitlength') case ('unitlength')
@ -176,6 +173,11 @@ subroutine numerics_init
case ('maxstaggerediter') case ('maxstaggerediter')
stagItMax = IO_intValue(line,chunkPos,2) stagItMax = IO_intValue(line,chunkPos,2)
#ifdef PETSC
case ('petsc_options')
petsc_options = trim(line(chunkPos(4):))
#endif
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! spectral parameters ! spectral parameters
#ifdef Grid #ifdef Grid
@ -187,8 +189,6 @@ subroutine numerics_init
err_stress_tolrel = IO_floatValue(line,chunkPos,2) err_stress_tolrel = IO_floatValue(line,chunkPos,2)
case ('err_stress_tolabs') case ('err_stress_tolabs')
err_stress_tolabs = IO_floatValue(line,chunkPos,2) err_stress_tolabs = IO_floatValue(line,chunkPos,2)
case ('petsc_options')
petsc_options = trim(line(chunkPos(4):))
case ('err_curl_tolabs') case ('err_curl_tolabs')
err_curl_tolAbs = IO_floatValue(line,chunkPos,2) err_curl_tolAbs = IO_floatValue(line,chunkPos,2)
case ('err_curl_tolrel') case ('err_curl_tolrel')
@ -206,8 +206,6 @@ subroutine numerics_init
integrationorder = IO_intValue(line,chunkPos,2) integrationorder = IO_intValue(line,chunkPos,2)
case ('structorder') case ('structorder')
structorder = IO_intValue(line,chunkPos,2) structorder = IO_intValue(line,chunkPos,2)
case ('petsc_options')
petsc_options = trim(line(chunkPos(4):))
case ('bbarstabilisation') case ('bbarstabilisation')
BBarStabilisation = IO_intValue(line,chunkPos,2) > 0 BBarStabilisation = IO_intValue(line,chunkPos,2) > 0
#endif #endif
@ -223,7 +221,6 @@ subroutine numerics_init
! writing parameters to output ! writing parameters to output
write(6,'(a24,1x,es8.1)') ' defgradTolerance: ',defgradTolerance write(6,'(a24,1x,es8.1)') ' defgradTolerance: ',defgradTolerance
write(6,'(a24,1x,i8)') ' iJacoStiffness: ',iJacoStiffness write(6,'(a24,1x,i8)') ' iJacoStiffness: ',iJacoStiffness
write(6,'(a24,1x,i8)') ' integrator: ',numerics_integrator
write(6,'(a24,1x,L8)') ' use ping pong scheme: ',usepingpong write(6,'(a24,1x,L8)') ' use ping pong scheme: ',usepingpong
write(6,'(a24,1x,es8.1,/)')' unitlength: ',numerics_unitlength write(6,'(a24,1x,es8.1,/)')' unitlength: ',numerics_unitlength
@ -266,7 +263,6 @@ subroutine numerics_init
write(6,'(a24,1x,es8.1)') ' err_curl_tolRel: ',err_curl_tolRel write(6,'(a24,1x,es8.1)') ' err_curl_tolRel: ',err_curl_tolRel
write(6,'(a24,1x,es8.1)') ' polarAlpha: ',polarAlpha write(6,'(a24,1x,es8.1)') ' polarAlpha: ',polarAlpha
write(6,'(a24,1x,es8.1)') ' polarBeta: ',polarBeta write(6,'(a24,1x,es8.1)') ' polarBeta: ',polarBeta
write(6,'(a24,1x,a)') ' PETSc_options: ',trim(petsc_options)
#endif #endif
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
@ -274,16 +270,17 @@ subroutine numerics_init
#ifdef FEM #ifdef FEM
write(6,'(a24,1x,i8)') ' integrationOrder: ',integrationOrder write(6,'(a24,1x,i8)') ' integrationOrder: ',integrationOrder
write(6,'(a24,1x,i8)') ' structOrder: ',structOrder write(6,'(a24,1x,i8)') ' structOrder: ',structOrder
write(6,'(a24,1x,a)') ' PETSc_options: ',trim(petsc_options)
write(6,'(a24,1x,L8)') ' B-Bar stabilisation: ',BBarStabilisation write(6,'(a24,1x,L8)') ' B-Bar stabilisation: ',BBarStabilisation
#endif #endif
#ifdef PETSC
write(6,'(a24,1x,a)') ' PETSc_options: ',trim(petsc_options)
#endif
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! sanity checks ! sanity checks
if (defgradTolerance <= 0.0_pReal) call IO_error(301,ext_msg='defgradTolerance') if (defgradTolerance <= 0.0_pReal) call IO_error(301,ext_msg='defgradTolerance')
if (iJacoStiffness < 1) call IO_error(301,ext_msg='iJacoStiffness') if (iJacoStiffness < 1) call IO_error(301,ext_msg='iJacoStiffness')
if (numerics_integrator <= 0 .or. numerics_integrator >= 6) &
call IO_error(301,ext_msg='integrator')
if (numerics_unitlength <= 0.0_pReal) call IO_error(301,ext_msg='unitlength') if (numerics_unitlength <= 0.0_pReal) call IO_error(301,ext_msg='unitlength')
if (residualStiffness < 0.0_pReal) call IO_error(301,ext_msg='residualStiffness') if (residualStiffness < 0.0_pReal) call IO_error(301,ext_msg='residualStiffness')
if (itmax <= 1) call IO_error(301,ext_msg='itmax') if (itmax <= 1) call IO_error(301,ext_msg='itmax')
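For orientation, numerics.config lines are keyword/value pairs, and with this change the petsc_options keyword is handled once under #ifdef PETSC instead of separately per solver. A tiny, purely illustrative Python sketch of that keyword/value split (the option string shown is made up):

def parse_option(line):
    """Sketch of a numerics.config line: the first chunk is the keyword, the remainder is its value."""
    key, _, value = line.strip().partition(' ')
    return key.lower(), value.strip()

print(parse_option('petsc_options -mech_snes_type newtonls -mech_ksp_type fgmres'))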
View File
@ -53,8 +53,7 @@ module prec
logical :: & logical :: &
nonlocal = .false. nonlocal = .false.
real(pReal), pointer, dimension(:,:) :: & real(pReal), pointer, dimension(:,:) :: &
slipRate, & !< slip rate slipRate !< slip rate
accumulatedSlip !< accumulated plastic slip
end type end type
type :: tSourceState type :: tSourceState
View File
@ -107,7 +107,7 @@ subroutine source_damage_anisoBrittle_init
if (any(prm%critDisp < 0.0_pReal)) extmsg = trim(extmsg)//' anisobrittle_critDisp' if (any(prm%critDisp < 0.0_pReal)) extmsg = trim(extmsg)//' anisobrittle_critDisp'
NipcMyPhase = count(material_phaseAt==p) * discretization_nIP NipcMyPhase = count(material_phaseAt==p) * discretization_nIP
call material_allocateSourceState(p,sourceOffset,NipcMyPhase,1,1,0) call material_allocateState(sourceState(p)%p(sourceOffset),NipcMyPhase,1,1,0)
sourceState(p)%p(sourceOffset)%atol = config%getFloat('anisobrittle_atol',defaultVal=1.0e-3_pReal) sourceState(p)%p(sourceOffset)%atol = config%getFloat('anisobrittle_atol',defaultVal=1.0e-3_pReal)
if(any(sourceState(p)%p(sourceOffset)%atol < 0.0_pReal)) extmsg = trim(extmsg)//' anisobrittle_atol' if(any(sourceState(p)%p(sourceOffset)%atol < 0.0_pReal)) extmsg = trim(extmsg)//' anisobrittle_atol'
View File
@ -89,7 +89,7 @@ subroutine source_damage_anisoDuctile_init
if (any(prm%critPlasticStrain < 0.0_pReal)) extmsg = trim(extmsg)//' anisoductile_criticalplasticstrain' if (any(prm%critPlasticStrain < 0.0_pReal)) extmsg = trim(extmsg)//' anisoductile_criticalplasticstrain'
NipcMyPhase=count(material_phaseAt==p) * discretization_nIP NipcMyPhase=count(material_phaseAt==p) * discretization_nIP
call material_allocateSourceState(p,sourceOffset,NipcMyPhase,1,1,0) call material_allocateState(sourceState(p)%p(sourceOffset),NipcMyPhase,1,1,0)
sourceState(p)%p(sourceOffset)%atol = config%getFloat('anisoductile_atol',defaultVal=1.0e-3_pReal) sourceState(p)%p(sourceOffset)%atol = config%getFloat('anisoductile_atol',defaultVal=1.0e-3_pReal)
if(any(sourceState(p)%p(sourceOffset)%atol < 0.0_pReal)) extmsg = trim(extmsg)//' anisoductile_atol' if(any(sourceState(p)%p(sourceOffset)%atol < 0.0_pReal)) extmsg = trim(extmsg)//' anisoductile_atol'
View File
@ -83,7 +83,7 @@ subroutine source_damage_isoBrittle_init
if (prm%critStrainEnergy <= 0.0_pReal) extmsg = trim(extmsg)//' isobrittle_criticalstrainenergy' if (prm%critStrainEnergy <= 0.0_pReal) extmsg = trim(extmsg)//' isobrittle_criticalstrainenergy'
NipcMyPhase = count(material_phaseAt==p) * discretization_nIP NipcMyPhase = count(material_phaseAt==p) * discretization_nIP
call material_allocateSourceState(p,sourceOffset,NipcMyPhase,1,1,1) call material_allocateState(sourceState(p)%p(sourceOffset),NipcMyPhase,1,1,1)
sourceState(p)%p(sourceOffset)%atol = config%getFloat('isobrittle_atol',defaultVal=1.0e-3_pReal) sourceState(p)%p(sourceOffset)%atol = config%getFloat('isobrittle_atol',defaultVal=1.0e-3_pReal)
if(any(sourceState(p)%p(sourceOffset)%atol < 0.0_pReal)) extmsg = trim(extmsg)//' isobrittle_atol' if(any(sourceState(p)%p(sourceOffset)%atol < 0.0_pReal)) extmsg = trim(extmsg)//' isobrittle_atol'
View File
@ -82,7 +82,7 @@ subroutine source_damage_isoDuctile_init
if (prm%critPlasticStrain <= 0.0_pReal) extmsg = trim(extmsg)//' isoductile_criticalplasticstrain' if (prm%critPlasticStrain <= 0.0_pReal) extmsg = trim(extmsg)//' isoductile_criticalplasticstrain'
NipcMyPhase=count(material_phaseAt==p) * discretization_nIP NipcMyPhase=count(material_phaseAt==p) * discretization_nIP
call material_allocateSourceState(p,sourceOffset,NipcMyPhase,1,1,0) call material_allocateState(sourceState(p)%p(sourceOffset),NipcMyPhase,1,1,0)
sourceState(p)%p(sourceOffset)%atol = config%getFloat('isoductile_atol',defaultVal=1.0e-3_pReal) sourceState(p)%p(sourceOffset)%atol = config%getFloat('isoductile_atol',defaultVal=1.0e-3_pReal)
if(any(sourceState(p)%p(sourceOffset)%atol < 0.0_pReal)) extmsg = trim(extmsg)//' isoductile_atol' if(any(sourceState(p)%p(sourceOffset)%atol < 0.0_pReal)) extmsg = trim(extmsg)//' isoductile_atol'
View File
@ -67,7 +67,7 @@ subroutine source_thermal_dissipation_init
prm%kappa = config%getFloat('dissipation_coldworkcoeff') prm%kappa = config%getFloat('dissipation_coldworkcoeff')
NipcMyPhase = count(material_phaseAt==p) * discretization_nIP NipcMyPhase = count(material_phaseAt==p) * discretization_nIP
call material_allocateSourceState(p,sourceOffset,NipcMyPhase,0,0,0) call material_allocateState(sourceState(p)%p(sourceOffset),NipcMyPhase,0,0,0)
end associate end associate
enddo enddo
View File
@ -74,7 +74,7 @@ subroutine source_thermal_externalheat_init
prm%heat_rate = config%getFloats('externalheat_rate',requiredSize = size(prm%time)) prm%heat_rate = config%getFloats('externalheat_rate',requiredSize = size(prm%time))
NipcMyPhase = count(material_phaseAt==p) * discretization_nIP NipcMyPhase = count(material_phaseAt==p) * discretization_nIP
call material_allocateSourceState(p,sourceOffset,NipcMyPhase,1,1,0) call material_allocateState(sourceState(p)%p(sourceOffset),NipcMyPhase,1,1,0)
end associate end associate
enddo enddo