Merge branch 'development' into unified-RK4-RKCK45

Author: Martin Diehl, 2020-04-24 06:29:51 +02:00
Commit: 49963b9f04
21 changed files with 288 additions and 224 deletions

View File

@ -203,7 +203,6 @@ Post_OrientationConversion:
stage: postprocessing
script:
- OrientationConversion/test.py
- OrientationConversion/test2.py
except:
- master
- release

View File

@ -1 +1 @@
v2.0.3-2303-g2a6132b7
v2.0.3-2364-g62f7363a

View File

@ -33,7 +33,7 @@ for filename in options.filenames:
results = damask.Result(filename)
if not results.structured: continue
coords = damask.grid_filters.cell_coord0(results.grid,results.size,results.origin)
coords = damask.grid_filters.cell_coord0(results.grid,results.size,results.origin).reshape(-1,3,order='F')
N_digits = int(np.floor(np.log10(int(results.increments[-1][3:]))))+1
N_digits = 5 # hack to keep test intact

View File

@ -16,8 +16,8 @@ scriptID = ' '.join([scriptName,damask.version])
def volTetrahedron(coords):
"""
Return the volume of the tetrahedron with given vertices or sides.
Ifvertices are given they must be in a NumPy array with shape (4,3): the
If vertices are given they must be in a NumPy array with shape (4,3): the
position vectors of the 4 vertices in 3 dimensions; if the six sides are
given, they must be an array of length 6. If both are given, the sides
will be used in the calculation.
@ -62,19 +62,18 @@ def volTetrahedron(coords):
def volumeMismatch(size,F,nodes):
"""
Calculates the volume mismatch.
volume mismatch is defined as the difference between volume of reconstructed
volume mismatch is defined as the difference between volume of reconstructed
(compatible) cube and determinant of deformation gradient at Fourier point.
"""
coords = np.empty([8,3])
vMismatch = np.empty(grid[::-1])
volInitial = size.prod()/grid.prod()
vMismatch = np.empty(F.shape[:3])
#--------------------------------------------------------------------------------------------------
# calculate actual volume and volume resulting from deformation gradient
for k in range(grid[2]):
for k in range(grid[0]):
for j in range(grid[1]):
for i in range(grid[0]):
for i in range(grid[2]):
coords[0,0:3] = nodes[k, j, i ,0:3]
coords[1,0:3] = nodes[k ,j, i+1,0:3]
coords[2,0:3] = nodes[k ,j+1,i+1,0:3]
@ -91,47 +90,45 @@ def volumeMismatch(size,F,nodes):
+ abs(volTetrahedron([coords[6,0:3],coords[4,0:3],coords[1,0:3],coords[5,0:3]])) \
+ abs(volTetrahedron([coords[6,0:3],coords[4,0:3],coords[1,0:3],coords[0,0:3]]))) \
/np.linalg.det(F[k,j,i,0:3,0:3])
return vMismatch/volInitial
return vMismatch/(size.prod()/grid.prod())
def shapeMismatch(size,F,nodes,centres):
"""
Routine to calculate the shape mismatch.
shape mismatch is defined as difference between the vectors from the central point to
the corners of reconstructed (combatible) volume element and the vectors calculated by deforming
the initial volume element with the current deformation gradient.
"""
coordsInitial = np.empty([8,3])
sMismatch = np.empty(grid[::-1])
sMismatch = np.empty(F.shape[:3])
#--------------------------------------------------------------------------------------------------
# initial positions
coordsInitial[0,0:3] = [-size[0]/grid[0],-size[1]/grid[1],-size[2]/grid[2]]
coordsInitial[1,0:3] = [+size[0]/grid[0],-size[1]/grid[1],-size[2]/grid[2]]
coordsInitial[2,0:3] = [+size[0]/grid[0],+size[1]/grid[1],-size[2]/grid[2]]
coordsInitial[3,0:3] = [-size[0]/grid[0],+size[1]/grid[1],-size[2]/grid[2]]
coordsInitial[4,0:3] = [-size[0]/grid[0],-size[1]/grid[1],+size[2]/grid[2]]
coordsInitial[5,0:3] = [+size[0]/grid[0],-size[1]/grid[1],+size[2]/grid[2]]
coordsInitial[6,0:3] = [+size[0]/grid[0],+size[1]/grid[1],+size[2]/grid[2]]
coordsInitial[7,0:3] = [-size[0]/grid[0],+size[1]/grid[1],+size[2]/grid[2]]
coordsInitial = coordsInitial/2.0
delta = size/grid*.5
coordsInitial = np.vstack((delta * np.array((-1,-1,-1)),
delta * np.array((+1,-1,-1)),
delta * np.array((+1,+1,-1)),
delta * np.array((-1,+1,-1)),
delta * np.array((-1,-1,+1)),
delta * np.array((+1,-1,+1)),
delta * np.array((+1,+1,+1)),
delta * np.array((-1,+1,+1))))
#--------------------------------------------------------------------------------------------------
# compare deformed original and deformed positions to actual positions
for k in range(grid[2]):
for k in range(grid[0]):
for j in range(grid[1]):
for i in range(grid[0]):
for i in range(grid[2]):
sMismatch[k,j,i] = \
+ np.linalg.norm(nodes[k, j, i ,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[0,0:3]))\
+ np.linalg.norm(nodes[k, j, i+1,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[1,0:3]))\
+ np.linalg.norm(nodes[k, j+1,i+1,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[2,0:3]))\
+ np.linalg.norm(nodes[k+1,j, i ,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[1,0:3]))\
+ np.linalg.norm(nodes[k+1,j+1,i ,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[2,0:3]))\
+ np.linalg.norm(nodes[k, j+1,i ,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[3,0:3]))\
+ np.linalg.norm(nodes[k+1,j, i ,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[4,0:3]))\
+ np.linalg.norm(nodes[k, j, i+1,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[4,0:3]))\
+ np.linalg.norm(nodes[k+1,j, i+1,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[5,0:3]))\
+ np.linalg.norm(nodes[k+1,j+1,i+1,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[6,0:3]))\
+ np.linalg.norm(nodes[k+1,j+1,i ,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[7,0:3]))
+ np.linalg.norm(nodes[k ,j+1,i+1,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[7,0:3]))
return sMismatch
@ -174,24 +171,24 @@ if filenames == []: filenames = [None]
for name in filenames:
damask.util.report(scriptName,name)
table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
grid,size,origin = damask.grid_filters.cell_coord0_gridSizeOrigin(table.get(options.pos))
F = table.get(options.defgrad).reshape(grid[2],grid[1],grid[0],3,3)
F = table.get(options.defgrad).reshape(tuple(grid)+(-1,),order='F').reshape(tuple(grid)+(3,3))
nodes = damask.grid_filters.node_coord(size,F)
if options.shape:
centers = damask.grid_filters.cell_coord(size,F)
shapeMismatch = shapeMismatch( size,table.get(options.defgrad).reshape(grid[2],grid[1],grid[0],3,3),nodes,centers)
shapeMismatch = shapeMismatch(size,F,nodes,centers)
table.add('shapeMismatch(({}))'.format(options.defgrad),
shapeMismatch.reshape(-1,1),
shapeMismatch.reshape(-1,1,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))
if options.volume:
volumeMismatch = volumeMismatch(size,table.get(options.defgrad).reshape(grid[2],grid[1],grid[0],3,3),nodes)
volumeMismatch = volumeMismatch(size,F,nodes)
table.add('volMismatch(({}))'.format(options.defgrad),
volumeMismatch.reshape(-1,1),
volumeMismatch.reshape(-1,1,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))
table.to_ASCII(sys.stdout if name is None else name)
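
For orientation: volumeMismatch compares the volume of the reconstructed cell, assembled from tetrahedra spanned by the nodal coordinates, against the volume implied by the deformation gradient, i.e. det(F) times the undeformed cell volume. A minimal sketch of that reference quantity, with made-up grid and size (not the script's actual inputs):

    import numpy as np

    size = np.ones(3)                                        # assumed physical box size
    grid = np.array([16, 16, 16])                            # assumed number of cells
    F    = np.broadcast_to(np.eye(3), tuple(grid) + (3, 3))  # dummy deformation gradient field

    vol_initial  = size.prod() / grid.prod()                 # undeformed volume of one cell
    vol_deformed = np.linalg.det(F) * vol_initial            # per-cell volume predicted by det(F)
    # volumeMismatch above divides the reconstructed (tetrahedral) volume by this prediction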

View File

@ -49,9 +49,10 @@ for name in filenames:
for label in options.labels:
field = table.get(label)
shape = (3,) if np.prod(field.shape)//np.prod(grid) == 3 else (3,3) # vector or tensor
field = field.reshape(np.append(grid[::-1],shape))
field = field.reshape(tuple(grid)+(-1,),order='F').reshape(tuple(grid)+shape)
curl = damask.grid_filters.curl(size,field)
table.add('curlFFT({})'.format(label),
damask.grid_filters.curl(size[::-1],field).reshape(-1,np.prod(shape)),
curl.reshape(tuple(grid)+(-1,)).reshape(-1,np.prod(shape),order='F'),
scriptID+' '+' '.join(sys.argv[1:]))
table.to_ASCII(sys.stdout if name is None else name)
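
The reshape pattern introduced here (and used analogously in the divergence and gradient scripts below) is the Fortran-order round trip documented in grid_filters: bring the flat table column onto the x-fastest grid, apply the spectral operator, flatten back. A minimal sketch with arbitrary grid, size, and random tensor data:

    import numpy as np
    import damask

    grid, size = np.array([8, 10, 12]), np.ones(3)          # assumed grid and physical size
    data = np.random.rand(grid.prod(), 9)                   # flat 3x3 tensor field, one row per cell

    field = data.reshape(tuple(grid) + (-1,), order='F').reshape(tuple(grid) + (3, 3))
    curl  = damask.grid_filters.curl(size, field)           # shape (grid) + (3, 3)
    flat  = curl.reshape(tuple(grid) + (-1,)).reshape(-1, 9, order='F')  # back to one row per cell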

View File

@ -5,8 +5,6 @@ import sys
from io import StringIO
from optparse import OptionParser
import numpy as np
import damask
@ -51,23 +49,23 @@ for name in filenames:
table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
grid,size,origin = damask.grid_filters.cell_coord0_gridSizeOrigin(table.get(options.pos))
F = table.get(options.f).reshape(np.append(grid[::-1],(3,3)))
F = table.get(options.f).reshape(tuple(grid)+(-1,),order='F').reshape(tuple(grid)+(3,3))
if options.nodal:
table = damask.Table(damask.grid_filters.node_coord0(grid[::-1],size[::-1]).reshape(-1,3),
table = damask.Table(damask.grid_filters.node_coord0(grid,size).reshape(-1,3,order='F'),
{'pos':(3,)})
table.add('avg({}).{}'.format(options.f,options.pos),
damask.grid_filters.node_displacement_avg(size[::-1],F).reshape(-1,3),
damask.grid_filters.node_displacement_avg(size,F).reshape(-1,3,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))
table.add('fluct({}).{}'.format(options.f,options.pos),
damask.grid_filters.node_displacement_fluct(size[::-1],F).reshape(-1,3),
damask.grid_filters.node_displacement_fluct(size,F).reshape(-1,3,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))
table.to_ASCII(sys.stdout if name is None else os.path.splitext(name)[0]+'_nodal.txt')
else:
table.add('avg({}).{}'.format(options.f,options.pos),
damask.grid_filters.cell_displacement_avg(size[::-1],F).reshape(-1,3),
damask.grid_filters.cell_displacement_avg(size,F).reshape(-1,3,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))
table.add('fluct({}).{}'.format(options.f,options.pos),
damask.grid_filters.cell_displacement_fluct(size[::-1],F).reshape(-1,3),
damask.grid_filters.cell_displacement_fluct(size,F).reshape(-1,3,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))
table.to_ASCII(sys.stdout if name is None else name)
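
For context, the displacements written by this script split into an average part, driven by the mean deformation gradient, and a fluctuation part from the spectral solution. The average contribution alone is essentially (mirroring cell_displacement_avg further down in grid_filters.py):

    import numpy as np

    # minimal sketch; x0 are undeformed cell (or node) coordinates of shape (grid) + (3,)
    def displacement_avg(x0, F):
        F_avg = np.average(F, axis=(0, 1, 2))               # mean deformation gradient
        return np.einsum('ml,ijkl->ijkm', F_avg - np.eye(3), x0)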

View File

@ -49,9 +49,10 @@ for name in filenames:
for label in options.labels:
field = table.get(label)
shape = (3,) if np.prod(field.shape)//np.prod(grid) == 3 else (3,3) # vector or tensor
field = field.reshape(np.append(grid[::-1],shape))
field = field.reshape(tuple(grid)+(-1,),order='F').reshape(tuple(grid)+shape)
div = damask.grid_filters.divergence(size,field)
table.add('divFFT({})'.format(label),
damask.grid_filters.divergence(size[::-1],field).reshape(-1,np.prod(shape)//3),
div.reshape(tuple(grid)+(-1,)).reshape(-1,np.prod(shape)//3,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))
table.to_ASCII(sys.stdout if name is None else name)

View File

@ -49,9 +49,10 @@ for name in filenames:
for label in options.labels:
field = table.get(label)
shape = (1,) if np.prod(field.shape)//np.prod(grid) == 1 else (3,) # scalar or vector
field = field.reshape(np.append(grid[::-1],shape))
field = field.reshape(tuple(grid)+(-1,),order='F')
grad = damask.grid_filters.gradient(size,field)
table.add('gradFFT({})'.format(label),
damask.grid_filters.gradient(size[::-1],field).reshape(-1,np.prod(shape)*3),
grad.reshape(tuple(grid)+(-1,)).reshape(-1,np.prod(shape)*3,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))
table.to_ASCII(sys.stdout if name is None else name)

View File

@ -61,7 +61,7 @@ if any(shift != 0): prefix += 'shift{:+}{:+}{:+}_'.format(*shift)
for name in filenames:
damask.util.report(scriptName,name)
table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
if (options.grid is None or options.size is None):
@ -87,11 +87,11 @@ for name in filenames:
origin = list(-(packing//2)) + [0])\
[::packing[0],::packing[1],::packing[2],:].reshape((packedGrid.prod(),-1),order = 'F')
table = damask.Table(averagedDown,table.shapes,table.comments)
coords = damask.grid_filters.cell_coord0(packedGrid,size,shift/packedGrid*size+origin)
table.set(options.pos, coords.reshape(-1,3))
table.set(options.pos, coords.reshape(-1,3,order='F'))
outname = os.path.join(os.path.dirname(name),prefix+os.path.basename(name))

View File

@ -59,13 +59,13 @@ for name in filenames:
packing = np.array(options.packing,'i')
outSize = grid*packing
data = table.data.values.reshape(tuple(grid)+(-1,))
blownUp = ndimage.interpolation.zoom(data,tuple(packing)+(1,),order=0,mode='nearest').reshape(outSize.prod(),-1)
data = table.data.values.reshape(tuple(grid)+(-1,),order='F')
blownUp = ndimage.interpolation.zoom(data,tuple(packing)+(1,),order=0,mode='nearest').reshape(outSize.prod(),-1,order='F')
table = damask.Table(blownUp,table.shapes,table.comments)
coords = damask.grid_filters.cell_coord0(outSize,size,origin)
table.set(options.pos,coords.reshape(-1,3))
table.set(options.pos,coords.reshape(-1,3,order='F'))
table.set('elem',np.arange(1,outSize.prod()+1))
outname = os.path.join(os.path.dirname(name),prefix+os.path.basename(name))
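
The blow-up itself is a nearest-neighbour zoom of the x-fastest data block; a self-contained sketch with assumed shapes:

    import numpy as np
    from scipy import ndimage

    grid, packing = np.array([4, 5, 6]), np.array([2, 2, 2])      # assumed
    data = np.random.rand(grid.prod(), 7).reshape(tuple(grid) + (-1,), order='F')

    blown_up = ndimage.zoom(data, tuple(packing) + (1,), order=0, mode='nearest')
    flat     = blown_up.reshape((grid * packing).prod(), -1, order='F')  # one row per refined cell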

View File

@ -60,7 +60,7 @@ for name in filenames:
table.head_read()
# ------------------------------------------ process labels ---------------------------------------
# ------------------------------------------ process labels ---------------------------------------
errors = []
remarks = []
@ -80,7 +80,7 @@ for name in filenames:
damask.util.croak(errors)
table.close(dismiss = True)
continue
# ------------------------------------------ assemble header ---------------------------------------
randomSeed = int(os.urandom(4).hex(), 16) if options.randomSeed is None else options.randomSeed # random seed per file
@ -97,17 +97,17 @@ for name in filenames:
for col,dim in zip(columns,dims):
if options.unique:
s = set(map(tuple,table.data[:,col:col+dim])) # generate set of (unique) values
uniques = np.array(map(np.array,s)) # translate set to np.array
uniques = np.array(list(map(np.array,s))) # translate set to np.array
shuffler = dict(zip(s,np.random.permutation(len(s)))) # random permutation
table.data[:,col:col+dim] = uniques[np.array(map(lambda x: shuffler[tuple(x)],
table.data[:,col:col+dim]))] # fill table with mapped uniques
table.data[:,col:col+dim] = uniques[np.array(list(map(lambda x: shuffler[tuple(x)],
table.data[:,col:col+dim])))] # fill table with mapped uniques
else:
np.random.shuffle(table.data[:,col:col+dim]) # independently shuffle every row
# ------------------------------------------ output result -----------------------------------------
# ------------------------------------------ output result -----------------------------------------
table.data_writeArray()
# ------------------------------------------ output finalization -----------------------------------
# ------------------------------------------ output finalization -----------------------------------
table.close() # close ASCII tables
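
The list(...) wrappers added above matter because map returns a lazy iterator in Python 3, so np.array(map(...)) yields a 0-d object array instead of the intended numeric array. A small illustration:

    import numpy as np

    s = {(1, 2), (3, 4)}
    bad  = np.array(map(np.array, s))         # Python 3: 0-d object array wrapping the map object
    good = np.array(list(map(np.array, s)))   # shape (2, 2), as intended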

View File

@ -24,22 +24,22 @@ def findClosestSeed(seeds, weights, point):
def Laguerre_tessellation(grid, size, seeds, weights, origin = np.zeros(3), periodic = True, cpus = 2):
if periodic:
weights_p = np.tile(weights,27).flatten(order='F') # Laguerre weights (1,2,3,1,2,3,...,1,2,3)
weights_p = np.tile(weights.squeeze(),27) # Laguerre weights (1,2,3,1,2,3,...,1,2,3)
seeds_p = np.vstack((seeds -np.array([size[0],0.,0.]),seeds, seeds +np.array([size[0],0.,0.])))
seeds_p = np.vstack((seeds_p-np.array([0.,size[1],0.]),seeds_p,seeds_p+np.array([0.,size[1],0.])))
seeds_p = np.vstack((seeds_p-np.array([0.,0.,size[2]]),seeds_p,seeds_p+np.array([0.,0.,size[2]])))
coords = damask.grid_filters.cell_coord0(grid*3,size*3,-origin-size).reshape(-1,3,order='F')
coords = damask.grid_filters.cell_coord0(grid*3,size*3,-origin-size).reshape(-1,3)
else:
weights_p = weights.flatten()
weights_p = weights.squeeze()
seeds_p = seeds
coords = damask.grid_filters.cell_coord0(grid,size,-origin).reshape(-1,3,order='F')
coords = damask.grid_filters.cell_coord0(grid,size,-origin).reshape(-1,3)
if cpus > 1:
pool = multiprocessing.Pool(processes = cpus)
result = pool.map_async(partial(findClosestSeed,seeds_p,weights_p), [coord for coord in coords])
pool.close()
pool.join()
closest_seed = np.array(result.get())
closest_seed = np.array(result.get()).reshape(-1,3)
else:
closest_seed= np.array([findClosestSeed(seeds_p,weights_p,coord) for coord in coords])
@ -52,7 +52,7 @@ def Laguerre_tessellation(grid, size, seeds, weights, origin = np.zeros(3), peri
def Voronoi_tessellation(grid, size, seeds, origin = np.zeros(3), periodic = True):
coords = damask.grid_filters.cell_coord0(grid,size,-origin).reshape(-1,3,order='F')
coords = damask.grid_filters.cell_coord0(grid,size,-origin).reshape(-1,3)
KDTree = spatial.cKDTree(seeds,boxsize=size) if periodic else spatial.cKDTree(seeds)
devNull,closest_seed = KDTree.query(coords)
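
In the periodic Voronoi branch, scipy's cKDTree handles the wrapping directly through its boxsize argument, so no seed tiling is needed. A minimal sketch with random seeds (values assumed):

    import numpy as np
    from scipy import spatial
    import damask

    grid, size = np.array([16, 16, 16]), np.ones(3)
    seeds = np.random.rand(20, 3) * size                     # seeds must lie inside the box

    coords = damask.grid_filters.cell_coord0(grid, size).reshape(-1, 3)
    tree = spatial.cKDTree(seeds, boxsize=size)              # periodic distance metric
    _, closest_seed = tree.query(coords)                     # owning seed index per cell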

View File

@ -45,7 +45,7 @@ options.blacklist = [int(i) for i in options.blacklist]
for name in filenames:
damask.util.report(scriptName,name)
geom = damask.Geom.from_file(StringIO(''.join(sys.stdin.read())) if name is None else name)
microstructure = geom.get_microstructure().reshape((-1,1),order='F')
@ -53,9 +53,9 @@ for name in filenames:
np.full(geom.grid.prod(),True,dtype=bool),
np.in1d(microstructure,options.blacklist,invert=True) if options.blacklist else \
np.full(geom.grid.prod(),True,dtype=bool))
seeds = damask.grid_filters.cell_coord0(geom.grid,geom.size).reshape(-1,3)
seeds = damask.grid_filters.cell_coord0(geom.grid,geom.size).reshape(-1,3,order='F')
comments = geom.comments \
+ [scriptID + ' ' + ' '.join(sys.argv[1:]),
'grid\ta {}\tb {}\tc {}'.format(*geom.grid),

View File

@ -128,7 +128,7 @@ for name in filenames:
if not options.selective:
coords = damask.grid_filters.cell_coord0(grid,size).reshape(-1,3)
coords = damask.grid_filters.cell_coord0(grid,size).reshape(-1,3,order='F')
seeds = coords[np.random.choice(np.prod(grid), options.N, replace=False)] \
+ np.broadcast_to(size/grid,(options.N,3))*(np.random.rand(options.N,3)*.5-.25) # wobble without leaving grid
else:

View File

@ -322,11 +322,10 @@ class Geom:
if i != grid.prod():
raise TypeError('Invalid file: expected {} entries, found {}'.format(grid.prod(),i))
microstructure = microstructure.reshape(grid,order='F')
if not np.any(np.mod(microstructure.flatten(),1) != 0.0): # no float present
if not np.any(np.mod(microstructure,1) != 0.0): # no float present
microstructure = microstructure.astype('int')
return Geom(microstructure.reshape(grid),size,origin,homogenization,comments)
return Geom(microstructure.reshape(grid,order='F'),size,origin,homogenization,comments)
@staticmethod
@ -352,16 +351,15 @@ class Geom:
"""
if periodic:
weights_p = np.tile(weights,27).flatten(order='F') # Laguerre weights (1,2,3,1,2,3,...,1,2,3)
weights_p = np.tile(weights,27) # Laguerre weights (1,2,3,1,2,3,...,1,2,3)
seeds_p = np.vstack((seeds -np.array([size[0],0.,0.]),seeds, seeds +np.array([size[0],0.,0.])))
seeds_p = np.vstack((seeds_p-np.array([0.,size[1],0.]),seeds_p,seeds_p+np.array([0.,size[1],0.])))
seeds_p = np.vstack((seeds_p-np.array([0.,0.,size[2]]),seeds_p,seeds_p+np.array([0.,0.,size[2]])))
coords = grid_filters.cell_coord0(grid*3,size*3,-size).reshape(-1,3,order='F')
coords = grid_filters.cell_coord0(grid*3,size*3,-size).reshape(-1,3)
else:
weights_p = weights.flatten()
weights_p = weights
seeds_p = seeds
coords = grid_filters.cell_coord0(grid,size).reshape(-1,3,order='F')
coords = grid_filters.cell_coord0(grid,size).reshape(-1,3)
pool = multiprocessing.Pool(processes = int(Environment().options['DAMASK_NUM_THREADS']))
result = pool.map_async(partial(Geom._find_closest_seed,seeds_p,weights_p), [coord for coord in coords])
@ -396,7 +394,7 @@ class Geom:
perform a periodic tessellation. Defaults to True.
"""
coords = grid_filters.cell_coord0(grid,size).reshape(-1,3,order='F')
coords = grid_filters.cell_coord0(grid,size).reshape(-1,3)
KDTree = spatial.cKDTree(seeds,boxsize=size) if periodic else spatial.cKDTree(seeds)
devNull,microstructure = KDTree.query(coords)

View File

@ -68,12 +68,12 @@ class Result:
self.con_physics = []
for c in self.constituents:
self.con_physics += f['/'.join([self.increments[0],'constituent',c])].keys()
self.con_physics = list(set(self.con_physics)) # make unique
self.con_physics = list(set(self.con_physics)) # make unique
self.mat_physics = []
for m in self.materialpoints:
self.mat_physics += f['/'.join([self.increments[0],'materialpoint',m])].keys()
self.mat_physics = list(set(self.mat_physics)) # make unique
self.mat_physics = list(set(self.mat_physics)) # make unique
self.selection = {'increments': self.increments,
'constituents': self.constituents,'materialpoints': self.materialpoints,
@ -86,13 +86,19 @@ class Result:
def __repr__(self):
"""Show selected data."""
all_selected_increments = self.selection['increments']
self.pick('increments',all_selected_increments[0:1])
first = self.list_data()
self.pick('increments',all_selected_increments[-1:])
last = self.list_data()
last = '' if len(all_selected_increments) < 2 else self.list_data()
self.pick('increments',all_selected_increments)
in_between = ''.join(['\n{}\n ...\n'.format(inc) for inc in all_selected_increments[1:-2]])
return util.srepr(first+ in_between + last)
in_between = '' if len(all_selected_increments) < 3 else \
''.join(['\n{}\n ...\n'.format(inc) for inc in all_selected_increments[1:-2]])
return util.srepr(first + in_between + last)
def _manage_selection(self,action,what,datasets):
@ -105,7 +111,7 @@ class Result:
select from 'set', 'add', and 'del'
what : str
attribute to change (must be from self.selection)
datasets : list of str or Boolean
datasets : list of str or bool
name of datasets as list, supports ? and * wildcards.
True is equivalent to [*], False is equivalent to []
@ -197,7 +203,7 @@ class Result:
----------
what : str
attribute to change (must be from self.selection)
datasets : list of str or Boolean
datasets : list of str or bool
name of datasets as list, supports ? and * wildcards.
True is equivalent to [*], False is equivalent to []
@ -213,7 +219,7 @@ class Result:
----------
what : str
attribute to change (must be from self.selection)
datasets : list of str or Boolean
datasets : list of str or bool
name of datasets as list, supports ? and * wildcards.
True is equivalent to [*], False is equivalent to []
@ -229,7 +235,7 @@ class Result:
----------
what : str
attribute to change (must be from self.selection)
datasets : list of str or Boolean
datasets : list of str or bool
name of datasets as list, supports ? and * wildcards.
True is equivalent to [*], False is equivalent to []
@ -256,10 +262,10 @@ class Result:
datasets : iterable or str
component : int
homogenization component to consider for constituent data
tagged : Boolean
tagged : bool
tag Table.column name with '#component'
defaults to False
split : Boolean
split : bool
split Table by increment and return dictionary of Tables
defaults to True
@ -320,7 +326,7 @@ class Result:
Parameters
----------
datasets : iterable or str or Boolean
datasets : iterable or str or bool
Examples
--------
@ -454,7 +460,7 @@ class Result:
def cell_coordinates(self):
"""Return initial coordinates of the cell centers."""
if self.structured:
return grid_filters.cell_coord0(self.grid,self.size,self.origin).reshape(-1,3)
return grid_filters.cell_coord0(self.grid,self.size,self.origin).reshape(-1,3,order='F')
else:
with h5py.File(self.fname,'r') as f:
return f['geometry/x_c'][()]
@ -1009,7 +1015,7 @@ class Result:
continue
lock.acquire()
with h5py.File(self.fname, 'a') as f:
try:
try: # ToDo: Replace if exists?
dataset = f[result[0]].create_dataset(result[1]['label'],data=result[1]['data'])
for l,v in result[1]['meta'].items():
dataset.attrs[l]=v.encode()
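
A small usage sketch for the selection logic documented above (a bool for datasets behaves like ['*'] when True and like [] when False); the file name and label pattern are assumptions, not taken from the diff:

    import damask

    result = damask.Result('geom_load.hdf5')   # hypothetical result file
    result.pick('increments', True)            # select all increments (same as ['*'])
    result.pick('constituents', ['1_*'])       # wildcard selection, assumed label pattern
    result.pick('increments', False)           # empty selection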

View File

@ -1,3 +1,17 @@
"""
Filters for operations on regular grids.
Notes
-----
The grids are defined as (x,y,z,...) where x is fastest and z is slowest.
This convention is consistent with the geom file format.
When converting to/from a plain list (e.g. storage in ASCII table),
the following operations are required for tensorial data:
D3 = D1.reshape(grid+(-1,),order='F').reshape(grid+(3,3))
D1 = D3.reshape(grid+(-1,)).reshape(-1,9,order='F')
"""
from scipy import spatial as _spatial
import numpy as _np
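
The two reshape rules from this docstring in executable form (a minimal sketch; grid and data are arbitrary):

    import numpy as np

    grid = (4, 5, 6)                                    # (x, y, z), x fastest
    D1 = np.random.rand(np.prod(grid), 9)               # plain list: one 3x3 tensor per cell

    D3 = D1.reshape(grid + (-1,), order='F').reshape(grid + (3, 3))   # list -> grid
    D1_back = D3.reshape(grid + (-1,)).reshape(-1, 9, order='F')      # grid -> list
    assert np.allclose(D1, D1_back)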
@ -7,8 +21,12 @@ def _ks(size,grid,first_order=False):
Parameters
----------
size : numpy.ndarray
size : numpy.ndarray of shape (3)
physical size of the periodic field.
grid : numpy.ndarray of shape (3)
number of grid points.
first_order : bool, optional
correction for first order derivatives, defaults to False.
"""
k_sk = _np.where(_np.arange(grid[0])>grid[0]//2,_np.arange(grid[0])-grid[0],_np.arange(grid[0]))/size[0]
@ -19,8 +37,7 @@ def _ks(size,grid,first_order=False):
k_si = _np.arange(grid[2]//2+1)/size[2]
kk, kj, ki = _np.meshgrid(k_sk,k_sj,k_si,indexing = 'ij')
return _np.concatenate((ki[:,:,:,None],kj[:,:,:,None],kk[:,:,:,None]),axis = 3)
return _np.stack(_np.meshgrid(k_sk,k_sj,k_si,indexing = 'ij'), axis=-1)
def curl(size,field):
@ -29,8 +46,10 @@ def curl(size,field):
Parameters
----------
size : numpy.ndarray
size : numpy.ndarray of shape (3)
physical size of the periodic field.
field : numpy.ndarray of shape (:,:,:,3) or (:,:,:,3,3)
periodic field of which the curl is calculated.
"""
n = _np.prod(field.shape[3:])
@ -41,8 +60,8 @@ def curl(size,field):
e[0, 2, 1] = e[2, 1, 0] = e[1, 0, 2] = -1.0
field_fourier = _np.fft.rfftn(field,axes=(0,1,2))
curl_ = (_np.einsum('slm,ijkl,ijkm ->ijks', e,k_s,field_fourier)*2.0j*_np.pi if n == 3 else # vector, 3 -> 3
_np.einsum('slm,ijkl,ijknm->ijksn',e,k_s,field_fourier)*2.0j*_np.pi) # tensor, 3x3 -> 3x3
curl_ = (_np.einsum('slm,ijkl,ijkm ->ijks', e,k_s,field_fourier)*2.0j*_np.pi if n == 3 else # vector, 3 -> 3
_np.einsum('slm,ijkl,ijknm->ijksn',e,k_s,field_fourier)*2.0j*_np.pi) # tensor, 3x3 -> 3x3
return _np.fft.irfftn(curl_,axes=(0,1,2),s=field.shape[:3])
@ -53,36 +72,40 @@ def divergence(size,field):
Parameters
----------
size : numpy.ndarray
size : numpy.ndarray of shape (3)
physical size of the periodic field.
field : numpy.ndarray of shape (:,:,:,3) or (:,:,:,3,3)
periodic field of which the divergence is calculated.
"""
n = _np.prod(field.shape[3:])
k_s = _ks(size,field.shape[:3],True)
field_fourier = _np.fft.rfftn(field,axes=(0,1,2))
div_ = (_np.einsum('ijkl,ijkl ->ijk', k_s,field_fourier)*2.0j*_np.pi if n == 3 else # vector, 3 -> 1
_np.einsum('ijkm,ijklm->ijkl',k_s,field_fourier)*2.0j*_np.pi) # tensor, 3x3 -> 3
div_ = (_np.einsum('ijkl,ijkl ->ijk', k_s,field_fourier)*2.0j*_np.pi if n == 3 else # vector, 3 -> 1
_np.einsum('ijkm,ijklm->ijkl',k_s,field_fourier)*2.0j*_np.pi) # tensor, 3x3 -> 3
return _np.fft.irfftn(div_,axes=(0,1,2),s=field.shape[:3])
def gradient(size,field):
"""
Calculate gradient of a vector or scalar field in Fourier space.
Calculate gradient of a scalar or vector field in Fourier space.
Parameters
----------
size : numpy.ndarray
size : numpy.ndarray of shape (3)
physical size of the periodic field.
field : numpy.ndarray of shape (:,:,:,1) or (:,:,:,3)
periodic field of which the gradient is calculated.
"""
n = _np.prod(field.shape[3:])
k_s = _ks(size,field.shape[:3],True)
field_fourier = _np.fft.rfftn(field,axes=(0,1,2))
grad_ = (_np.einsum('ijkl,ijkm->ijkm', field_fourier,k_s)*2.0j*_np.pi if n == 1 else # scalar, 1 -> 3
_np.einsum('ijkl,ijkm->ijklm',field_fourier,k_s)*2.0j*_np.pi) # vector, 3 -> 3x3
grad_ = (_np.einsum('ijkl,ijkm->ijkm', field_fourier,k_s)*2.0j*_np.pi if n == 1 else # scalar, 1 -> 3
_np.einsum('ijkl,ijkm->ijklm',field_fourier,k_s)*2.0j*_np.pi) # vector, 3 -> 3x3
return _np.fft.irfftn(grad_,axes=(0,1,2),s=field.shape[:3])
@ -93,9 +116,9 @@ def cell_coord0(grid,size,origin=_np.zeros(3)):
Parameters
----------
grid : numpy.ndarray
grid : numpy.ndarray of shape (3)
number of grid points.
size : numpy.ndarray
size : numpy.ndarray of shape (3)
physical size of the periodic field.
origin : numpy.ndarray, optional
physical origin of the periodic field. Defaults to [0.0,0.0,0.0].
@ -103,7 +126,11 @@ def cell_coord0(grid,size,origin=_np.zeros(3)):
"""
start = origin + size/grid*.5
end = origin + size - size/grid*.5
return _np.mgrid[start[0]:end[0]:grid[0]*1j,start[1]:end[1]:grid[1]*1j,start[2]:end[2]:grid[2]*1j].T
return _np.stack(_np.meshgrid(_np.linspace(start[0],end[0],grid[0]),
_np.linspace(start[1],end[1],grid[1]),
_np.linspace(start[2],end[2],grid[2]),indexing = 'ij'),
axis = -1)
def cell_displacement_fluct(size,F):
@ -112,7 +139,7 @@ def cell_displacement_fluct(size,F):
Parameters
----------
size : numpy.ndarray
size : numpy.ndarray of shape (3)
physical size of the periodic field.
F : numpy.ndarray
deformation gradient field.
@ -139,14 +166,14 @@ def cell_displacement_avg(size,F):
Parameters
----------
size : numpy.ndarray
size : numpy.ndarray of shape (3)
physical size of the periodic field.
F : numpy.ndarray
deformation gradient field.
"""
F_avg = _np.average(F,axis=(0,1,2))
return _np.einsum('ml,ijkl->ijkm',F_avg-_np.eye(3),cell_coord0(F.shape[:3][::-1],size))
return _np.einsum('ml,ijkl->ijkm',F_avg - _np.eye(3),cell_coord0(F.shape[:3],size))
def cell_displacement(size,F):
@ -155,7 +182,7 @@ def cell_displacement(size,F):
Parameters
----------
size : numpy.ndarray
size : numpy.ndarray of shape (3)
physical size of the periodic field.
F : numpy.ndarray
deformation gradient field.
@ -170,25 +197,25 @@ def cell_coord(size,F,origin=_np.zeros(3)):
Parameters
----------
size : numpy.ndarray
size : numpy.ndarray of shape (3)
physical size of the periodic field.
F : numpy.ndarray
deformation gradient field.
origin : numpy.ndarray, optional
origin : numpy.ndarray of shape (3), optional
physical origin of the periodic field. Defaults to [0.0,0.0,0.0].
"""
return cell_coord0(F.shape[:3][::-1],size,origin) + cell_displacement(size,F)
return cell_coord0(F.shape[:3],size,origin) + cell_displacement(size,F)
def cell_coord0_gridSizeOrigin(coord0,ordered=True):
"""
Return grid 'DNA', i.e. grid, size, and origin from array of cell positions.
Return grid 'DNA', i.e. grid, size, and origin from 1D array of cell positions.
Parameters
----------
coord0 : numpy.ndarray
array of undeformed cell coordinates.
coord0 : numpy.ndarray of shape (:,3)
undeformed cell coordinates.
ordered : bool, optional
expect coord0 data to be ordered (x fast, z slow).
@ -211,13 +238,13 @@ def cell_coord0_gridSizeOrigin(coord0,ordered=True):
start = origin + delta*.5
end = origin - delta*.5 + size
if not _np.allclose(coords[0],_np.linspace(start[0],end[0],grid[0])) and \
_np.allclose(coords[1],_np.linspace(start[1],end[1],grid[1])) and \
_np.allclose(coords[2],_np.linspace(start[2],end[2],grid[2])):
if not (_np.allclose(coords[0],_np.linspace(start[0],end[0],grid[0])) and \
_np.allclose(coords[1],_np.linspace(start[1],end[1],grid[1])) and \
_np.allclose(coords[2],_np.linspace(start[2],end[2],grid[2]))):
raise ValueError('Regular grid spacing violated.')
if ordered and not _np.allclose(coord0.reshape(tuple(grid[::-1])+(3,)),cell_coord0(grid,size,origin)):
raise ValueError('I_nput data is not a regular grid.')
if ordered and not _np.allclose(coord0.reshape(tuple(grid)+(3,),order='F'),cell_coord0(grid,size,origin)):
raise ValueError('Input data is not ordered (x fast, z slow).')
return (grid,size,origin)
@ -241,17 +268,18 @@ def node_coord0(grid,size,origin=_np.zeros(3)):
Parameters
----------
grid : numpy.ndarray
grid : numpy.ndarray of shape (3)
number of grid points.
size : numpy.ndarray
size : numpy.ndarray of shape (3)
physical size of the periodic field.
origin : numpy.ndarray, optional
origin : numpy.ndarray of shape (3), optional
physical origin of the periodic field. Defaults to [0.0,0.0,0.0].
"""
return _np.mgrid[origin[0]:size[0]+origin[0]:(grid[0]+1)*1j,
origin[1]:size[1]+origin[1]:(grid[1]+1)*1j,
origin[2]:size[2]+origin[2]:(grid[2]+1)*1j].T
return _np.stack(_np.meshgrid(_np.linspace(origin[0],size[0]+origin[0],grid[0]+1),
_np.linspace(origin[1],size[1]+origin[1],grid[1]+1),
_np.linspace(origin[2],size[2]+origin[2],grid[2]+1),indexing = 'ij'),
axis = -1)
def node_displacement_fluct(size,F):
@ -260,7 +288,7 @@ def node_displacement_fluct(size,F):
Parameters
----------
size : numpy.ndarray
size : numpy.ndarray of shape (3)
physical size of the periodic field.
F : numpy.ndarray
deformation gradient field.
@ -275,14 +303,14 @@ def node_displacement_avg(size,F):
Parameters
----------
size : numpy.ndarray
size : numpy.ndarray of shape (3)
physical size of the periodic field.
F : numpy.ndarray
deformation gradient field.
"""
F_avg = _np.average(F,axis=(0,1,2))
return _np.einsum('ml,ijkl->ijkm',F_avg-_np.eye(3),node_coord0(F.shape[:3][::-1],size))
return _np.einsum('ml,ijkl->ijkm',F_avg - _np.eye(3),node_coord0(F.shape[:3],size))
def node_displacement(size,F):
@ -291,7 +319,7 @@ def node_displacement(size,F):
Parameters
----------
size : numpy.ndarray
size : numpy.ndarray of shape (3)
physical size of the periodic field.
F : numpy.ndarray
deformation gradient field.
@ -306,15 +334,15 @@ def node_coord(size,F,origin=_np.zeros(3)):
Parameters
----------
size : numpy.ndarray
size : numpy.ndarray of shape (3)
physical size of the periodic field.
F : numpy.ndarray
deformation gradient field.
origin : numpy.ndarray, optional
origin : numpy.ndarray of shape (3), optional
physical origin of the periodic field. Defaults to [0.0,0.0,0.0].
"""
return node_coord0(F.shape[:3][::-1],size,origin) + node_displacement(size,F)
return node_coord0(F.shape[:3],size,origin) + node_displacement(size,F)
def cell_2_node(cell_data):
@ -335,14 +363,14 @@ def node_2_cell(node_data):
return c[:-1,:-1,:-1]
def node_coord0_gridSizeOrigin(coord0,ordered=False):
def node_coord0_gridSizeOrigin(coord0,ordered=True):
"""
Return grid 'DNA', i.e. grid, size, and origin from array of nodal positions.
Return grid 'DNA', i.e. grid, size, and origin from 1D array of nodal positions.
Parameters
----------
coord0 : numpy.ndarray
array of undeformed nodal coordinates.
coord0 : numpy.ndarray of shape (:,3)
undeformed nodal coordinates.
ordered : bool, optional
expect coord0 data to be ordered (x fast, z slow).
@ -357,13 +385,13 @@ def node_coord0_gridSizeOrigin(coord0,ordered=False):
if (grid+1).prod() != len(coord0):
raise ValueError('Data count {} does not match grid {}.'.format(len(coord0),grid))
if not _np.allclose(coords[0],_np.linspace(mincorner[0],maxcorner[0],grid[0]+1)) and \
_np.allclose(coords[1],_np.linspace(mincorner[1],maxcorner[1],grid[1]+1)) and \
_np.allclose(coords[2],_np.linspace(mincorner[2],maxcorner[2],grid[2]+1)):
if not (_np.allclose(coords[0],_np.linspace(mincorner[0],maxcorner[0],grid[0]+1)) and \
_np.allclose(coords[1],_np.linspace(mincorner[1],maxcorner[1],grid[1]+1)) and \
_np.allclose(coords[2],_np.linspace(mincorner[2],maxcorner[2],grid[2]+1))):
raise ValueError('Regular grid spacing violated.')
if ordered and not _np.allclose(coord0.reshape(tuple((grid+1)[::-1])+(3,)),node_coord0(grid,size,origin)):
raise ValueError('I_nput data is not a regular grid.')
if ordered and not _np.allclose(coord0.reshape(tuple(grid+1)+(3,),order='F'),node_coord0(grid,size,origin)):
raise ValueError('Input data is not ordered (x fast, z slow).')
return (grid,size,origin)
@ -374,15 +402,15 @@ def regrid(size,F,new_grid):
Parameters
----------
size : numpy.ndarray
size : numpy.ndarray of shape (3)
physical size
F : numpy.ndarray
F : numpy.ndarray of shape (:,:,:,3,3)
deformation gradient field
new_grid : numpy.ndarray
new_grid : numpy.ndarray of shape (3)
new grid for undeformed coordinates
"""
c = cell_coord0(F.shape[:3][::-1],size) \
c = cell_coord0(F.shape[:3],size) \
+ cell_displacement_avg(size,F) \
+ cell_displacement_fluct(size,F)
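
The switch from _np.mgrid[...].T to _np.stack(_np.meshgrid(..., indexing='ij'), axis=-1) keeps x as the first (and fastest) array axis instead of transposing to z-first. A minimal sketch of the new cell-center construction under the same conventions:

    import numpy as np

    grid, size, origin = np.array([4, 5, 6]), np.ones(3), np.zeros(3)   # assumed
    start = origin + size / grid * .5
    end   = origin + size - size / grid * .5

    coords = np.stack(np.meshgrid(np.linspace(start[0], end[0], grid[0]),
                                  np.linspace(start[1], end[1], grid[1]),
                                  np.linspace(start[2], end[2], grid[2]),
                                  indexing='ij'),
                      axis=-1)
    assert coords.shape == tuple(grid) + (3,)            # (x, y, z, 3), x index first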

View File

@ -24,6 +24,10 @@ def reference_dir(reference_dir_base):
class TestResult:
def test_self_report(self,default):
print(default)
def test_time_increments(self,default):
shape = default.read_dataset(default.get_dataset_location('F'),0).shape
default.set_by_time(0.0,20.0)

View File

@ -4,18 +4,18 @@ import numpy as np
from damask import grid_filters
class TestGridFilters:
def test_cell_coord0(self):
size = np.random.random(3)
grid = np.random.randint(8,32,(3))
coord = grid_filters.cell_coord0(grid,size)
assert np.allclose(coord[0,0,0],size/grid*.5) and coord.shape == tuple(grid[::-1]) + (3,)
assert np.allclose(coord[0,0,0],size/grid*.5) and coord.shape == tuple(grid) + (3,)
def test_node_coord0(self):
size = np.random.random(3)
grid = np.random.randint(8,32,(3))
coord = grid_filters.node_coord0(grid,size)
assert np.allclose(coord[-1,-1,-1],size) and coord.shape == tuple(grid[::-1]+1) + (3,)
assert np.allclose(coord[-1,-1,-1],size) and coord.shape == tuple(grid+1) + (3,)
def test_coord0(self):
size = np.random.random(3)
@ -31,7 +31,7 @@ class TestGridFilters:
size = np.random.random(3)
origin = np.random.random(3)
coord0 = eval('grid_filters.{}_coord0(grid,size,origin)'.format(mode)) # noqa
_grid,_size,_origin = eval('grid_filters.{}_coord0_gridSizeOrigin(coord0.reshape(-1,3))'.format(mode))
_grid,_size,_origin = eval('grid_filters.{}_coord0_gridSizeOrigin(coord0.reshape(-1,3,order="F"))'.format(mode))
assert np.allclose(grid,_grid) and np.allclose(size,_size) and np.allclose(origin,_origin)
def test_displacement_fluct_equivalence(self):
@ -57,9 +57,9 @@ class TestGridFilters:
shifted = eval('grid_filters.{}_coord0(grid,size,origin)'.format(mode))
unshifted = eval('grid_filters.{}_coord0(grid,size)'.format(mode))
if mode == 'cell':
assert np.allclose(shifted,unshifted+np.broadcast_to(origin,tuple(grid[::-1]) +(3,)))
assert np.allclose(shifted,unshifted+np.broadcast_to(origin,tuple(grid) +(3,)))
elif mode == 'node':
assert np.allclose(shifted,unshifted+np.broadcast_to(origin,tuple(grid[::-1]+1)+(3,)))
assert np.allclose(shifted,unshifted+np.broadcast_to(origin,tuple(grid+1)+(3,)))
@pytest.mark.parametrize('function',[grid_filters.cell_displacement_avg,
grid_filters.node_displacement_avg])
@ -80,8 +80,43 @@ class TestGridFilters:
F = np.broadcast_to(np.random.random((3,3)), tuple(grid)+(3,3))
assert np.allclose(function(size,F),0.0)
@pytest.mark.parametrize('function',[grid_filters.coord0_check,
grid_filters.node_coord0_gridSizeOrigin,
grid_filters.cell_coord0_gridSizeOrigin])
def test_invalid_coordinates(self,function):
invalid_coordinates = np.random.random((np.random.randint(12,52),3))
with pytest.raises(ValueError):
function(invalid_coordinates)
@pytest.mark.parametrize('function',[grid_filters.node_coord0_gridSizeOrigin,
grid_filters.cell_coord0_gridSizeOrigin])
def test_uneven_spaced_coordinates(self,function):
start = np.random.random(3)
end = np.random.random(3)*10. + start
grid = np.random.randint(8,32,(3))
uneven = np.stack(np.meshgrid(np.logspace(start[0],end[0],grid[0]),
np.logspace(start[1],end[1],grid[1]),
np.logspace(start[2],end[2],grid[2]),indexing = 'ij'),
axis = -1).reshape((grid.prod(),3),order='F')
with pytest.raises(ValueError):
function(uneven)
@pytest.mark.parametrize('mode',[True,False])
@pytest.mark.parametrize('function',[grid_filters.node_coord0_gridSizeOrigin,
grid_filters.cell_coord0_gridSizeOrigin])
def test_unordered_coordinates(self,function,mode):
origin = np.random.random(3)
size = np.random.random(3)*10.+origin
grid = np.random.randint(8,32,(3))
unordered = grid_filters.node_coord0(grid,size,origin).reshape(-1,3)
if mode:
with pytest.raises(ValueError):
function(unordered,mode)
else:
function(unordered,mode)
def test_regrid(self):
size = np.random.random(3)
grid = np.random.randint(8,32,(3))
F = np.broadcast_to(np.eye(3), tuple(grid[::-1])+(3,3))
F = np.broadcast_to(np.eye(3), tuple(grid)+(3,3))
assert all(grid_filters.regrid(size,F,grid) == np.arange(grid.prod()))

View File

@ -184,8 +184,6 @@ subroutine materialpoint_stressAndItsTangent(updateJaco,dt)
e, & !< element number
mySource, &
myNgrains
real(pReal), dimension(3,3) :: &
subF
real(pReal), dimension(discretization_nIP,discretization_nElem) :: &
subFrac, &
subStep
@ -376,16 +374,15 @@ subroutine materialpoint_stressAndItsTangent(updateJaco,dt)
!--------------------------------------------------------------------------------------------------
! deformation partitioning
! based on materialpoint_subF0,.._subF,crystallite_partionedF0, and homogenization_state,
! results in crystallite_partionedF
!$OMP PARALLEL DO PRIVATE(myNgrains,subF)
!$OMP PARALLEL DO PRIVATE(myNgrains)
elementLooping2: do e = FEsolving_execElem(1),FEsolving_execElem(2)
myNgrains = homogenization_Ngrains(material_homogenizationAt(e))
IpLooping2: do i = FEsolving_execIP(1),FEsolving_execIP(2)
if(requested(i,e) .and. .not. doneAndHappy(1,i,e)) then ! requested but not yet done
subF = materialpoint_F0(1:3,1:3,i,e) &
+ (materialpoint_F(1:3,1:3,i,e)-materialpoint_F0(1:3,1:3,i,e))*(subStep(i,e)+subFrac(i,e))
call partitionDeformation(subF,i,e) ! partition deformation onto constituents
call partitionDeformation(materialpoint_F0(1:3,1:3,i,e) &
+ (materialpoint_F(1:3,1:3,i,e)-materialpoint_F0(1:3,1:3,i,e))&
*(subStep(i,e)+subFrac(i,e)), &
i,e)
crystallite_dt(1:myNgrains,i,e) = dt*subStep(i,e) ! propagate materialpoint dt to grains
crystallite_requested(1:myNgrains,i,e) = .true. ! request calculation for constituents
else
@ -397,23 +394,22 @@ subroutine materialpoint_stressAndItsTangent(updateJaco,dt)
!--------------------------------------------------------------------------------------------------
! crystallite integration
! based on crystallite_partionedF0,.._partionedF
! incrementing by crystallite_dt
converged = crystallite_stress() !ToDo: MD not sure if that is the best logic
!--------------------------------------------------------------------------------------------------
! state update
!$OMP PARALLEL DO PRIVATE(subF)
!$OMP PARALLEL DO
elementLooping3: do e = FEsolving_execElem(1),FEsolving_execElem(2)
IpLooping3: do i = FEsolving_execIP(1),FEsolving_execIP(2)
if (requested(i,e) .and. .not. doneAndHappy(1,i,e)) then
if (.not. converged(i,e)) then
doneAndHappy(1:2,i,e) = [.true.,.false.]
else
subF = materialpoint_F0(1:3,1:3,i,e) &
+ (materialpoint_F(1:3,1:3,i,e)-materialpoint_F0(1:3,1:3,i,e))*(subStep(i,e)+subFrac(i,e))
doneAndHappy(1:2,i,e) = updateState(dt*subStep(i,e),subF,i,e)
doneAndHappy(1:2,i,e) = updateState(dt*subStep(i,e), &
materialpoint_F0(1:3,1:3,i,e) &
+ (materialpoint_F(1:3,1:3,i,e)-materialpoint_F0(1:3,1:3,i,e)) &
*(subStep(i,e)+subFrac(i,e)), &
i,e)
converged(i,e) = all(doneAndHappy(1:2,i,e)) ! converged if done and happy
endif
endif
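
The inlined expression replaces the former subF temporary (and with it the PRIVATE(subF) clause on the OpenMP loops); per material point it is just the deformation gradient interpolated to the current cut-back position. As a numpy-style sketch of the same arithmetic:

    import numpy as np

    def interpolated_F(F0, F, subFrac, subStep):
        # deformation gradient at the accumulated sub-step position (sketch)
        return F0 + (F - F0) * (subFrac + subStep)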

View File

@ -63,27 +63,27 @@ module quaternions
module procedure assign_quat__
module procedure assign_vec__
end interface assignment (=)
interface quaternion
module procedure init__
end interface quaternion
interface abs
procedure abs__
end interface abs
interface dot_product
procedure dot_product__
end interface dot_product
interface conjg
module procedure conjg__
end interface conjg
interface exp
module procedure exp__
end interface exp
interface log
module procedure log__
end interface log
@ -95,7 +95,7 @@ module quaternions
interface aimag
module procedure aimag__
end interface aimag
public :: &
quaternions_init, &
assignment(=), &
@ -118,7 +118,7 @@ end subroutine quaternions_init
!---------------------------------------------------------------------------------------------------
!> construct a quaternion from a 4-vector
!> @brief construct a quaternion from a 4-vector
!---------------------------------------------------------------------------------------------------
type(quaternion) pure function init__(array)
@ -133,7 +133,7 @@ end function init__
!---------------------------------------------------------------------------------------------------
!> assign a quaternion
!> @brief assign a quaternion
!---------------------------------------------------------------------------------------------------
elemental pure subroutine assign_quat__(self,other)
@ -141,12 +141,12 @@ elemental pure subroutine assign_quat__(self,other)
type(quaternion), intent(in) :: other
self = [other%w,other%x,other%y,other%z]
end subroutine assign_quat__
!---------------------------------------------------------------------------------------------------
!> assign a 4-vector
!> @brief assign a 4-vector
!---------------------------------------------------------------------------------------------------
pure subroutine assign_vec__(self,other)
@ -162,7 +162,7 @@ end subroutine assign_vec__
!---------------------------------------------------------------------------------------------------
!> add a quaternion
!> @brief add a quaternion
!---------------------------------------------------------------------------------------------------
type(quaternion) elemental pure function add__(self,other)
@ -170,24 +170,24 @@ type(quaternion) elemental pure function add__(self,other)
add__ = [ self%w, self%x, self%y ,self%z] &
+ [other%w, other%x, other%y,other%z]
end function add__
!---------------------------------------------------------------------------------------------------
!> return (unary positive operator)
!> @brief return (unary positive operator)
!---------------------------------------------------------------------------------------------------
type(quaternion) elemental pure function pos__(self)
class(quaternion), intent(in) :: self
pos__ = self * (+1.0_pReal)
end function pos__
!---------------------------------------------------------------------------------------------------
!> subtract a quaternion
!> @brief subtract a quaternion
!---------------------------------------------------------------------------------------------------
type(quaternion) elemental pure function sub__(self,other)
@ -195,24 +195,24 @@ type(quaternion) elemental pure function sub__(self,other)
sub__ = [ self%w, self%x, self%y ,self%z] &
- [other%w, other%x, other%y,other%z]
end function sub__
!---------------------------------------------------------------------------------------------------
!> negate (unary negative operator)
!> @brief negate (unary negative operator)
!---------------------------------------------------------------------------------------------------
type(quaternion) elemental pure function neg__(self)
class(quaternion), intent(in) :: self
neg__ = self * (-1.0_pReal)
end function neg__
!---------------------------------------------------------------------------------------------------
!> multiply with a quaternion
!> @brief multiply with a quaternion
!---------------------------------------------------------------------------------------------------
type(quaternion) elemental pure function mul_quat__(self,other)
@ -227,7 +227,7 @@ end function mul_quat__
!---------------------------------------------------------------------------------------------------
!> multiply with a scalar
!> @brief multiply with a scalar
!---------------------------------------------------------------------------------------------------
type(quaternion) elemental pure function mul_scal__(self,scal)
@ -235,12 +235,12 @@ type(quaternion) elemental pure function mul_scal__(self,scal)
real(pReal), intent(in) :: scal
mul_scal__ = [self%w,self%x,self%y,self%z]*scal
end function mul_scal__
!---------------------------------------------------------------------------------------------------
!> divide by a quaternion
!> @brief divide by a quaternion
!---------------------------------------------------------------------------------------------------
type(quaternion) elemental pure function div_quat__(self,other)
@ -252,7 +252,7 @@ end function div_quat__
!---------------------------------------------------------------------------------------------------
!> divide by a scalar
!> @brief divide by a scalar
!---------------------------------------------------------------------------------------------------
type(quaternion) elemental pure function div_scal__(self,scal)
@ -265,7 +265,7 @@ end function div_scal__
!---------------------------------------------------------------------------------------------------
!> test equality
!> @brief test equality
!---------------------------------------------------------------------------------------------------
logical elemental pure function eq__(self,other)
@ -278,7 +278,7 @@ end function eq__
!---------------------------------------------------------------------------------------------------
!> test inequality
!> @brief test inequality
!---------------------------------------------------------------------------------------------------
logical elemental pure function neq__(self,other)
@ -290,7 +290,7 @@ end function neq__
!---------------------------------------------------------------------------------------------------
!> raise to the power of a quaternion
!> @brief raise to the power of a quaternion
!---------------------------------------------------------------------------------------------------
type(quaternion) elemental pure function pow_quat__(self,expon)
@ -303,7 +303,7 @@ end function pow_quat__
!---------------------------------------------------------------------------------------------------
!> raise to the power of a scalar
!> @brief raise to the power of a scalar
!---------------------------------------------------------------------------------------------------
type(quaternion) elemental pure function pow_scal__(self,expon)
@ -316,7 +316,7 @@ end function pow_scal__
!---------------------------------------------------------------------------------------------------
!> take exponential
!> @brief take exponential
!---------------------------------------------------------------------------------------------------
type(quaternion) elemental pure function exp__(a)
@ -336,7 +336,7 @@ end function exp__
!---------------------------------------------------------------------------------------------------
!> take logarithm
!> @brief take logarithm
!---------------------------------------------------------------------------------------------------
type(quaternion) elemental pure function log__(a)
@ -356,7 +356,7 @@ end function log__
!---------------------------------------------------------------------------------------------------
!> return norm
!> @brief return norm
!---------------------------------------------------------------------------------------------------
real(pReal) elemental pure function abs__(self)
@ -368,7 +368,7 @@ end function abs__
!---------------------------------------------------------------------------------------------------
!> calculate dot product
!> @brief calculate dot product
!---------------------------------------------------------------------------------------------------
real(pReal) elemental pure function dot_product__(a,b)
@ -380,7 +380,7 @@ end function dot_product__
!---------------------------------------------------------------------------------------------------
!> take conjugate complex
!> @brief take conjugate complex
!---------------------------------------------------------------------------------------------------
type(quaternion) elemental pure function conjg__(self)
@ -392,7 +392,7 @@ end function conjg__
!---------------------------------------------------------------------------------------------------
!> homomorph
!> @brief homomorph
!---------------------------------------------------------------------------------------------------
type(quaternion) elemental pure function homomorphed(self)
@ -404,7 +404,7 @@ end function homomorphed
!---------------------------------------------------------------------------------------------------
!> return as plain array
!> @brief return as plain array
!---------------------------------------------------------------------------------------------------
pure function asArray(self)
@ -417,7 +417,7 @@ end function asArray
!---------------------------------------------------------------------------------------------------
!> real part (scalar)
!> @brief real part (scalar)
!---------------------------------------------------------------------------------------------------
pure function real__(self)
@ -430,7 +430,7 @@ end function real__
!---------------------------------------------------------------------------------------------------
!> imaginary part (3-vector)
!> @brief imaginary part (3-vector)
!---------------------------------------------------------------------------------------------------
pure function aimag__(self)
@ -443,7 +443,7 @@ end function aimag__
!---------------------------------------------------------------------------------------------------
!> inverse
!> @brief inverse
!---------------------------------------------------------------------------------------------------
type(quaternion) elemental pure function inverse(self)