not needed anymore

This commit is contained in:
parent db8f6400f8
commit 7dc8391c03
@@ -253,27 +253,6 @@ grid_parsingArguments:
     - master
     - release
 
-StateIntegration_compareVariants:
-  stage: grid
-  script: StateIntegration_compareVariants/test.py
-  except:
-    - master
-    - release
-
-nonlocal_densityConservation:
-  stage: grid
-  script: nonlocal_densityConservation/test.py
-  except:
-    - master
-    - release
-
-RGC_DetectChanges:
-  stage: grid
-  script: RGC_DetectChanges/test.py
-  except:
-    - master
-    - release
-
 Nonlocal_Damage_DetectChanges:
   stage: grid
   script: Nonlocal_Damage_DetectChanges/test.py

@@ -302,15 +281,6 @@ grid_all_loadCaseRotation:
     - master
     - release
 
-grid_mech_MPI:
-  stage: grid
-  script:
-    - module load $IntelCompiler $MPICH_Intel $PETSc_MPICH_Intel
-    - grid_mech_MPI/test.py
-  except:
-    - master
-    - release
-
 grid_all_restartMPI:
   stage: grid
   script:

@@ -327,13 +297,6 @@ Plasticity_DetectChanges:
     - master
     - release
 
-Homogenization:
-  stage: grid
-  script: Homogenization/test.py
-  except:
-    - master
-    - release
-
 Phenopowerlaw_singleSlip:
   stage: grid
   script: Phenopowerlaw_singleSlip/test.py

PRIVATE (submodule)
@@ -1 +1 @@
-Subproject commit b73dcfe746eadce92ed0ab08a3bfbb19f5c8eb0a
+Subproject commit b7c6e3a6742b6098530a6fa5121b23ec9487dd4f

@@ -1,161 +0,0 @@
#!/usr/bin/env python3

import os
import sys
from optparse import OptionParser
import re
import fnmatch
import math                                                # noqa

import numpy as np

import damask


scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID   = ' '.join([scriptName,damask.version])

def sortingList(labels,whitelistitems):

  indices = []
  names   = []

  for label in labels:
    if re.match(r'^\d+_',label):
      indices.append(int(label.split('_',1)[0]))
      names.append(label.split('_',1)[1])
    else:
      indices.append(0)
      names.append(label)

  return [indices,names,whitelistitems]


# --------------------------------------------------------------------
#                                MAIN
# --------------------------------------------------------------------

parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
Filter rows according to condition and columns by either white or black listing.

Examples:
Every odd row if x coordinate is positive -- " #ip.x# >= 0.0 and #_row_#%2 == 1 ).
All rows where label 'foo' equals 'bar' -- " #s#foo# == 'bar' "

""", version = scriptID)

parser.add_option('-w','--white',
                  dest   = 'whitelist',
                  action = 'extend', metavar = '<string LIST>',
                  help   = 'whitelist of column labels (a,b,c,...)')
parser.add_option('-b','--black',
                  dest   = 'blacklist',
                  action = 'extend', metavar = '<string LIST>',
                  help   = 'blacklist of column labels (a,b,c,...)')
parser.add_option('-c','--condition',
                  dest   = 'condition', metavar = 'string',
                  help   = 'condition to filter rows')

parser.set_defaults(condition = None,
                   )

(options,filenames) = parser.parse_args()

# --- loop over input files -------------------------------------------------------------------------

if filenames == []: filenames = [None]

for name in filenames:
  try:
    table = damask.ASCIItable(name = name)
  except IOError:
    continue
  damask.util.report(scriptName,name)

# ------------------------------------------ assemble info ---------------------------------------

  table.head_read()

# ------------------------------------------ process data ---------------------------------------

  specials = { \
              '_row_': 0,
             }
  labels    = []
  positions = []

  for position,label in enumerate(table.labels(raw = True)):
    if (options.whitelist is None or any([ position in table.label_indexrange(needle) \
                                           or fnmatch.fnmatch(label,needle) for needle in options.whitelist])) \
    and (options.blacklist is None or not any([ position in table.label_indexrange(needle) \
                                                or fnmatch.fnmatch(label,needle) for needle in options.blacklist])):   # a label to keep?
      labels.append(label)                                 # remember name...
      positions.append(position)                           # ...and position

  if len(labels) > 0 and options.whitelist is not None and options.blacklist is None:   # check whether reordering is possible
    whitelistitem = np.zeros(len(labels),dtype=int)
    for i,label in enumerate(labels):                      # check each selected label
      match = [ positions[i] in table.label_indexrange(needle) \
                or fnmatch.fnmatch(label,needle) for needle in options.whitelist]       # which whitelist items do match it
      whitelistitem[i] = match.index(True) if np.sum(match) == 1 else -1                # unique match to a whitelist item --> store which

    order = range(len(labels)) if np.any(whitelistitem < 0) \
       else np.lexsort(sortingList(labels,whitelistitem))  # reorder if unique, i.e. no "-1" in whitelistitem
  else:
    order = range(len(labels))                             # maintain original order of labels

# --------------------------------------- evaluate condition ---------------------------------------
  if options.condition is not None:
    condition = options.condition                          # copy per file, since might be altered inline
    breaker = False

    for position,(all,marker,column) in enumerate(set(re.findall(r'#(([s]#)?(.+?))#',condition))):  # find three groups
      idx = table.label_index(column)
      dim = table.label_dimension(column)
      if idx < 0 and column not in specials:
        damask.util.croak('column "{}" not found.'.format(column))
        breaker = True
      else:
        if column in specials:
          replacement = 'specials["{}"]'.format(column)
        elif dim == 1:                                     # scalar input
          replacement = '{}(table.data[{}])'.format({  '':'float',
                                                     's#':'str'}[marker],idx)           # take float or string value of data column
        elif dim > 1:                                      # multidimensional input (vector, tensor, etc.)
          replacement = 'np.array(table.data[{}:{}],dtype=float)'.format(idx,idx+dim)   # use (flat) array representation

        condition = condition.replace('#'+all+'#',replacement)

    if breaker: continue                                   # found mistake in condition evaluation --> next file

# ------------------------------------------ assemble header ---------------------------------------

  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
  table.labels_clear()
  table.labels_append(np.array(labels)[order])             # update with new label set
  table.head_write()

# ------------------------------------------ process and output data ------------------------------------------

  positions = np.array(positions)[order]

  atOnce = options.condition is None
  if atOnce:                                               # read full array and filter columns
    try:
      table.data_readArray(positions+1)                    # read desired columns (indexed 1,...)
      table.data_writeArray()                              # directly write out
    except Exception:
      table.data_rewind()
      atOnce = False                                       # data contains items that prevent array chunking

  if not atOnce:                                           # read data line by line
    outputAlive = True
    while outputAlive and table.data_read():               # read next data line of ASCII table
      specials['_row_'] += 1                               # count row
      if options.condition is None or eval(condition):     # valid row ?
        table.data = [table.data[position] for position in positions]   # retain filtered columns
        outputAlive = table.data_write()                   # output processed line

# ------------------------------------------ finalize output -----------------------------------------

  table.close()                                            # close input ASCII table (works for stdin)
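
The row filter in the deleted script above works by rewriting the user-supplied condition string into a Python expression that is eval()'d per row. A minimal self-contained sketch of that substitution step, assuming a hypothetical two-column layout ('ip.x' and 'foo'); this is just the regex idea, not the damask ASCIItable API:

import re

def compile_condition(condition, column_of):
    # rewrite '#label#' (float) and '#s#label#' (string) markers into row lookups
    for whole, marker, column in set(re.findall(r'#(([s]#)?(.+?))#', condition)):
        cast = 'str' if marker == 's#' else 'float'
        condition = condition.replace('#'+whole+'#',
                                      '{}(row[{}])'.format(cast, column_of[column]))
    return condition

columns = {'ip.x': 0, 'foo': 1}                            # hypothetical column positions
expr = compile_condition("#ip.x# >= 0.0 and #s#foo# == 'bar'", columns)
for row in [['1.0','bar'], ['-2.0','bar'], ['3.0','baz']]:
    print(row, eval(expr))                                 # True, False, False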

@@ -1,226 +0,0 @@
#!/usr/bin/env python3

import os
import sys
import multiprocessing
from io import StringIO
from functools import partial
from optparse import OptionParser,OptionGroup

import numpy as np
from scipy import spatial

import damask


scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID   = ' '.join([scriptName,damask.version])


def findClosestSeed(seeds, weights, point):
    return np.argmin(np.sum((np.broadcast_to(point,(len(seeds),3))-seeds)**2,axis=1) - weights)


def Laguerre_tessellation(grid, size, seeds, weights, origin = np.zeros(3), periodic = True, cpus = 2):

    if periodic:
        weights_p = np.tile(weights.squeeze(),27)          # Laguerre weights (1,2,3,1,2,3,...,1,2,3)
        seeds_p = np.vstack((seeds  -np.array([size[0],0.,0.]),seeds,  seeds  +np.array([size[0],0.,0.])))
        seeds_p = np.vstack((seeds_p-np.array([0.,size[1],0.]),seeds_p,seeds_p+np.array([0.,size[1],0.])))
        seeds_p = np.vstack((seeds_p-np.array([0.,0.,size[2]]),seeds_p,seeds_p+np.array([0.,0.,size[2]])))
        coords = damask.grid_filters.cell_coord0(grid*3,size*3,-origin-size).reshape(-1,3)
    else:
        weights_p = weights.squeeze()
        seeds_p = seeds
        coords = damask.grid_filters.cell_coord0(grid,size,-origin).reshape(-1,3)

    if cpus > 1:
        pool = multiprocessing.Pool(processes = cpus)
        result = pool.map_async(partial(findClosestSeed,seeds_p,weights_p), [coord for coord in coords])
        pool.close()
        pool.join()
        closest_seed = np.array(result.get()).reshape(-1,3)
    else:
        closest_seed = np.array([findClosestSeed(seeds_p,weights_p,coord) for coord in coords])

    if periodic:
        closest_seed = closest_seed.reshape(grid*3)
        return closest_seed[grid[0]:grid[0]*2,grid[1]:grid[1]*2,grid[2]:grid[2]*2]%seeds.shape[0]
    else:
        return closest_seed


def Voronoi_tessellation(grid, size, seeds, origin = np.zeros(3), periodic = True):

    coords = damask.grid_filters.cell_coord0(grid,size,-origin).reshape(-1,3)
    KDTree = spatial.cKDTree(seeds,boxsize=size) if periodic else spatial.cKDTree(seeds)
    devNull,closest_seed = KDTree.query(coords)

    return closest_seed


# --------------------------------------------------------------------
#                                MAIN
# --------------------------------------------------------------------

parser = OptionParser(option_class=damask.extendableOption, usage='%prog option(s) [seedfile(s)]', description = """
Generate geometry description and material configuration by tessellation of given seeds file.

""", version = scriptID)


group = OptionGroup(parser, "Tessellation","")

group.add_option('-l',
                 '--laguerre',
                 dest = 'laguerre',
                 action = 'store_true',
                 help = 'use Laguerre (weighted Voronoi) tessellation')
group.add_option('--cpus',
                 dest = 'cpus',
                 type = 'int', metavar = 'int',
                 help = 'number of parallel processes to use for Laguerre tessellation [%default]')
group.add_option('--nonperiodic',
                 dest = 'periodic',
                 action = 'store_false',
                 help = 'nonperiodic tessellation')

parser.add_option_group(group)

group = OptionGroup(parser, "Geometry","")

group.add_option('-g',
                 '--grid',
                 dest = 'grid',
                 type = 'int', nargs = 3, metavar = ' '.join(['int']*3),
                 help = 'a,b,c grid of hexahedral box')
group.add_option('-s',
                 '--size',
                 dest = 'size',
                 type = 'float', nargs = 3, metavar = ' '.join(['float']*3),
                 help = 'x,y,z size of hexahedral box [1.0 1.0 1.0]')
group.add_option('-o',
                 '--origin',
                 dest = 'origin',
                 type = 'float', nargs = 3, metavar = ' '.join(['float']*3),
                 help = 'origin of grid [0.0 0.0 0.0]')

parser.add_option_group(group)

group = OptionGroup(parser, "Seeds","")

group.add_option('-p',
                 '--pos', '--seedposition',
                 dest = 'pos',
                 type = 'string', metavar = 'string',
                 help = 'label of coordinates [%default]')
group.add_option('-w',
                 '--weight',
                 dest = 'weight',
                 type = 'string', metavar = 'string',
                 help = 'label of weights [%default]')
group.add_option('-m',
                 '--microstructure',
                 dest = 'microstructure',
                 type = 'string', metavar = 'string',
                 help = 'label of microstructures [%default]')
group.add_option('-e',
                 '--eulers',
                 dest = 'eulers',
                 type = 'string', metavar = 'string',
                 help = 'label of Euler angles [%default]')
group.add_option('--axes',
                 dest = 'axes',
                 type = 'string', nargs = 3, metavar = ' '.join(['string']*3),
                 help = 'orientation coordinate frame in terms of position coordinate frame')

parser.add_option_group(group)

group = OptionGroup(parser, "Configuration","")

group.add_option('--without-config',
                 dest = 'config',
                 action = 'store_false',
                 help = 'omit material configuration header')
group.add_option('--phase',
                 dest = 'phase',
                 type = 'int', metavar = 'int',
                 help = 'phase index to be used [%default]')

parser.add_option_group(group)

parser.set_defaults(pos            = 'pos',
                    weight         = 'weight',
                    microstructure = 'microstructure',
                    eulers         = 'euler',
                    phase          = 1,
                    cpus           = 2,
                    laguerre       = False,
                    periodic       = True,
                    config         = True,
                   )

(options,filenames) = parser.parse_args()
if filenames == []: filenames = [None]

for name in filenames:
    damask.util.report(scriptName,name)

    table = damask.Table.load(StringIO(''.join(sys.stdin.read())) if name is None else name)

    size   = np.ones(3)
    origin = np.zeros(3)
    for line in table.comments:
        items = line.lower().strip().split()
        key = items[0] if items else ''
        if   key == 'grid':
            grid   = np.array([  int(dict(zip(items[1::2],items[2::2]))[i]) for i in ['a','b','c']])
        elif key == 'size':
            size   = np.array([float(dict(zip(items[1::2],items[2::2]))[i]) for i in ['x','y','z']])
        elif key == 'origin':
            origin = np.array([float(dict(zip(items[1::2],items[2::2]))[i]) for i in ['x','y','z']])
    if options.grid:   grid   = np.array(options.grid)
    if options.size:   size   = np.array(options.size)
    if options.origin: origin = np.array(options.origin)

    seeds = table.get(options.pos)

    grains = table.get(options.microstructure) if options.microstructure in table.labels else np.arange(len(seeds))+1
    grainIDs = np.unique(grains).astype('i')

    if options.eulers in table.labels:
        eulers = table.get(options.eulers)

    if options.laguerre:
        indices = grains[Laguerre_tessellation(grid,size,seeds,table.get(options.weight),origin,
                                               options.periodic,options.cpus)]
    else:
        indices = grains[Voronoi_tessellation (grid,size,seeds,origin,options.periodic)]

    config_header = []
    if options.config:

        if options.eulers in table.labels:
            config_header += ['<texture>']
            for ID in grainIDs:
                eulerID = np.nonzero(grains == ID)[0][0]   # find first occurrence of this grain id
                config_header += ['[Grain{}]'.format(ID),
                                  '(gauss)\tphi1 {:.2f}\tPhi {:.2f}\tphi2 {:.2f}'.format(*eulers[eulerID])
                                 ]
                if options.axes: config_header += ['axes\t{} {} {}'.format(*options.axes)]

        config_header += ['<microstructure>']
        for ID in grainIDs:
            config_header += ['[Grain{}]'.format(ID),
                              '(constituent)\tphase {}\ttexture {}\tfraction 1.0'.format(options.phase,ID)
                             ]

        config_header += ['<!skip>']

    header = [scriptID + ' ' + ' '.join(sys.argv[1:])]\
           + config_header
    geom = damask.Geom(indices.reshape(grid),size,origin,
                       comments=header)
    damask.util.croak(geom)

    geom.save_ASCII(sys.stdout if name is None else os.path.splitext(name)[0]+'.geom',compress=False)
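
The Voronoi branch of the deleted script reduces to a nearest-neighbour query over cell centres, with periodic wrapping handled by cKDTree's boxsize argument. A minimal standalone sketch with a toy grid and random seeds, building the cell centres by hand instead of calling damask.grid_filters.cell_coord0:

import numpy as np
from scipy import spatial

grid, size = np.array([4,4,4]), np.array([1.,1.,1.])
rng = np.random.default_rng(0)
seeds = rng.random((5,3))*size                             # toy seed points inside the box

# cell-centre coordinates of the regular grid
x,y,z = [np.linspace(s/g/2, s-s/g/2, g) for s,g in zip(size,grid)]
coords = np.stack(np.meshgrid(x,y,z,indexing='ij'),axis=-1).reshape(-1,3)

tree = spatial.cKDTree(seeds, boxsize=size)                # boxsize makes the query periodic
_, closest = tree.query(coords)                            # owning seed index per cell
print(closest.reshape(grid))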

@@ -1,349 +0,0 @@
#!/usr/bin/env python3

import os
import sys
import math
import random
from io import StringIO
from optparse import OptionParser

import numpy as np

import damask


scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID   = ' '.join([scriptName,damask.version])


# --- helper functions ---
def integerFactorization(i):

  j = int(math.floor(math.sqrt(float(i))))
  while j>1 and int(i)%j != 0:
    j -= 1
  return j

def binAsBins(bin,intervals):
  """Explode compound bin into 3D bins list."""
  bins = [0]*3
  bins[0] = (bin//(intervals[1] * intervals[2])) % intervals[0]
  bins[1] = (bin//intervals[2]) % intervals[1]
  bins[2] = bin % intervals[2]
  return bins

def binsAsBin(bins,intervals):
  """Implode 3D bins into compound bin."""
  return (bins[0]*intervals[1] + bins[1])*intervals[2] + bins[2]

def EulersAsBins(Eulers,intervals,deltas,center):
  """Return list of Eulers translated into 3D bins list."""
  return [int((euler+(0.5-center)*delta)//delta)%interval \
          for euler,delta,interval in zip(Eulers,deltas,intervals) \
         ]

def binAsEulers(bin,intervals,deltas,center):
  """Compound bin number translated into list of Eulers."""
  Eulers = [0.0]*3
  Eulers[2] = (bin%intervals[2] + center)*deltas[2]
  Eulers[1] = (bin//intervals[2]%intervals[1] + center)*deltas[1]
  Eulers[0] = (bin//(intervals[2]*intervals[1]) + center)*deltas[0]
  return Eulers

def directInvRepetitions(probability,scale):
  """Calculate number of samples drawn by direct inversion."""
  nDirectInv = 0
  for bin in range(len(probability)):                      # loop over bins
    nDirectInv += int(round(probability[bin]*scale))       # calc repetition
  return nDirectInv


# ---------------------- sampling methods -----------------------------------------------------------------------

# ----- efficient algorithm ---------
def directInversion (ODF,nSamples):
  """ODF contains 'dV_V' (normalized to 1), 'center', 'intervals', 'limits' (in radians)."""
  nOptSamples = max(ODF['nNonZero'],nSamples)              # random subsampling if too little samples requested

  nInvSamples = 0
  repetition = [None]*ODF['nBins']

  scaleLower = 0.0
  nInvSamplesLower = 0
  scaleUpper = float(nOptSamples)
  incFactor = 1.0
  nIter = 0
  nInvSamplesUpper = directInvRepetitions(ODF['dV_V'],scaleUpper)
  while (\
         (scaleUpper-scaleLower > scaleUpper*1e-15 or nInvSamplesUpper < nOptSamples) and \
         nInvSamplesUpper != nOptSamples \
        ):                                                 # closer match required?
    if nInvSamplesUpper < nOptSamples:
      scaleLower,scaleUpper = scaleUpper,scaleUpper+incFactor*(scaleUpper-scaleLower)/2.0
      incFactor *= 2.0
      nInvSamplesLower,nInvSamplesUpper = nInvSamplesUpper,directInvRepetitions(ODF['dV_V'],scaleUpper)
    else:
      scaleUpper = (scaleLower+scaleUpper)/2.0
      incFactor = 1.0
      nInvSamplesUpper = directInvRepetitions(ODF['dV_V'],scaleUpper)
    nIter += 1
    damask.util.croak('%i:(%12.11f,%12.11f) %i <= %i <= %i'%(nIter,scaleLower,scaleUpper,
                                                             nInvSamplesLower,nOptSamples,nInvSamplesUpper))
  nInvSamples = nInvSamplesUpper
  scale = scaleUpper
  damask.util.croak('created set of %i samples (%12.11f) with scaling %12.11f delivering %i'%(nInvSamples,
                                                                                              float(nInvSamples)/nOptSamples-1.0,
                                                                                              scale,nSamples))
  repetition = [None]*ODF['nBins']                         # preallocate and clear

  for bin in range(ODF['nBins']):                          # loop over bins
    repetition[bin] = int(round(ODF['dV_V'][bin]*scale))   # calc repetition

  # build set
  set = [None]*nInvSamples
  i = 0
  for bin in range(ODF['nBins']):
    set[i:i+repetition[bin]] = [bin]*repetition[bin]       # fill set with bin, i.e. orientation
    i += repetition[bin]                                   # advance set counter

  orientations     = np.zeros((nSamples,3),'f')
  reconstructedODF = np.zeros(ODF['nBins'],'f')
  unitInc = 1.0/nSamples
  for j in range(nSamples):
    if (j == nInvSamples-1): ex = j
    else:                    ex = int(round(random.uniform(j+0.5,nInvSamples-0.5)))
    bin = set[ex]
    Eulers = binAsEulers(bin,ODF['interval'],ODF['delta'],ODF['center'])
    orientations[j] = np.degrees(Eulers)
    reconstructedODF[bin] += unitInc
    set[ex] = set[j]                                       # exchange orientations

  return orientations, reconstructedODF


# ----- trial and error algorithms ---------

def MonteCarloEulers (ODF,nSamples):
  """ODF contains 'dV_V' (normalized to 1), 'center', 'intervals', 'limits' (in radians)."""
  countMC = 0
  maxdV_V = max(ODF['dV_V'])
  orientations     = np.zeros((nSamples,3),'f')
  reconstructedODF = np.zeros(ODF['nBins'],'f')
  unitInc = 1.0/nSamples

  for j in range(nSamples):
    MC = maxdV_V*2.0
    bin = 0
    while MC > ODF['dV_V'][bin]:
      countMC += 1
      MC = maxdV_V*random.random()
      Eulers = [limit*random.random() for limit in ODF['limit']]
      bins = EulersAsBins(Eulers,ODF['interval'],ODF['delta'],ODF['center'])
      bin = binsAsBin(bins,ODF['interval'])
    orientations[j] = np.degrees(Eulers)
    reconstructedODF[bin] += unitInc

  return orientations, reconstructedODF, countMC


def MonteCarloBins (ODF,nSamples):
  """ODF contains 'dV_V' (normalized to 1), 'center', 'intervals', 'limits' (in radians)."""
  countMC = 0
  maxdV_V = max(ODF['dV_V'])
  orientations     = np.zeros((nSamples,3),'f')
  reconstructedODF = np.zeros(ODF['nBins'],'f')
  unitInc = 1.0/nSamples

  for j in range(nSamples):
    MC = maxdV_V*2.0
    bin = 0
    while MC > ODF['dV_V'][bin]:
      countMC += 1
      MC = maxdV_V*random.random()
      bin = int(ODF['nBins'] * random.random())
    Eulers = binAsEulers(bin,ODF['interval'],ODF['delta'],ODF['center'])
    orientations[j] = np.degrees(Eulers)
    reconstructedODF[bin] += unitInc

  return orientations, reconstructedODF


def TothVanHoutteSTAT (ODF,nSamples):
  """ODF contains 'dV_V' (normalized to 1), 'center', 'intervals', 'limits' (in radians)."""
  orientations     = np.zeros((nSamples,3),'f')
  reconstructedODF = np.zeros(ODF['nBins'],'f')
  unitInc = 1.0/nSamples

  selectors = [random.random() for i in range(nSamples)]
  selectors.sort()
  indexSelector = 0

  cumdV_V = 0.0
  countSamples = 0

  for bin in range(ODF['nBins']):
    cumdV_V += ODF['dV_V'][bin]
    while indexSelector < nSamples and selectors[indexSelector] < cumdV_V:
      Eulers = binAsEulers(bin,ODF['interval'],ODF['delta'],ODF['center'])
      orientations[countSamples] = np.degrees(Eulers)
      reconstructedODF[bin] += unitInc
      countSamples += 1
      indexSelector += 1

  damask.util.croak('created set of %i when asked to deliver %i'%(countSamples,nSamples))

  return orientations, reconstructedODF


# --------------------------------------------------------------------
#                                MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description ="""
Transform linear binned ODF data into given number of orientations.
IA: integral approximation, STAT: Van Houtte, MC: Monte Carlo

""", version = scriptID)

algorithms = ['IA', 'STAT','MC']
parser.add_option('-n', '--nsamples',
                  dest = 'number',
                  type = 'int', metavar = 'int',
                  help = 'number of orientations to be generated [%default]')
parser.add_option('-a','--algorithm',
                  dest = 'algorithm',
                  choices = algorithms, metavar = 'string',
                  help = 'sampling algorithm {%s} [IA]'%(', '.join(algorithms)))
parser.add_option('-p','--phase',
                  dest = 'phase',
                  type = 'int', metavar = 'int',
                  help = 'phase index to be used [%default]')
parser.add_option('-r', '--rnd',
                  dest = 'randomSeed',
                  type = 'int', metavar = 'int', \
                  help = 'seed of random number generator [%default]')
parser.set_defaults(randomSeed = None,
                    number     = 500,
                    algorithm  = 'IA',
                    phase      = 1,
                    ang        = True,
                   )

(options,filenames) = parser.parse_args()
if filenames == []: filenames = [None]

for name in filenames:
  damask.util.report(scriptName,name)

  table = damask.Table.load(StringIO(''.join(sys.stdin.read())) if name is None else name)

  randomSeed = int(os.urandom(4).hex(),16) if options.randomSeed is None else options.randomSeed   # random seed per file
  random.seed(randomSeed)

# --------------- figure out limits (left/right), delta, and interval -----------------------------
  ODF = {}
  eulers = table.get('euler')
  limits = np.array([np.min(eulers,axis=0),np.max(eulers,axis=0)])          # min/max euler angles in degrees
  ODF['limit'] = np.radians(limits[1,:])                                    # right hand limits in radians
  ODF['center'] = 0.0 if all(limits[0,:]<1e-8) else 0.5                     # vertex or cell centered

  ODF['interval'] = np.array(list(map(len,[np.unique(eulers[:,i]) for i in range(3)])),'i')   # steps are number of distinct values
  ODF['nBins'] = ODF['interval'].prod()
  ODF['delta'] = np.radians(np.array(limits[1,0:3]-limits[0,0:3])/(ODF['interval']-1))        # step size

  if eulers.shape[0] != ODF['nBins']:
    damask.util.croak('expecting %i values but got %i'%(ODF['nBins'],eulers.shape[0]))
    continue

# ----- build binnedODF array and normalize ------------------------------------------------------
  sumdV_V = 0.0
  ODF['dV_V'] = [None]*ODF['nBins']
  ODF['nNonZero'] = 0
  dg = ODF['delta'][0]*2.0*math.sin(ODF['delta'][1]/2.0)*ODF['delta'][2]
  intensity = table.get('intensity')
  for b in range(ODF['nBins']):
    ODF['dV_V'][b] = \
      max(0.0,intensity[b,0]) * dg * \
      math.sin(((b//ODF['interval'][2])%ODF['interval'][1]+ODF['center'])*ODF['delta'][1])
    if ODF['dV_V'][b] > 0.0:
      sumdV_V += ODF['dV_V'][b]
      ODF['nNonZero'] += 1

  for b in range(ODF['nBins']):
    ODF['dV_V'][b] /= sumdV_V                              # normalize dV/V

  damask.util.croak(['non-zero fraction: %12.11f (%i/%i)'%(float(ODF['nNonZero'])/ODF['nBins'],
                                                           ODF['nNonZero'],
                                                           ODF['nBins']),
                     'Volume integral of ODF: %12.11f\n'%sumdV_V,
                     'Reference Integral: %12.11f\n'%(ODF['limit'][0]*ODF['limit'][2]*(1-math.cos(ODF['limit'][1]))),
                    ])

  Functions = {'IA': 'directInversion', 'STAT': 'TothVanHoutteSTAT', 'MC': 'MonteCarloBins'}
  method = Functions[options.algorithm]

  Orientations, ReconstructedODF = (globals()[method])(ODF,options.number)

  # calculate accuracy of sample
  squaredDiff     = {'orig':0.0,method:0.0}
  squaredRelDiff  = {'orig':0.0,method:0.0}
  mutualProd      = {'orig':0.0,method:0.0}
  indivSum        = {'orig':0.0,method:0.0}
  indivSquaredSum = {'orig':0.0,method:0.0}

  for bin in range(ODF['nBins']):
    squaredDiff[method] += (ODF['dV_V'][bin] - ReconstructedODF[bin])**2
    if ODF['dV_V'][bin] > 0.0:
      squaredRelDiff[method] += (ODF['dV_V'][bin] - ReconstructedODF[bin])**2/ODF['dV_V'][bin]**2
    mutualProd[method] += ODF['dV_V'][bin]*ReconstructedODF[bin]
    indivSum[method] += ReconstructedODF[bin]
    indivSquaredSum[method] += ReconstructedODF[bin]**2
    indivSum['orig'] += ODF['dV_V'][bin]
    indivSquaredSum['orig'] += ODF['dV_V'][bin]**2

  damask.util.croak(['sqrt(N*)RMSD of ODFs:\t %12.11f'% math.sqrt(options.number*squaredDiff[method]),
                     'RMSrD of ODFs:\t %12.11f'%math.sqrt(squaredRelDiff[method]),
                     'rMSD of ODFs:\t %12.11f'%(squaredDiff[method]/indivSquaredSum['orig']),
                     'nNonZero correlation slope:\t %12.11f'\
                     %((ODF['nNonZero']*mutualProd[method]-indivSum['orig']*indivSum[method])/\
                       (ODF['nNonZero']*indivSquaredSum['orig']-indivSum['orig']**2)),
                     'nNonZero correlation confidence:\t %12.11f'\
                     %((mutualProd[method]-indivSum['orig']*indivSum[method]/ODF['nNonZero'])/\
                       (ODF['nNonZero']*math.sqrt((indivSquaredSum['orig']/ODF['nNonZero']-(indivSum['orig']/ODF['nNonZero'])**2)*\
                                                  (indivSquaredSum[method]/ODF['nNonZero']-(indivSum[method]/ODF['nNonZero'])**2)))),
                    ])

  if method == 'IA' and options.number < ODF['nNonZero']:
    strOpt = '(%i)'%ODF['nNonZero']

  formatwidth = 1+int(math.log10(options.number))

  materialConfig = [
                    '#' + scriptID + ' ' + ' '.join(sys.argv[1:]),
                    '# random seed %i'%randomSeed,
                    '#-------------------#',
                    '<microstructure>',
                    '#-------------------#',
                   ]

  for i,ID in enumerate(range(options.number)):
    materialConfig += ['[Grain%s]'%(str(ID+1).zfill(formatwidth)),
                       '(constituent) phase %i texture %s fraction 1.0'%(options.phase,str(ID+1).rjust(formatwidth)),
                      ]

  materialConfig += [
                     '#-------------------#',
                     '<texture>',
                     '#-------------------#',
                    ]

  for ID in range(options.number):
    eulers = Orientations[ID]

    materialConfig += ['[Grain%s]'%(str(ID+1).zfill(formatwidth)),
                       '(gauss) phi1 {} Phi {} phi2 {} scatter 0.0 fraction 1.0'.format(*eulers),
                      ]

#--- output finalization --------------------------------------------------------------------------

  with (open(os.path.splitext(name)[0]+'_'+method+'_'+str(options.number)+'_material.config','w')) as outfile:
    outfile.write('\n'.join(materialConfig)+'\n')
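
The STAT variant above is inverse-transform sampling over discrete bin probabilities: sorted uniform variates are matched against the cumulative distribution. With numpy this collapses to a cumulative sum plus searchsorted; a sketch with toy probabilities (hypothetical bin count, no damask dependency):

import numpy as np

rng = np.random.default_rng(42)
dV_V = rng.random(100)
dV_V /= dV_V.sum()                                         # normalized bin probabilities, like ODF['dV_V']

nSamples  = 500
selectors = np.sort(rng.random(nSamples))                  # sorted uniform variates
bins = np.searchsorted(np.cumsum(dV_V), selectors)         # first bin whose cumulative mass covers each selector
bins = np.clip(bins, 0, len(dV_V)-1)                       # guard against float round-off at the top end

counts = np.bincount(bins, minlength=len(dV_V))
print('max reconstruction error:', np.abs(counts/nSamples - dV_V).max())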

@@ -1,68 +0,0 @@
#!/usr/bin/env python3

import os
import sys
from io import StringIO
from optparse import OptionParser

import numpy as np

import damask

scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID   = ' '.join([scriptName,damask.version])

#--------------------------------------------------------------------------------------------------
#                                MAIN
#--------------------------------------------------------------------------------------------------

parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
Create seed file taking material indices from given geom file.
Indices can be black-listed or white-listed.

""", version = scriptID)

parser.add_option('-w',
                  '--white',
                  action = 'extend', metavar = '<int LIST>',
                  dest   = 'whitelist',
                  help   = 'whitelist of grain IDs')
parser.add_option('-b',
                  '--black',
                  action = 'extend', metavar = '<int LIST>',
                  dest   = 'blacklist',
                  help   = 'blacklist of grain IDs')

parser.set_defaults(whitelist = [],
                    blacklist = [],
                   )

(options,filenames) = parser.parse_args()
if filenames == []: filenames = [None]

options.whitelist = [int(i) for i in options.whitelist]
options.blacklist = [int(i) for i in options.blacklist]

for name in filenames:
    damask.util.report(scriptName,name)

    geom = damask.Geom.load_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
    material = geom.material.reshape((-1,1),order='F')

    mask = np.logical_and(np.in1d(material,options.whitelist,invert=False) if options.whitelist else \
                          np.full(geom.grid.prod(),True,dtype=bool),
                          np.in1d(material,options.blacklist,invert=True ) if options.blacklist else \
                          np.full(geom.grid.prod(),True,dtype=bool))

    seeds = damask.grid_filters.cell_coord0(geom.grid,geom.size).reshape(-1,3,order='F')

    comments = geom.comments \
             + [scriptID + ' ' + ' '.join(sys.argv[1:]),
                'grid\ta {}\tb {}\tc {}'.format(*geom.grid),
                'size\tx {}\ty {}\tz {}'.format(*geom.size),
                'origin\tx {}\ty {}\tz {}'.format(*geom.origin),
               ]

    damask.Table(seeds[mask],{'pos':(3,)},comments)\
          .add('material',material[mask].astype(int))\
          .save(sys.stdout if name is None else os.path.splitext(name)[0]+'.seeds',legacy=True)
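
The white/black filtering in the deleted script is just two boolean masks combined with a logical AND; a standalone sketch with a toy material array (hypothetical IDs):

import numpy as np

material  = np.array([1,2,3,2,4,1,3])                      # toy per-cell material IDs
whitelist = [1,2]                                          # keep only these (empty list = keep all)
blacklist = [2]                                            # drop these (empty list = drop none)

keep = np.logical_and(
    np.in1d(material, whitelist)              if whitelist else np.full(material.shape, True),
    np.in1d(material, blacklist, invert=True) if blacklist else np.full(material.shape, True))

print(material[keep])                                      # -> [1 1]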

@@ -1,165 +0,0 @@
#!/usr/bin/env python3

import os
import sys
from optparse import OptionParser,OptionGroup

import numpy as np
from scipy import spatial

import damask


scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID   = ' '.join([scriptName,damask.version])


# --------------------------------------------------------------------
#                                MAIN
# --------------------------------------------------------------------

parser = OptionParser(option_class=damask.extendableOption, usage='%prog options', description = """
Distribute given number of points randomly within rectangular cuboid.
Reports positions with random crystal orientations in seeds file format to STDOUT.

""", version = scriptID)

parser.add_option('-N',
                  dest = 'N',
                  type = 'int', metavar = 'int',
                  help = 'number of seed points [%default]')
parser.add_option('-s',
                  '--size',
                  dest = 'size',
                  type = 'float', nargs = 3, metavar = 'float float float',
                  help = 'size x,y,z of unit cube to fill %default')
parser.add_option('-g',
                  '--grid',
                  dest = 'grid',
                  type = 'int', nargs = 3, metavar = 'int int int',
                  help = 'min a,b,c grid of hexahedral box %default')
parser.add_option('-m',
                  '--microstructure',
                  dest = 'microstructure',
                  type = 'int', metavar = 'int',
                  help = 'first microstructure index [%default]')
parser.add_option('-r',
                  '--rnd',
                  dest = 'randomSeed', type = 'int', metavar = 'int',
                  help = 'seed of random number generator [%default]')

group = OptionGroup(parser, "Laguerre Tessellation",
                    "Parameters determining shape of weight distribution of seed points"
                   )
group.add_option( '-w',
                  '--weights',
                  action = 'store_true',
                  dest   = 'weights',
                  help   = 'assign random weights to seed points for Laguerre tessellation [%default]')
group.add_option( '--max',
                  dest = 'max',
                  type = 'float', metavar = 'float',
                  help = 'max of uniform distribution for weights [%default]')
group.add_option( '--mean',
                  dest = 'mean',
                  type = 'float', metavar = 'float',
                  help = 'mean of normal distribution for weights [%default]')
group.add_option( '--sigma',
                  dest = 'sigma',
                  type = 'float', metavar = 'float',
                  help = 'standard deviation of normal distribution for weights [%default]')
parser.add_option_group(group)

group = OptionGroup(parser, "Selective Seeding",
                    "More uniform distribution of seed points using Mitchell's Best Candidate Algorithm"
                   )
group.add_option( '--selective',
                  action = 'store_true',
                  dest   = 'selective',
                  help   = 'selective picking of seed points from random seed points')
group.add_option( '--distance',
                  dest = 'distance',
                  type = 'float', metavar = 'float',
                  help = 'minimum distance to next neighbor [%default]')
group.add_option( '--numCandidates',
                  dest = 'numCandidates',
                  type = 'int', metavar = 'int',
                  help = 'size of point group to select best distance from [%default]')
parser.add_option_group(group)

parser.set_defaults(randomSeed     = None,
                    grid           = (16,16,16),
                    size           = (1.0,1.0,1.0),
                    N              = 20,
                    weights        = False,
                    max            = 0.0,
                    mean           = 0.2,
                    sigma          = 0.05,
                    microstructure = 1,
                    selective      = False,
                    distance       = 0.2,
                    numCandidates  = 10,
                   )

(options,filenames) = parser.parse_args()
if filenames == []: filenames = [None]

size = np.array(options.size)
grid = np.array(options.grid)
np.random.seed(int(os.urandom(4).hex(),16) if options.randomSeed is None else options.randomSeed)

for name in filenames:
    damask.util.report(scriptName,name)

    if options.N > np.prod(grid):
        damask.util.croak('More seeds than grid positions.')
        sys.exit()
    if options.selective and options.distance < min(size/grid):
        damask.util.croak('Distance must be larger than grid spacing.')
        sys.exit()
    if options.selective and options.distance**3*options.N > 0.5*np.prod(size):
        damask.util.croak('Number of seeds for given size and distance should be < {}.'\
                          .format(int(0.5*np.prod(size)/options.distance**3)))

    eulers = np.random.rand(options.N,3)                   # create random Euler triplets
    eulers[:,0] *= 360.0                                   # phi_1 is uniformly distributed
    eulers[:,1] = np.degrees(np.arccos(2*eulers[:,1]-1.0)) # cos(Phi) is uniformly distributed
    eulers[:,2] *= 360.0                                   # phi_2 is uniformly distributed

    if not options.selective:
        coords = damask.grid_filters.cell_coord0(grid,size).reshape(-1,3,order='F')
        seeds = coords[np.random.choice(np.prod(grid), options.N, replace=False)] \
              + np.broadcast_to(size/grid,(options.N,3))*(np.random.rand(options.N,3)*.5-.25)   # wobble without leaving grid
    else:
        seeds = np.empty((options.N,3))
        seeds[0] = np.random.random(3) * size

        i = 1
        progress = damask.util._ProgressBar(options.N,'',50)
        while i < options.N:
            candidates = np.random.rand(options.numCandidates,3)*np.broadcast_to(size,(options.numCandidates,3))
            tree = spatial.cKDTree(seeds[:i])
            distances, dev_null = tree.query(candidates)
            best = distances.argmax()
            if distances[best] > options.distance:         # require minimum separation
                seeds[i] = candidates[best]                # maximum separation to existing point cloud
                i += 1
                progress.update(i)

    comments = [scriptID + ' ' + ' '.join(sys.argv[1:]),
                'grid\ta {}\tb {}\tc {}'.format(*grid),
                'size\tx {}\ty {}\tz {}'.format(*size),
                'randomSeed\t{}'.format(options.randomSeed),
               ]

    table = damask.Table(np.hstack((seeds,eulers)),{'pos':(3,),'euler':(3,)},comments)\
                  .add('microstructure',np.arange(options.microstructure,options.microstructure + options.N,dtype=int))

    if options.weights:
        weights = np.random.uniform(low = 0, high = options.max, size = options.N) if options.max > 0.0 \
             else np.random.normal(loc = options.mean, scale = options.sigma, size = options.N)
        table = table.add('weight',weights)

    table.save(sys.stdout if name is None else name,legacy=True)
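
The selective branch of the deleted script implements Mitchell's best-candidate algorithm: draw a group of random candidates, keep the one farthest from the existing point cloud, and reject it if it violates the minimum spacing. A compact standalone sketch with toy parameters (unit box, 20 seeds):

import numpy as np
from scipy import spatial

rng = np.random.default_rng(1)
size, N, k, dmin = np.ones(3), 20, 10, 0.2                 # box, seed count, candidate group size, min spacing

seeds = [rng.random(3)*size]                               # first point is unconditional
while len(seeds) < N:
    candidates = rng.random((k,3))*size
    d,_ = spatial.cKDTree(np.asarray(seeds)).query(candidates)
    best = d.argmax()                                      # candidate farthest from current cloud
    if d[best] > dmin:                                     # enforce minimum separation
        seeds.append(candidates[best])

print(np.round(np.asarray(seeds),3))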