Merge remote branch 'origin/development' into pheno+

zhangc43 committed 2016-04-19 14:56:26 -04:00
commit f7396922c2
15 changed files with 579 additions and 480 deletions

View File

@@ -71,18 +71,18 @@ Deals with both vector- and tensor-valued fields.
parser.add_option('-c','--coordinates',
dest = 'coords',
type = 'string', metavar='string',
help = 'column heading for coordinates [%default]')
type = 'string', metavar = 'string',
help = 'column label of coordinates [%default]')
parser.add_option('-v','--vector',
dest = 'vector',
action = 'extend', metavar = '<string LIST>',
help = 'heading of columns containing vector field values')
help = 'column label(s) of vector field values')
parser.add_option('-t','--tensor',
dest = 'tensor',
action = 'extend', metavar = '<string LIST>',
help = 'heading of columns containing tensor field values')
help = 'column label(s) of tensor field values')
parser.set_defaults(coords = 'ipinitialcoord',
parser.set_defaults(coords = 'pos',
)
(options,filenames) = parser.parse_args()

View File

@@ -1,164 +0,0 @@
#!/usr/bin/env python
# -*- coding: UTF-8 no BOM -*-
import os,sys,math
import numpy as np
from optparse import OptionParser
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
#--------------------------------------------------------------------------------------------------
def deformedCoordsFFT(F,undeformed=False):
wgt = 1.0/grid.prod()
integrator = np.array([0.+1.j,0.+1.j,0.+1.j],'c16') * size/ 2.0 / math.pi
step = size/grid
F_fourier = np.fft.rfftn(F,axes=(0,1,2))
coords_fourier = np.zeros(F_fourier.shape[0:4],'c16')
if undeformed:
Favg=np.eye(3)
else:
Favg=np.real(F_fourier[0,0,0,:,:])*wgt
#--------------------------------------------------------------------------------------------------
# integration in Fourier space
k_s = np.zeros([3],'i')
for i in xrange(grid[2]):
k_s[2] = i
if(i > grid[2]//2 ): k_s[2] = k_s[2] - grid[2]
for j in xrange(grid[1]):
k_s[1] = j
if(j > grid[1]//2 ): k_s[1] = k_s[1] - grid[1]
for k in xrange(grid[0]//2+1):
k_s[0] = k
for m in xrange(3):
coords_fourier[i,j,k,m] = sum(F_fourier[i,j,k,m,0:3]*k_s*integrator)
if (any(k_s != 0)):
coords_fourier[i,j,k,0:3] /= -sum(k_s*k_s)
#--------------------------------------------------------------------------------------------------
# add average to scaled fluctuation and put (0,0,0) on (0,0,0)
coords = np.fft.irfftn(coords_fourier,F.shape[0:3],axes=(0,1,2))
offset_coords = np.dot(F[0,0,0,:,:],step/2.0) - scaling*coords[0,0,0,0:3]
for z in xrange(grid[2]):
for y in xrange(grid[1]):
for x in xrange(grid[0]):
coords[z,y,x,0:3] = scaling*coords[z,y,x,0:3] \
+ offset_coords \
+ np.dot(Favg,step*np.array([x,y,z]))
return coords
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options file[s]', description = """
Add deformed configuration of given initial coordinates.
Operates on periodic three-dimensional x,y,z-ordered data sets.
""", version = scriptID)
parser.add_option('-f', '--defgrad',dest='defgrad', metavar = 'string',
help='heading of deformation gradient columns [%default]')
parser.add_option('--reference', dest='undeformed', action='store_true',
help='map results to reference (undeformed) average configuration [%default]')
parser.add_option('--scaling', dest='scaling', action='extend', metavar = '<float LIST>',
help='scaling of fluctuation')
parser.add_option('-u', '--unitlength', dest='unitlength', type='float', metavar = 'float',
help='set unit length for 2D model [%default]')
parser.add_option('--coordinates', dest='coords', metavar='string',
help='column heading for coordinates [%default]')
parser.set_defaults(defgrad = 'f')
parser.set_defaults(coords = 'ipinitialcoord')
parser.set_defaults(scaling = [])
parser.set_defaults(undeformed = False)
parser.set_defaults(unitlength = 0.0)
(options,filenames) = parser.parse_args()
options.scaling += [1.0 for i in xrange(max(0,3-len(options.scaling)))]
scaling = map(float, options.scaling)
# --- loop over input files -------------------------------------------------------------------------
if filenames == []: filenames = [None]
for name in filenames:
try:
table = damask.ASCIItable(name = name,
buffered = False)
except: continue
damask.util.report(scriptName,name)
# ------------------------------------------ read header ------------------------------------------
table.head_read()
# ------------------------------------------ sanity checks ----------------------------------------
errors = []
remarks = []
if table.label_dimension(options.coords) != 3: errors.append('coordinates {} are not a vector.'.format(options.coords))
else: colCoord = table.label_index(options.coords)
if table.label_dimension(options.defgrad) != 9: errors.append('deformation gradient {} is not a tensor.'.format(options.defgrad))
else: colF = table.label_index(options.defgrad)
if remarks != []: damask.util.croak(remarks)
if errors != []:
damask.util.croak(errors)
table.close(dismiss = True)
continue
# --------------- figure out size and grid ---------------------------------------------------------
table.data_readArray()
coords = [np.unique(table.data[:,colCoord+i]) for i in xrange(3)]
mincorner = np.array(map(min,coords))
maxcorner = np.array(map(max,coords))
grid = np.array(map(len,coords),'i')
size = grid/np.maximum(np.ones(3,'d'), grid-1.0) * (maxcorner-mincorner) # size from edge to edge = dim * n/(n-1)
size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 set to smallest among other spacings
N = grid.prod()
if N != len(table.data): errors.append('data count {} does not match grid {}x{}x{}.'.format(N,*grid))
if errors != []:
damask.util.croak(errors)
table.close(dismiss = True)
continue
# ------------------------------------------ assemble header ---------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
for coord in xrange(3):
label = '{}_{}_{}'.format(coord+1,options.defgrad,options.coords)
if np.any(scaling) != 1.0: label+='_{}_{}_{}'.format(scaling)
if options.undeformed: label+='_undeformed'
table.labels_append([label]) # extend ASCII header with new labels
table.head_write()
# ------------------------------------------ read deformation gradient field -----------------------
centroids = deformedCoordsFFT(table.data[:,colF:colF+9].reshape(grid[2],grid[1],grid[0],3,3),
options.undeformed)
# ------------------------------------------ process data ------------------------------------------
table.data_rewind()
for z in xrange(grid[2]):
for y in xrange(grid[1]):
for x in xrange(grid[0]):
table.data_read()
table.data_append(list(centroids[z,y,x,:]))
table.data_write()
# ------------------------------------------ output finalization -----------------------------------
table.close() # close ASCII tables
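For reference, the Fourier-space integration in the deleted deformedCoordsFFT above reduces, for a spatially constant F, to the zero-frequency (average) term alone: the deformed coordinates are just Favg.X. A minimal sketch of that special case (Python 3 / NumPy; names are illustrative, not DAMASK API):

import numpy as np

grid = np.array([4, 5, 6])                        # cells in x,y,z (test values)
size = np.array([1.0, 1.0, 1.0])                  # edge lengths of the periodic box
F = np.tile(np.diag([1.1, 0.9, 1.0]), tuple(grid[::-1]) + (1, 1))  # constant F, z,y,x storage

F_fourier = np.fft.rfftn(F, axes=(0, 1, 2))
Favg = np.real(F_fourier[0, 0, 0])/grid.prod()    # zero frequency carries the average

x, y, z = [np.linspace(0, size[i], grid[i], endpoint=False) for i in range(3)]
Z, Y, X = np.meshgrid(z, y, x, indexing='ij')
X0 = np.stack((X, Y, Z), axis=-1)                 # undeformed cell positions

deformed = np.einsum('ml,ijkl->ijkm', Favg, X0)   # average part of the mapping
assert np.allclose(deformed[..., 0], 1.1*X0[..., 0])   # x is stretched by Favg[0,0]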

View File

@@ -0,0 +1,227 @@
#!/usr/bin/env python
# -*- coding: UTF-8 no BOM -*-
import os,sys,math
import numpy as np
import scipy.ndimage
from optparse import OptionParser
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
#--------------------------------------------------------------------------------------------------
def cell2node(cellData,grid):
nodeData = 0.0
datalen = np.array(cellData.shape[3:]).prod()
for i in xrange(datalen):
node = scipy.ndimage.convolve(cellData.reshape(tuple(grid)+(datalen,))[...,i],
np.ones((2,2,2))/8., # 2x2x2 neighborhood of cells
mode = 'wrap',
origin = -1, # offset to have cell origin as center
) # now averaged at cell origins
node = np.append(node,node[np.newaxis,0,:,:,...],axis=0) # wrap along z
node = np.append(node,node[:,0,np.newaxis,:,...],axis=1) # wrap along y
node = np.append(node,node[:,:,0,np.newaxis,...],axis=2) # wrap along x
nodeData = node[...,np.newaxis] if i==0 else np.concatenate((nodeData,node[...,np.newaxis]),axis=-1)
return nodeData
#--------------------------------------------------------------------------------------------------
def displacementAvgFFT(F,grid,size,nodal=False,transformed=False):
"""calculate average cell center (or nodal) displacement for deformation gradient field specified in each grid cell"""
if nodal:
x, y, z = np.meshgrid(np.linspace(0,size[0],1+grid[0]),
np.linspace(0,size[1],1+grid[1]),
np.linspace(0,size[2],1+grid[2]),
indexing = 'ij')
else:
x, y, z = np.meshgrid(np.linspace(0,size[0],grid[0],endpoint=False),
np.linspace(0,size[1],grid[1],endpoint=False),
np.linspace(0,size[2],grid[2],endpoint=False),
indexing = 'ij')
origCoords = np.concatenate((z[:,:,:,None],y[:,:,:,None],x[:,:,:,None]),axis = 3)
F_fourier = F if transformed else np.fft.rfftn(F,axes=(0,1,2)) # transform or use provided data
Favg = np.real(F_fourier[0,0,0,:,:])/grid.prod() # take zero freq for average
avgDisplacement = np.einsum('ml,ijkl->ijkm',Favg-np.eye(3),origCoords) # dX = Favg.X
return avgDisplacement
#--------------------------------------------------------------------------------------------------
def displacementFluctFFT(F,grid,size,nodal=False,transformed=False):
"""calculate cell center (or nodal) displacement for deformation gradient field specified in each grid cell"""
integrator = 0.5j * size / math.pi
kk, kj, ki = np.meshgrid(np.where(np.arange(grid[2])>grid[2]//2,np.arange(grid[2])-grid[2],np.arange(grid[2])),
np.where(np.arange(grid[1])>grid[1]//2,np.arange(grid[1])-grid[1],np.arange(grid[1])),
np.arange(grid[0]//2+1),
indexing = 'ij')
k_s = np.concatenate((ki[:,:,:,None],kj[:,:,:,None],kk[:,:,:,None]),axis = 3)
k_sSquared = np.einsum('...l,...l',k_s,k_s)
k_sSquared[0,0,0] = 1.0 # ignore global average frequency
#--------------------------------------------------------------------------------------------------
# integration in Fourier space
displacement_fourier = -np.einsum('ijkml,ijkl,l->ijkm',
F if transformed else np.fft.rfftn(F,axes=(0,1,2)),
k_s,
integrator,
) / k_sSquared[...,np.newaxis]
#--------------------------------------------------------------------------------------------------
# backtransformation to real space
displacement = np.fft.irfftn(displacement_fourier,grid,axes=(0,1,2))
return cell2node(displacement,grid) if nodal else displacement
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options file[s]', description = """
Add displacements resulting from deformation gradient field.
Operates on periodic three-dimensional x,y,z-ordered data sets.
Outputs at cell centers or cell nodes (into separate file).
""", version = scriptID)
parser.add_option('-f', '--defgrad',
dest = 'defgrad',
metavar = 'string',
help = 'column label of deformation gradient [%default]')
parser.add_option('-c', '--coordinates',
dest = 'coords',
metavar = 'string',
help = 'column label of coordinates [%default]')
parser.add_option('--nodal',
dest = 'nodal',
action = 'store_true',
help = 'output nodal (not cell-centered) displacements')
parser.set_defaults(defgrad = 'f',
coords = 'pos',
nodal = False,
)
(options,filenames) = parser.parse_args()
# --- loop over input files -------------------------------------------------------------------------
if filenames == []: filenames = [None]
for name in filenames:
try: table = damask.ASCIItable(name = name,
outname = (os.path.splitext(name)[0]+
'_nodal'+
os.path.splitext(name)[1]) if (options.nodal and name) else None,
buffered = False)
except: continue
damask.util.report(scriptName,name)
# ------------------------------------------ read header ------------------------------------------
table.head_read()
# ------------------------------------------ sanity checks ----------------------------------------
errors = []
remarks = []
if table.label_dimension(options.defgrad) != 9:
errors.append('deformation gradient "{}" is not a 3x3 tensor.'.format(options.defgrad))
coordDim = table.label_dimension(options.coords)
if not 3 >= coordDim >= 1:
errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.coords))
elif coordDim < 3:
remarks.append('appending {} dimension{} to coordinates "{}"...'.format(3-coordDim,
's' if coordDim < 2 else '',
options.coords))
if remarks != []: damask.util.croak(remarks)
if errors != []:
damask.util.croak(errors)
table.close(dismiss=True)
continue
# --------------- figure out size and grid ---------------------------------------------------------
table.data_readArray([options.defgrad,options.coords])
table.data_rewind()
if len(table.data.shape) < 2: table.data.shape += (1,) # expand to 2D shape
if table.data[:,9:].shape[1] < 3:
table.data = np.hstack((table.data,
np.zeros((table.data.shape[0],
3-table.data[:,9:].shape[1]),dtype='f'))) # fill coords up to 3D with zeros
if remarks != []: damask.util.croak(remarks)
if errors != []:
damask.util.croak(errors)
table.close(dismiss = True)
continue
# --------------- figure out size and grid ---------------------------------------------------------
coords = [np.unique(table.data[:,9+i]) for i in xrange(3)]
mincorner = np.array(map(min,coords))
maxcorner = np.array(map(max,coords))
grid = np.array(map(len,coords),'i')
size = grid/np.maximum(np.ones(3,'d'), grid-1.0) * (maxcorner-mincorner) # size from edge to edge = dim * n/(n-1)
size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 set to smallest among other spacings
N = grid.prod()
if N != len(table.data): errors.append('data count {} does not match grid {}x{}x{}.'.format(N,*grid))
if errors != []:
damask.util.croak(errors)
table.close(dismiss = True)
continue
# ------------------------------------------ process data ------------------------------------------
F_fourier = np.fft.rfftn(table.data[:,:9].reshape(grid[2],grid[1],grid[0],3,3),axes=(0,1,2)) # perform transform only once...
displacement = displacementFluctFFT(F_fourier,grid,size,options.nodal,transformed=True)
avgDisplacement = displacementAvgFFT (F_fourier,grid,size,options.nodal,transformed=True)
# ------------------------------------------ assemble header ---------------------------------------
if options.nodal:
table.info_clear()
table.labels_clear()
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.labels_append((['{}_pos' .format(i+1) for i in xrange(3)] if options.nodal else []) +
['{}_avg({}).{}' .format(i+1,options.defgrad,options.coords) for i in xrange(3)] +
['{}_fluct({}).{}'.format(i+1,options.defgrad,options.coords) for i in xrange(3)] )
table.head_write()
# ------------------------------------------ output data -------------------------------------------
zrange = np.linspace(0,size[2],1+grid[2]) if options.nodal else xrange(grid[2])
yrange = np.linspace(0,size[1],1+grid[1]) if options.nodal else xrange(grid[1])
xrange = np.linspace(0,size[0],1+grid[0]) if options.nodal else xrange(grid[0])
for i,z in enumerate(zrange):
for j,y in enumerate(yrange):
for k,x in enumerate(xrange):
if options.nodal: table.data_clear()
else: table.data_read()
table.data_append([x,y,z] if options.nodal else [])
table.data_append(list(avgDisplacement[i,j,k,:]))
table.data_append(list( displacement[i,j,k,:]))
table.data_write()
# ------------------------------------------ output finalization -----------------------------------
table.close() # close ASCII tables
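As a self-check of the fluctuation integral above: a sinusoidal stretch F = I + du/dX should integrate back to the displacement it came from. A standalone sketch (Python 3 / NumPy, unit cube with equal edge lengths, no damask imports):

import numpy as np

nx, ny, nz = 16, 4, 4
size = np.ones(3)
A = 0.01
x = np.linspace(0, 1, nx, endpoint=False)

F = np.tile(np.eye(3), (nz, ny, nx, 1, 1))               # z,y,x storage order
F[..., 0, 0] += A * 2*np.pi*np.cos(2*np.pi*x)            # F = I + du/dX for u_x = A sin(2 pi x)

integrator = 0.5j * size / np.pi
kz, ky, kx = np.meshgrid(np.fft.fftfreq(nz)*nz,
                         np.fft.fftfreq(ny)*ny,
                         np.arange(nx//2 + 1), indexing='ij')
k_s = np.stack((kx, ky, kz), axis=-1)
k_sSquared = np.einsum('...l,...l', k_s, k_s)
k_sSquared[0, 0, 0] = 1.0                                # ignore global average frequency

F_fourier = np.fft.rfftn(F, axes=(0, 1, 2))
u_fourier = -np.einsum('ijkml,ijkl,l->ijkm', F_fourier, k_s, integrator)/k_sSquared[..., None]
u = np.fft.irfftn(u_fourier, (nz, ny, nx), axes=(0, 1, 2))

assert np.allclose(u[..., 0], A*np.sin(2*np.pi*x), atol=1e-12)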

View File

@@ -58,17 +58,17 @@ Deals with both vector- and tensor-valued fields.
parser.add_option('-c','--coordinates',
dest = 'coords',
type = 'string', metavar = 'string',
help = 'column heading for coordinates [%default]')
help = 'column label of coordinates [%default]')
parser.add_option('-v','--vector',
dest = 'vector',
action = 'extend', metavar = '<string LIST>',
help = 'heading of columns containing vector field values')
help = 'column label(s) of vector field values')
parser.add_option('-t','--tensor',
dest = 'tensor',
action = 'extend', metavar = '<string LIST>',
help = 'heading of columns containing tensor field values')
help = 'column label(s) of tensor field values')
parser.set_defaults(coords = 'ipinitialcoord',
parser.set_defaults(coords = 'pos',
)
(options,filenames) = parser.parse_args()

View File

@@ -89,19 +89,20 @@ Add column(s) containing Euclidean distance to grain structural features: bounda
""", version = scriptID)
parser.add_option('-c','--coordinates', dest='coords', metavar='string',
help='column heading for coordinates [%default]')
help='column label of coordinates [%default]')
parser.add_option('-i','--identifier', dest='id', metavar = 'string',
help='heading of column containing grain identifier [%default]')
help='column label of grain identifier [%default]')
parser.add_option('-t','--type', dest = 'type', action = 'extend', metavar = '<string LIST>',
help = 'feature type {%s} '%(', '.join(map(lambda x:'/'.join(x['names']),features))) )
parser.add_option('-n','--neighborhood',dest='neighborhood', choices = neighborhoods.keys(), metavar = 'string',
help = 'type of neighborhood [neumann] {%s}'%(', '.join(neighborhoods.keys())))
parser.add_option('-s', '--scale', dest = 'scale', type = 'float', metavar='float',
parser.add_option('-s', '--scale', dest = 'scale', type = 'float', metavar = 'float',
help = 'voxel size [%default]')
parser.set_defaults(coords = 'ipinitialcoord')
parser.set_defaults(id = 'texture')
parser.set_defaults(neighborhood = 'neumann')
parser.set_defaults(scale = 1.0)
parser.set_defaults(coords = 'pos',
id = 'texture',
neighborhood = 'neumann',
scale = 1.0,
)
(options,filenames) = parser.parse_args()
@@ -125,10 +126,8 @@ for i,feature in enumerate(features):
if filenames == []: filenames = [None]
for name in filenames:
try:
table = damask.ASCIItable(name = name, buffered = False)
except:
continue
try: table = damask.ASCIItable(name = name, buffered = False)
except: continue
damask.util.report(scriptName,name)
# ------------------------------------------ read header ------------------------------------------
@@ -141,9 +140,11 @@ for name in filenames:
remarks = []
column = {}
if table.label_dimension(options.coords) != 3: errors.append('coordinates {} are not a vector.'.format(options.coords))
coordDim = table.label_dimension(options.coords)
if not 3 >= coordDim >= 1:
errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.coords))
else: coordCol = table.label_index(options.coords)
if table.label_dimension(options.id) != 1: errors.append('grain identifier {} not found.'.format(options.id))
else: idCol = table.label_index(options.id)
@@ -164,18 +165,20 @@ for name in filenames:
table.data_readArray()
coords = [{},{},{}]
for i in xrange(len(table.data)):
for j in xrange(3):
coords[j][str(table.data[i,coordCol+j])] = True
grid = np.array(map(len,coords),'i')
size = grid/np.maximum(np.ones(3,'d'),grid-1.0)* \
np.array([max(map(float,coords[0].keys()))-min(map(float,coords[0].keys())),\
max(map(float,coords[1].keys()))-min(map(float,coords[1].keys())),\
max(map(float,coords[2].keys()))-min(map(float,coords[2].keys())),\
],'d') # size from bounding box, corrected for cell-centeredness
coords = [np.unique(table.data[:,coordCol+i]) for i in xrange(coordDim)]
mincorner = np.array(map(min,coords))
maxcorner = np.array(map(max,coords))
grid = np.array(map(len,coords)+[1]*(3-len(coords)),'i')
size = grid/np.maximum(np.ones(3,'d'), grid-1.0) * (maxcorner-mincorner) # size from edge to edge = dim * n/(n-1)
size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 set to smallest among other spacings
N = grid.prod()
if N != len(table.data): errors.append('data count {} does not match grid '.format(N) +
'x'.join(map(str,grid)) +
'.')
if errors != []:
damask.util.croak(errors)
table.close(dismiss = True)
continue
# ------------------------------------------ process value field -----------------------------------
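The size/grid detection above recurs across these scripts; in isolation it behaves like this (Python 3 / NumPy sketch with made-up cell-center coordinates):

import numpy as np

pos = np.array([0.125, 0.375, 0.625, 0.875])        # 4 cell centers along x in [0,1]
coords = [np.unique(pos), np.array([0.5]), np.array([0.5])]

grid = np.array(list(map(len, coords)), 'i')        # -> [4 1 1]
mincorner = np.array(list(map(min, coords)))
maxcorner = np.array(list(map(max, coords)))

size = grid/np.maximum(np.ones(3, 'd'), grid-1.0) * (maxcorner-mincorner)  # span * n/(n-1)
size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1]))        # degenerate axes inherit smallest spacing
assert np.allclose(size, [1.0, 0.25, 0.25])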

View File

@@ -9,7 +9,9 @@ import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
#--------------------------------------------------------------------------------------------------
def gradFFT(geomdim,field):
grid = np.array(np.shape(field)[2::-1])
N = grid.prod() # field size
n = np.array(np.shape(field)[3:]).prod() # data size
@@ -17,7 +19,7 @@ def gradFFT(geomdim,field):
elif n == 1: dataType = 'scalar'
field_fourier = np.fft.fftpack.rfftn(field,axes=(0,1,2))
grad_fourier = np.zeros(field_fourier.shape+(3,),'c16')
# differentiation in Fourier space
k_s = np.zeros([3],'i')
@@ -61,17 +63,17 @@ Deals with both vector- and scalar fields.
parser.add_option('-c','--coordinates',
dest = 'coords',
type = 'string', metavar='string',
help = 'column heading for coordinates [%default]')
help = 'column label of coordinates [%default]')
parser.add_option('-v','--vector',
dest = 'vector',
action = 'extend', metavar = '<string LIST>',
help = 'heading of columns containing vector field values')
help = 'column label(s) of vector field values')
parser.add_option('-s','--scalar',
dest = 'scalar',
action = 'extend', metavar = '<string LIST>',
help = 'heading of columns containing scalar field values')
help = 'column label(s) of scalar field values')
parser.set_defaults(coords = 'ipinitialcoord',
parser.set_defaults(coords = 'pos',
)
(options,filenames) = parser.parse_args()
@@ -96,7 +98,7 @@ for name in filenames:
items = {
'scalar': {'dim': 1, 'shape': [1], 'labels':options.scalar, 'active':[], 'column': []},
'vector': {'dim': 3, 'shape': [3], 'labels':options.vector, 'active':[], 'column': []},
}
errors = []
remarks = []
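The core of gradFFT is differentiation in Fourier space; reduced to one dimension on a periodic unit interval, the idea is just multiplication by 2*pi*i*k (Python 3 / NumPy sketch):

import numpy as np

n = 32
x = np.linspace(0, 1, n, endpoint=False)
f = np.sin(2*np.pi*x)

k = np.arange(n//2 + 1)                       # frequencies kept by rfft
df_hat = np.fft.rfft(f) * (2j*np.pi*k)        # derivative operator in frequency space
df = np.fft.irfft(df_hat, n)

assert np.allclose(df, 2*np.pi*np.cos(2*np.pi*x))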

View File

@@ -5,7 +5,6 @@ import numpy as np
import damask
from optparse import OptionParser
from scipy import spatial
from collections import defaultdict
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
@@ -23,7 +22,7 @@ parser.add_option('-r', '--radius',
parser.add_option('-d', '--disorientation',
dest = 'disorientation',
type = 'float', metavar = 'float',
help = 'disorientation threshold per grain [%default] (degrees)')
help = 'disorientation threshold in degrees [%default]')
parser.add_option('-s', '--symmetry',
dest = 'symmetry',
type = 'string', metavar = 'string',
@@ -61,7 +60,8 @@ parser.add_option('-p', '--position',
type = 'string', metavar = 'string',
help = 'spatial position of voxel [%default]')
parser.set_defaults(symmetry = 'cubic',
parser.set_defaults(disorientation = 5,
symmetry = 'cubic',
coords = 'pos',
degrees = False,
)
@@ -86,17 +86,16 @@ if np.sum(input) != 1: parser.error('needs exactly one input format.')
(options.matrix,9,'matrix'),
(options.quaternion,4,'quaternion'),
][np.where(input)[0][0]] # select input label that was requested
toRadians = np.pi/180.0 if options.degrees else 1.0 # rescale degrees to radians
cos_disorientation = np.cos(options.disorientation/2.*toRadians)
cos_disorientation = np.cos(np.radians(options.disorientation/2.)) # cos of half the disorientation angle
# --- loop over input files -------------------------------------------------------------------------
if filenames == []: filenames = [None]
for name in filenames:
try:
table = damask.ASCIItable(name = name,
buffered = False)
try: table = damask.ASCIItable(name = name,
buffered = False)
except: continue
damask.util.report(scriptName,name)
@@ -109,8 +108,10 @@ for name in filenames:
errors = []
remarks = []
if table.label_dimension(options.coords) != 3: errors.append('coordinates {} are not a vector.'.format(options.coords))
if not np.all(table.label_dimension(label) == dim): errors.append('input {} does not have dimension {}.'.format(label,dim))
if not 3 >= table.label_dimension(options.coords) >= 1:
errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.coords))
if not np.all(table.label_dimension(label) == dim):
errors.append('input {} does not have dimension {}.'.format(label,dim))
else: column = table.label_index(label)
if remarks != []: damask.util.croak(remarks)
@@ -122,8 +123,10 @@ for name in filenames:
# ------------------------------------------ assemble header ---------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.labels_append('grainID_{}@{}'.format(label,
options.disorientation if options.degrees else np.degrees(options.disorientation))) # report orientation source and disorientation in degrees
table.labels_append('grainID_{}@{:g}'.format('+'.join(label)
if isinstance(label, (list,tuple))
else label,
options.disorientation)) # report orientation source and disorientation
table.head_write()
# ------------------------------------------ process data ------------------------------------------
@@ -162,7 +165,7 @@ for name in filenames:
time_delta = (time.clock()-tick) * (len(grainID) - p) / p
bg.set_message('(%02i:%02i:%02i) processing point %i of %i (grain count %i)...'\
%(time_delta//3600,time_delta%3600//60,time_delta%60,p,len(grainID),len(orientations)))
%(time_delta//3600,time_delta%3600//60,time_delta%60,p,len(grainID),np.count_nonzero(memberCounts)))
if inputtype == 'eulers':
o = damask.Orientation(Eulers = np.array(map(float,table.data[column:column+3]))*toRadians,
@@ -179,84 +182,51 @@ for name in filenames:
o = damask.Orientation(quaternion = np.array(map(float,table.data[column:column+4])),
symmetry = options.symmetry).reduced()
matched = False
alreadyChecked = {}
candidates = []
bestDisorientation = damask.Quaternion([0,0,0,1]) # initialize to 180 deg rotation as worst case
# check against last matched needs to be really picky. best would be to exclude jumps across the poke (checking distance between last and me?)
# when walking through neighborhood first check whether grainID of that point has already been tested, if yes, skip!
if matchedID != -1: # has matched before?
matched = (o.quaternion.conjugated() * orientations[matchedID].quaternion).w > cos_disorientation
if not matched:
alreadyChecked = {}
bestDisorientation = damask.Quaternion([0,0,0,1]) # initialize to 180 deg rotation as worst case
for i in kdtree.query_ball_point(kdtree.data[p],options.radius): # check all neighboring points
gID = grainID[i]
if gID != -1 and gID not in alreadyChecked: # indexed point belonging to a grain not yet tested?
alreadyChecked[gID] = True # remember not to check again
disorientation = o.disorientation(orientations[gID],SST = False)[0] # compare against other orientation
if disorientation.quaternion.w > cos_disorientation and \
disorientation.quaternion.w >= bestDisorientation.w: # within threshold and better than current best?
for i in kdtree.query_ball_point(kdtree.data[p],options.radius): # check all neighboring points
gID = grainID[i]
if gID != -1 and gID not in alreadyChecked: # indexed point belonging to a grain not yet tested?
alreadyChecked[gID] = True # remember not to check again
disorientation = o.disorientation(orientations[gID],SST = False)[0] # compare against other orientation
if disorientation.quaternion.w > cos_disorientation: # within threshold ...
candidates.append(gID) # remember as potential candidate
if disorientation.quaternion.w >= bestDisorientation.w: # ... and better than current best?
matched = True
matchedID = gID # remember that grain
bestDisorientation = disorientation.quaternion
if not matched: # no match -> new grain found
memberCounts += [1] # start new membership counter
if matched: # did match existing grain
memberCounts[matchedID] += 1
if len(candidates) > 1: # ambiguity in grain identification?
largestGrain = sorted(candidates,key=lambda x:memberCounts[x])[-1] # find largest among potential candidate grains
matchedID = largestGrain
for c in [c for c in candidates if c != largestGrain]: # loop over smaller candidates
memberCounts[largestGrain] += memberCounts[c] # reassign member count of smaller to largest
memberCounts[c] = 0
grainID = np.where(np.in1d(grainID,candidates), largestGrain, grainID) # relabel grid points of smaller candidates as largest one
else: # no match -> new grain found
orientations += [o] # initialize with current orientation
memberCounts += [1] # start new membership counter
matchedID = g
g += 1 # increment grain counter
else: # did match existing grain
memberCounts[matchedID] += 1
grainID[p] = matchedID # remember grain index assigned to point
p += 1 # increment point
bg.set_message('identifying similar orientations among {} grains...'.format(len(orientations)))
memberCounts = np.array(memberCounts)
similarOrientations = [[] for i in xrange(len(orientations))]
for i,orientation in enumerate(orientations[:-1]): # compare each identified orientation...
for j in xrange(i+1,len(orientations)): # ...against all others that were defined afterwards
if orientation.disorientation(orientations[j],SST = False)[0].quaternion.w > cos_disorientation: # similar orientations in both grainIDs?
similarOrientations[i].append(j) # remember in upper triangle...
similarOrientations[j].append(i) # ...and lower triangle of matrix
if similarOrientations[i] != []:
bg.set_message('grainID {} is as: {}'.format(i,' '.join(map(str,similarOrientations[i]))))
stillShifting = True
while stillShifting:
stillShifting = False
tick = time.clock()
for p,gID in enumerate(grainID): # walk through all points
if p > 0 and p % 1000 == 0:
time_delta = (time.clock()-tick) * (len(grainID) - p) / p
bg.set_message('(%02i:%02i:%02i) shifting ID of point %i out of %i (grain count %i)...'
%(time_delta//3600,time_delta%3600//60,time_delta%60,p,len(grainID),len(orientations)))
if similarOrientations[gID] != []: # orientation of my grainID is similar to someone else?
similarNeighbors = defaultdict(int) # frequency of neighboring grainIDs sharing my orientation
for i in kdtree.query_ball_point(kdtree.data[p],options.radius): # check all neighboring point
if grainID[i] in similarOrientations[gID]: # neighboring point shares my orientation?
similarNeighbors[grainID[i]] += 1 # remember its grainID
if similarNeighbors != {}: # found similar orientation(s) in neighborhood
candidates = np.array([gID]+similarNeighbors.keys()) # possible replacement grainIDs for me
grainID[p] = candidates[np.argsort(memberCounts[candidates])[-1]] # adopt ID that is most frequent in overall dataset
memberCounts[gID] -= 1 # my former ID loses one fellow
memberCounts[grainID[p]] += 1 # my new ID gains one fellow
bg.set_message('{}:{} --> {}'.format(p,gID,grainID[p])) # report switch of grainID
stillShifting = True
grainIDs = np.where(np.array(memberCounts) > 0)[0] # identify "live" grain identifiers
packingMap = dict(zip(list(grainIDs),range(len(grainIDs)))) # map to condense into consecutive IDs
table.data_rewind()
outputAlive = True
p = 0
while outputAlive and table.data_read(): # read next data line of ASCII table
table.data_append(1+grainID[p]) # add grain ID
table.data_append(1+packingMap[grainID[p]]) # add (condensed) grain ID
outputAlive = table.data_write() # output processed line
p += 1
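The matching test in this script reduces to comparing the scalar part of the misorientation quaternion against the cosine of half the threshold angle. A standalone sketch (Python 3 / NumPy; deliberately ignores the crystal symmetry that damask.Orientation handles):

import numpy as np

def quat_about_z(angle_deg):
    a = np.radians(angle_deg)
    return np.array([np.cos(a/2), 0.0, 0.0, np.sin(a/2)])    # (w,x,y,z) unit quaternion

def misorientation_w(q1, q2):
    """Scalar part of conj(q1)*q2 for unit quaternions."""
    return q1[0]*q2[0] + np.dot(q1[1:], q2[1:])

threshold = 5.0                                              # degrees
cos_disorientation = np.cos(np.radians(threshold/2))

assert misorientation_w(quat_about_z(0), quat_about_z(4)) > cos_disorientation   # within 5 deg
assert misorientation_w(quat_about_z(0), quat_about_z(6)) < cos_disorientation   # outside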

View File

@@ -22,7 +22,7 @@ Average each data block of size 'packing' into single values thus reducing the f
parser.add_option('-c','--coordinates',
dest = 'coords',
type = 'string', metavar = 'string',
help = 'column heading for coordinates [%default]')
help = 'column label of coordinates [%default]')
parser.add_option('-p','--packing',
dest = 'packing',
type = 'int', nargs = 3, metavar = 'int int int',
@@ -39,7 +39,7 @@ parser.add_option('-s', '--size',
dest = 'size',
type = 'float', nargs = 3, metavar = 'float float float',
help = 'size in x,y,z [autodetect]')
parser.set_defaults(coords = 'ipinitialcoord',
parser.set_defaults(coords = 'pos',
packing = (2,2,2),
shift = (0,0,0),
grid = (0,0,0),
@@ -59,11 +59,10 @@ if any(shift != 0): prefix += 'shift{:+}{:+}{:+}_'.format(*shift)
if filenames == []: filenames = [None]
for name in filenames:
try:
table = damask.ASCIItable(name = name,
outname = os.path.join(os.path.dirname(name),
prefix+os.path.basename(name)) if name else name,
buffered = False)
try: table = damask.ASCIItable(name = name,
outname = os.path.join(os.path.dirname(name),
prefix+os.path.basename(name)) if name else name,
buffered = False)
except: continue
damask.util.report(scriptName,name)
@@ -75,7 +74,6 @@ for name in filenames:
errors = []
remarks = []
colCoord = None
if table.label_dimension(options.coords) != 3: errors.append('coordinates {} are not a vector.'.format(options.coords))
else: colCoord = table.label_index(options.coords)
@@ -86,7 +84,6 @@ for name in filenames:
table.close(dismiss = True)
continue
# ------------------------------------------ assemble header ---------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))

View File

@@ -19,83 +19,82 @@ to resolution*packing.
""", version = scriptID)
parser.add_option('-c','--coordinates', dest='coords', metavar='string',
help='column heading for coordinates [%default]')
parser.add_option('-p','--packing', dest='packing', type='int', nargs=3, metavar='int int int',
help='dimension of packed group [%default]')
parser.add_option('-g','--grid', dest='resolution', type='int', nargs=3, metavar='int int int',
help='resolution in x,y,z [autodetect]')
parser.add_option('-s','--size', dest='dimension', type='float', nargs=3, metavar='int int int',
help='dimension in x,y,z [autodetect]')
parser.set_defaults(coords = 'ipinitialcoord')
parser.set_defaults(packing = (2,2,2))
parser.set_defaults(grid = (0,0,0))
parser.set_defaults(size = (0.0,0.0,0.0))
parser.add_option('-c','--coordinates',
dest = 'coords', metavar = 'string',
help = 'column label of coordinates [%default]')
parser.add_option('-p','--packing',
dest = 'packing', type = 'int', nargs = 3, metavar = 'int int int',
help = 'dimension of packed group [%default]')
parser.add_option('-g','--grid',
dest = 'resolution', type = 'int', nargs = 3, metavar = 'int int int',
help = 'resolution in x,y,z [autodetect]')
parser.add_option('-s','--size',
dest = 'dimension', type = 'float', nargs = 3, metavar = 'int int int',
help = 'dimension in x,y,z [autodetect]')
parser.set_defaults(coords = 'pos',
packing = (2,2,2),
grid = (0,0,0),
size = (0.0,0.0,0.0),
)
(options,filenames) = parser.parse_args()
options.packing = np.array(options.packing)
prefix = 'blowUp%ix%ix%i_'%(options.packing[0],options.packing[1],options.packing[2])
prefix = 'blowUp{}x{}x{}_'.format(*options.packing)
# --- loop over input files -------------------------------------------------------------------------
if filenames == []: filenames = [None]
for name in filenames:
try:
table = damask.ASCIItable(name = name,
outname = os.path.join(os.path.dirname(name),
prefix+ \
os.path.basename(name)) if name else name,
buffered = False)
try: table = damask.ASCIItable(name = name,
outname = os.path.join(os.path.dirname(name),
prefix+os.path.basename(name)) if name else name,
buffered = False)
except: continue
damask.util.report(scriptName,name)
# ------------------------------------------ read header ------------------------------------------
table.head_read()
errors = []
# ------------------------------------------ sanity checks ----------------------------------------
if table.label_dimension(options.coords) != 3:
damask.util.croak('coordinates {} are not a vector.'.format(options.coords))
errors = []
remarks = []
if table.label_dimension(options.coords) != 3: errors.append('coordinates {} are not a vector.'.format(options.coords))
else: colCoord = table.label_index(options.coords)
colElem = table.label_index('elem')
if remarks != []: damask.util.croak(remarks)
if errors != []:
damask.util.croak(errors)
table.close(dismiss = True)
continue
else:
coordCol = table.label_index(options.coords)
# ------------------------------------------ assemble header --------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
# --------------- figure out size and grid ---------------------------------------------------------
table.data_readArray()
coords = [{},{},{}]
for i in xrange(len(table.data)):
for j in xrange(3):
coords[j][str(table.data[i,coordCol+j])] = True
grid = np.array(map(len,coords),'i')
size = grid/np.maximum(np.ones(3,'d'),grid-1.0)* \
np.array([max(map(float,coords[0].keys()))-min(map(float,coords[0].keys())),\
max(map(float,coords[1].keys()))-min(map(float,coords[1].keys())),\
max(map(float,coords[2].keys()))-min(map(float,coords[2].keys())),\
],'d') # size from bounding box, corrected for cell-centeredness
size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 set to smallest among other spacings
table.data_readArray(options.coords)
coords = [np.unique(table.data[:,i]) for i in xrange(3)]
mincorner = np.array(map(min,coords))
maxcorner = np.array(map(max,coords))
grid = np.array(map(len,coords),'i')
size = grid/np.maximum(np.ones(3,'d'), grid-1.0) * (maxcorner-mincorner) # size from edge to edge = dim * n/(n-1)
size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 set to smallest among other spacings
packing = np.array(options.packing,'i')
outSize = grid*packing
# ------------------------------------------ assemble header --------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.head_write()
# ------------------------------------------ process data -------------------------------------------
table.data_rewind()
data = np.zeros(outSize.tolist()+[len(table.labels)])
p = np.zeros(3,'i')
@@ -107,15 +106,15 @@ for name in filenames:
table.data_read()
data[d[0]:d[0]+packing[0],
d[1]:d[1]+packing[1],
d[2]:d[2]+packing[2],
: ] = np.tile(np.array(table.data_asFloat(),'d'),packing.tolist()+[1]) # tile to match blowUp voxel size
elementSize = size/grid/packing
elem = 1
for c in xrange(outSize[2]):
for b in xrange(outSize[1]):
for a in xrange(outSize[0]):
data[a,b,c,coordCol:coordCol+3] = [a+0.5,b+0.5,c+0.5]*elementSize
data[a,b,c,table.label_index('elem')] = elem
data[a,b,c,colCoord:colCoord+3] = [a+0.5,b+0.5,c+0.5]*elementSize
if colElem != -1: data[a,b,c,colElem] = elem
table.data = data[a,b,c,:].tolist()
outputAlive = table.data_write() # output processed line
elem += 1
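The tiling step above copies one input row over a whole packing-sized block of output voxels; in isolation (Python 3 / NumPy, made-up data):

import numpy as np

packing = np.array([2, 2, 2])
row = np.array([4.2, 7.7])                     # one cell's data values
block = np.tile(row, packing.tolist() + [1])   # replicate over a 2x2x2 block

assert block.shape == (2, 2, 2, 2)
assert np.all(block[..., 0] == 4.2)            # every voxel repeats the row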

View File

@@ -1003,11 +1003,8 @@ fileOpen = False
assembleHeader = True
header = []
standard = ['inc'] + \
{True: ['time'],
False:[]}[options.time] + \
['elem','node','ip','grain'] + \
{True: ['1_nodeinitialcoord','2_nodeinitialcoord','3_nodeinitialcoord'],
False:['1_ipinitialcoord','2_ipinitialcoord','3_ipinitialcoord']}[options.nodalScalar != []]
['time'] if options.time else [] + \
['elem','node','ip','grain','1_pos','2_pos','3_pos']
# --------------------------- loop over positions --------------------------------
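If the rewritten expression really carries no parentheses, note that Python's conditional expression binds more loosely than '+', so the tail of the list is silently dropped whenever time is requested. A minimal demonstration (plain Python) of the pitfall and the parenthesized fix:

time_wanted = True

broken = ['inc'] + ['time'] if time_wanted else [] + ['elem', 'node']
fixed  = ['inc'] + (['time'] if time_wanted else []) + ['elem', 'node']

assert broken == ['inc', 'time']                  # 'elem' and 'node' are lost
assert fixed  == ['inc', 'time', 'elem', 'node']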

View File

@@ -1,8 +1,9 @@
#!/usr/bin/env python
# -*- coding: UTF-8 no BOM -*-
import os,sys,vtk
import os,vtk
import damask
from collections import defaultdict
from optparse import OptionParser
scriptName = os.path.splitext(os.path.basename(__file__))[0]
@@ -17,125 +18,166 @@ Add scalar and RGB tuples from ASCIItable to existing VTK point cloud (.vtp).
""", version = scriptID)
parser.add_option('-v', '--vtk', dest='vtk', \
parser.add_option( '--vtk',
dest = 'vtk',
type = 'string', metavar = 'string',
help = 'VTK file name')
parser.add_option( '--inplace',
dest = 'inplace',
action = 'store_true',
help = 'modify VTK file in-place')
parser.add_option('-r', '--render',
dest = 'render',
action = 'store_true',
help = 'open output in VTK render window')
parser.add_option('-s', '--scalar', dest='scalar', action='extend', \
help = 'scalar values')
parser.add_option('-v', '--vector',
dest = 'vector',
action = 'extend', metavar = '<string LIST>',
help = 'vector value label(s)')
parser.add_option('-c', '--color', dest='color', action='extend', \
help = 'RGB color tuples')
parser.set_defaults(scalar = [])
parser.set_defaults(color = [])
parser.set_defaults(scalar = [],
vector = [],
color = [],
inplace = False,
render = False,
)
(options, filenames) = parser.parse_args()
datainfo = { # list of requested labels per datatype
'scalar': {'len':1,
'label':[]},
'color': {'len':3,
'label':[]},
}
if not options.vtk: parser.error('No VTK file specified.')
if not os.path.exists(options.vtk): parser.error('VTK file does not exist.')
if not os.path.exists(options.vtk):
parser.error('VTK file does not exist'); sys.exit()
if os.path.splitext(options.vtk)[1] == '.vtp':
reader = vtk.vtkXMLPolyDataReader()
reader.SetFileName(options.vtk)
reader.Update()
Polydata = reader.GetOutput()
elif os.path.splitext(options.vtk)[1] == '.vtk':
reader = vtk.vtkGenericDataObjectReader()
reader.SetFileName(options.vtk)
reader.Update()
Polydata = reader.GetPolyDataOutput()
else:
parser.error('Unsupported VTK file type extension.')
reader = vtk.vtkXMLPolyDataReader()
reader.SetFileName(options.vtk)
reader.Update()
Npoints = reader.GetNumberOfPoints()
Ncells = reader.GetNumberOfCells()
Nvertices = reader.GetNumberOfVerts()
Polydata = reader.GetOutput()
Npoints = Polydata.GetNumberOfPoints()
Ncells = Polydata.GetNumberOfCells()
Nvertices = Polydata.GetNumberOfVerts()
if Npoints != Ncells or Npoints != Nvertices:
parser.error('Number of points, cells, and vertices in VTK differ from each other'); sys.exit()
if options.scalar is not None: datainfo['scalar']['label'] += options.scalar
if options.color is not None: datainfo['color']['label'] += options.color
parser.error('Number of points, cells, and vertices in VTK differ from each other.')
# ------------------------------------------ setup file handles ---------------------------------------
damask.util.croak('{}: {} points, {} vertices, and {} cells...'.format(options.vtk,Npoints,Nvertices,Ncells))
files = []
if filenames == []:
files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
else:
for name in filenames:
if os.path.exists(name):
files.append({'name':name, 'input':open(name), 'output':sys.stderr, 'croak':sys.stderr})
# --- loop over input files -------------------------------------------------------------------------
#--- loop over input files ------------------------------------------------------------------------
for file in files:
if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
if filenames == []: filenames = [None]
table = damask.ASCIItable(file['input'],file['output'],False) # make unbuffered ASCII_table
table.head_read() # read ASCII header info
for name in filenames:
try: table = damask.ASCIItable(name = name,
buffered = False,
readonly = True)
except: continue
damask.util.report(scriptName, name)
# --------------- figure out columns to process
active = {}
column = {}
# --- interpret header ----------------------------------------------------------------------------
array = {}
table.head_read()
remarks = []
errors = []
VTKarray = {}
active = defaultdict(list)
for datatype,info in datainfo.items():
for label in info['label']:
foundIt = False
for key in ['1_'+label,label]:
if key in table.labels:
foundIt = True
if datatype not in active: active[datatype] = []
if datatype not in column: column[datatype] = {}
if datatype not in array: array[datatype] = {}
active[datatype].append(label)
column[datatype][label] = table.labels.index(key) # remember columns of requested data
if datatype == 'scalar':
array[datatype][label] = vtk.vtkDoubleArray()
array[datatype][label].SetNumberOfComponents(1)
array[datatype][label].SetName(label)
elif datatype == 'color':
array[datatype][label] = vtk.vtkUnsignedCharArray()
array[datatype][label].SetNumberOfComponents(3)
array[datatype][label].SetName(label)
if not foundIt:
file['croak'].write('column %s not found...\n'%label)
for datatype,dimension,label in [['scalar',1,options.scalar],
['vector',3,options.vector],
['color',3,options.color],
]:
for i,dim in enumerate(table.label_dimension(label)):
me = label[i]
if dim == -1: remarks.append('{} "{}" not found...'.format(datatype,me))
elif dim > dimension: remarks.append('"{}" not of dimension {}...'.format(me,dimension))
else:
remarks.append('adding {} "{}"...'.format(datatype,me))
active[datatype].append(me)
if datatype in ['scalar','vector']: VTKarray[me] = vtk.vtkDoubleArray()
elif datatype == 'color': VTKarray[me] = vtk.vtkUnsignedCharArray()
VTKarray[me].SetNumberOfComponents(dimension)
VTKarray[me].SetName(label[i])
if remarks != []: damask.util.croak(remarks)
if errors != []:
damask.util.croak(errors)
table.close(dismiss = True)
continue
# ------------------------------------------ process data ---------------------------------------
while table.data_read(): # read next data line of ASCII table
for datatype,labels in active.items(): # loop over scalar,color
for label in labels: # loop over all requested items
theData = table.data[column[datatype][label]:\
column[datatype][label]+datainfo[datatype]['len']] # read strings
if datatype == 'color':
theData = map(lambda x: int(255.*float(x)),theData)
array[datatype][label].InsertNextTuple3(theData[0],theData[1],theData[2],)
elif datatype == 'scalar':
array[datatype][label].InsertNextValue(float(theData[0]))
for datatype,labels in active.items(): # loop over scalar,color
for me in labels: # loop over all requested items
theData = [table.data[i] for i in table.label_indexrange(me)] # read strings
if datatype == 'color': VTKarray[me].InsertNextTuple3(*map(lambda x: int(255.*float(x)),theData))
elif datatype == 'vector': VTKarray[me].InsertNextTuple3(*map(float,theData))
elif datatype == 'scalar': VTKarray[me].InsertNextValue(float(theData[0]))
table.input_close() # close input ASCII table
# ------------------------------------------ add data ---------------------------------------
for datatype,labels in active.items(): # loop over scalar,color
if datatype == 'color':
Polydata.GetPointData().SetScalars(array[datatype][labels[0]])
Polydata.GetCellData().SetScalars(array[datatype][labels[0]])
for label in labels: # loop over all requested items
Polydata.GetPointData().AddArray(array[datatype][label])
Polydata.GetCellData().AddArray(array[datatype][label])
Polydata.GetPointData().SetScalars(VTKarray[active['color'][0]])
Polydata.GetCellData().SetScalars(VTKarray[active['color'][0]])
for me in labels: # loop over all requested items
Polydata.GetPointData().AddArray(VTKarray[me])
Polydata.GetCellData().AddArray(VTKarray[me])
Polydata.Modified()
if vtk.VTK_MAJOR_VERSION <= 5:
Polydata.Update()
if vtk.VTK_MAJOR_VERSION <= 5: Polydata.Update()
# ------------------------------------------ output result ---------------------------------------
writer = vtk.vtkXMLPolyDataWriter()
writer.SetDataModeToBinary()
writer.SetCompressorTypeToZLib()
writer.SetFileName(os.path.splitext(options.vtk)[0]+'_added.vtp')
if vtk.VTK_MAJOR_VERSION <= 5:
writer.SetInput(Polydata)
else:
writer.SetInputData(Polydata)
writer.Write()
writer = vtk.vtkXMLPolyDataWriter()
writer.SetDataModeToBinary()
writer.SetCompressorTypeToZLib()
writer.SetFileName(os.path.splitext(options.vtk)[0]+('.vtp' if options.inplace else '_added.vtp'))
if vtk.VTK_MAJOR_VERSION <= 5: writer.SetInput(Polydata)
else: writer.SetInputData(Polydata)
writer.Write()
# ------------------------------------------ render result ---------------------------------------
if options.render:
mapper = vtk.vtkDataSetMapper()
mapper.SetInputData(Polydata)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# Create the graphics structure. The renderer renders into the
# render window. The render window interactor captures mouse events
# and will perform appropriate camera or actor manipulation
# depending on the nature of the events.
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
ren.AddActor(actor)
ren.SetBackground(1, 1, 1)
renWin.SetSize(200, 200)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
iren.Initialize()
renWin.Render()
iren.Start()
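The array plumbing used above, reduced to its minimum (Python 3, requires the vtk package; names are illustrative):

import vtk

scalars = vtk.vtkDoubleArray()
scalars.SetNumberOfComponents(1)
scalars.SetName('phase')
scalars.InsertNextValue(1.0)

colors = vtk.vtkUnsignedCharArray()
colors.SetNumberOfComponents(3)                # RGB triplets
colors.SetName('rgb')
colors.InsertNextTuple3(255, 0, 0)             # 0..1 table data is scaled by 255 above

polydata = vtk.vtkPolyData()
polydata.GetPointData().AddArray(scalars)
polydata.GetPointData().SetScalars(colors)     # color array becomes the active scalars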

View File

@@ -30,10 +30,6 @@ parser.add_option('-r', '--render',
dest = 'render',
action = 'store_true',
help = 'open output in VTK render window')
parser.add_option('-m', '--mode',
dest = 'mode',
type = 'choice', metavar = 'string', choices = ['cell', 'point'],
help = 'cell-centered or point-centered data')
parser.add_option('-s', '--scalar',
dest = 'scalar',
action = 'extend', metavar = '<string LIST>',
@@ -56,7 +52,6 @@ parser.set_defaults(scalar = [],
(options, filenames) = parser.parse_args()
if not options.mode: parser.error('No data mode specified.')
if not options.vtk: parser.error('No VTK file specified.')
if not os.path.exists(options.vtk): parser.error('VTK file does not exist.')
@@ -83,9 +78,9 @@ damask.util.croak('{}: {} points and {} cells...'.format(options.vtk,Npoints,Nce
if filenames == []: filenames = [None]
for name in filenames:
try:
table = damask.ASCIItable(name = name,
buffered = False, readonly = True)
try: table = damask.ASCIItable(name = name,
buffered = False,
readonly = True)
except: continue
damask.util.report(scriptName, name)
@@ -124,8 +119,11 @@ for name in filenames:
# ------------------------------------------ process data ---------------------------------------
datacount = 0
while table.data_read(): # read next data line of ASCII table
datacount += 1 # count data lines
for datatype,labels in active.items(): # loop over scalar,color
for me in labels: # loop over all requested items
theData = [table.data[i] for i in table.label_indexrange(me)] # read strings
@@ -133,15 +131,25 @@ for name in filenames:
elif datatype == 'vector': VTKarray[me].InsertNextTuple3(*map(float,theData))
elif datatype == 'scalar': VTKarray[me].InsertNextValue(float(theData[0]))
table.close() # close input ASCII table
# ------------------------------------------ add data ---------------------------------------
if datacount == Npoints: mode = 'point'
elif datacount == Ncells: mode = 'cell'
else:
damask.util.croak('Data count is incompatible with grid...')
continue
damask.util.croak('{} mode...'.format(mode))
for datatype,labels in active.items(): # loop over scalar,color
if datatype == 'color':
if options.mode == 'cell': rGrid.GetCellData().SetScalars(VTKarray[active['color'][0]])
elif options.mode == 'point': rGrid.GetPointData().SetScalars(VTKarray[active['color'][0]])
if mode == 'cell': rGrid.GetCellData().SetScalars(VTKarray[active['color'][0]])
elif mode == 'point': rGrid.GetPointData().SetScalars(VTKarray[active['color'][0]])
for me in labels: # loop over all requested items
if options.mode == 'cell': rGrid.GetCellData().AddArray(VTKarray[me])
elif options.mode == 'point': rGrid.GetPointData().AddArray(VTKarray[me])
if mode == 'cell': rGrid.GetCellData().AddArray(VTKarray[me])
elif mode == 'point': rGrid.GetPointData().AddArray(VTKarray[me])
rGrid.Modified()
if vtk.VTK_MAJOR_VERSION <= 5: rGrid.Update()

View File

@@ -18,12 +18,12 @@ Produce a VTK point cloud dataset based on coordinates given in an ASCIItable.
""", version = scriptID)
parser.add_option('-d', '--deformed',
dest = 'deformed',
parser.add_option('-c', '--coordinates',
dest = 'pos',
type = 'string', metavar = 'string',
help = 'deformed coordinate label [%default]')
help = 'coordinate label [%default]')
parser.set_defaults(deformed = 'ipdeformedcoord'
parser.set_defaults(pos = 'pos'
)
(options, filenames) = parser.parse_args()
@@ -46,9 +46,9 @@ for name in filenames:
errors = []
remarks = []
coordDim = table.label_dimension(options.deformed)
if not 3 >= coordDim >= 1: errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.deformed))
elif coordDim < 3: remarks.append('appending {} dimensions to coordinates "{}"...'.format(3-coordDim,options.deformed))
coordDim = table.label_dimension(options.pos)
if not 3 >= coordDim >= 1: errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.pos))
elif coordDim < 3: remarks.append('appending {} dimensions to coordinates "{}"...'.format(3-coordDim,options.pos))
if remarks != []: damask.util.croak(remarks)
if errors != []:
@@ -58,7 +58,7 @@ for name in filenames:
# ------------------------------------------ process data ---------------------------------------
table.data_readArray(options.deformed)
table.data_readArray(options.pos)
if len(table.data.shape) < 2: table.data.shape += (1,) # expand to 2D shape
if table.data.shape[1] < 3:
table.data = np.hstack((table.data,

View File

@@ -15,7 +15,7 @@ scriptID = ' '.join([scriptName,damask.version])
# --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
Create regular voxel grid from points in an ASCIItable.
Create regular voxel grid from points in an ASCIItable (or geom file).
""", version = scriptID)
@@ -24,11 +24,11 @@ parser.add_option('-m', '--mode',
type = 'choice', choices = ['cell','point'],
help = 'cell-centered or point-centered coordinates ')
parser.add_option('-c', '--coordinates',
dest = 'position',
dest = 'coords',
type = 'string', metavar = 'string',
help = 'coordinate label [%default]')
parser.set_defaults(position ='ipinitialcoord',
mode ='cell'
parser.set_defaults(coords = 'pos',
mode = 'cell'
)
(options, filenames) = parser.parse_args()
@@ -38,9 +38,12 @@ parser.set_defaults(position ='ipinitialcoord',
if filenames == []: filenames = [None]
for name in filenames:
try:
table = damask.ASCIItable(name = name,
buffered = False, readonly = True)
isGeom = name.endswith('.geom')
try: table = damask.ASCIItable(name = name,
buffered = False,
labeled = not isGeom,
readonly = True,
)
except: continue
damask.util.report(scriptName,name)
@@ -48,10 +51,13 @@ for name in filenames:
table.head_read()
errors = []
if table.label_dimension(options.position) != 3:
errors.append('coordinates {} are not a vector.'.format(options.position))
remarks = []
errors = []
coordDim = 3 if isGeom else table.label_dimension(options.coords)
if not 3 >= coordDim >= 1: errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.coords))
elif coordDim < 3: remarks.append('appending {} dimensions to coordinates "{}"...'.format(3-coordDim,options.coords))
if remarks != []: damask.util.croak(remarks)
if errors != []:
damask.util.croak(errors)
table.close(dismiss=True)
@@ -59,17 +65,33 @@ for name in filenames:
# --------------- figure out size and grid ---------------------------------------------------------
table.data_readArray(options.position)
if isGeom:
info,extra_header = table.head_getGeom()
coords = [np.linspace(info['origin'][i],
info['origin'][i]+info['size'][i],
num = info['grid'][i]+1,
endpoint = True,
) for i in xrange(3)]
else:
table.data_readArray(options.coords)
if len(table.data.shape) < 2: table.data.shape += (1,) # expand to 2D shape
if table.data.shape[1] < 3:
table.data = np.hstack((table.data,
np.zeros((table.data.shape[0],
3-table.data.shape[1]),dtype='f'))) # fill coords up to 3D with zeros
coords = [np.unique(table.data[:,i]) for i in xrange(3)]
if options.mode == 'cell':
coords = [0.5 * np.array([3.0 * coords[i][0] - coords[i][0 + len(coords[i]) > 1]] + \
[coords[i][j-1] + coords[i][j] for j in xrange(1,len(coords[i]))] + \
[3.0 * coords[i][-1] - coords[i][-1 - (len(coords[i]) > 1)]]) for i in xrange(3)]
grid = np.array(map(len,coords),'i')
N = grid.prod() if options.mode == 'point' else (grid-1).prod()
coords = [np.unique(table.data[:,i]) for i in xrange(3)]
if N != len(table.data): errors.append('data count {} does not match grid {}x{}x{}.'.format(N,*(grid - options.mode == 'cell') ))
if options.mode == 'cell':
coords = [0.5 * np.array([3.0 * coords[i][0] - coords[i][0 + len(coords[i]) > 1]] + \
[coords[i][j-1] + coords[i][j] for j in xrange(1,len(coords[i]))] + \
[3.0 * coords[i][-1] - coords[i][-1 - (len(coords[i]) > 1)]]) for i in xrange(3)]
grid = np.array(map(len,coords),'i')
N = grid.prod() if options.mode == 'point' or isGeom else (grid-1).prod()
if not isGeom and N != len(table.data):
errors.append('data count {} does not match grid {}x{}x{}.'.format(N,*(grid - (options.mode == 'cell')) ))
if errors != []:
damask.util.croak(errors)
table.close(dismiss = True)
@@ -100,9 +122,9 @@ for name in filenames:
(directory,filename) = os.path.split(name)
writer.SetDataModeToBinary()
writer.SetCompressorTypeToZLib()
writer.SetFileName(os.path.join(directory,os.path.splitext(filename)[0] \
+'_{}({})'.format(options.position, options.mode) \
+'.'+writer.GetDefaultFileExtension()))
writer.SetFileName(os.path.join(directory,os.path.splitext(filename)[0] +
('' if isGeom else '_{}({})'.format(options.coords, options.mode)) +
'.' + writer.GetDefaultFileExtension()))
else:
writer = vtk.vtkDataSetWriter()
writer.WriteToOutputStringOn()
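The 'cell' mode above turns n cell centers into n+1 cell boundaries by averaging neighboring centers and mirroring at the ends; standalone (Python 3 / NumPy sketch, uniform spacing):

import numpy as np

c = np.array([0.5, 1.5, 2.5, 3.5])             # cell centers, spacing 1
bounds = 0.5 * np.array([3.0*c[0] - c[1]] +
                        [c[j-1] + c[j] for j in range(1, len(c))] +
                        [3.0*c[-1] - c[-2]])

assert np.allclose(bounds, [0., 1., 2., 3., 4.])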

View File

@@ -6,15 +6,12 @@ import numpy as np
import damask
from scipy import ndimage
from optparse import OptionParser
from collections import defaultdict
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
def mostFrequent(arr):
d = defaultdict(int)
for i in arr: d[i] += 1
return sorted(d.iteritems(), key=lambda x: x[1], reverse=True)[0][0] # return value of most frequent microstructure
return np.argmax(np.bincount(arr))
#--------------------------------------------------------------------------------------------------
@@ -43,10 +40,9 @@ parser.set_defaults(stencil = 3,
if filenames == []: filenames = [None]
for name in filenames:
try:
table = damask.ASCIItable(name = name,
buffered = False,
labeled = False)
try: table = damask.ASCIItable(name = name,
buffered = False,
labeled = False)
except: continue
damask.util.report(scriptName,name)
@@ -72,7 +68,7 @@ for name in filenames:
# --- read data ------------------------------------------------------------------------------------
microstructure = table.microstructure_read(info['grid']).reshape(info['grid'],order='F') # read microstructure
# --- do work ------------------------------------------------------------------------------------
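The rewritten mostFrequent trades the hand-rolled counting dict for np.bincount; a quick equivalence check (Python 3 / NumPy; assumes non-negative integer microstructure indices, and ties may resolve differently):

import numpy as np
from collections import defaultdict

def most_frequent_dict(arr):
    d = defaultdict(int)
    for i in arr: d[i] += 1
    return max(d.items(), key=lambda kv: kv[1])[0]

def most_frequent_bincount(arr):
    return np.argmax(np.bincount(arr))

arr = np.array([3, 1, 3, 2, 3, 1])
assert most_frequent_dict(arr) == most_frequent_bincount(arr) == 3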