diff --git a/processing/post/addCompatibilityMismatch.py b/processing/post/addCompatibilityMismatch.py index 14950cfec..883731b79 100755 --- a/processing/post/addCompatibilityMismatch.py +++ b/processing/post/addCompatibilityMismatch.py @@ -46,13 +46,14 @@ parser.set_defaults(coords = 'ipinitialcoord', # --- loop over input files ------------------------------------------------------------------------- -if filenames == []: filenames = ['STDIN'] +if filenames == []: filenames = [None] for name in filenames: - if not (name == 'STDIN' or os.path.exists(name)): continue - table = damask.ASCIItable(name = name, outname = name+'_tmp', - buffered = False) - table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else '')) + try: + table = damask.ASCIItable(name = name, + buffered = False) + except: continue + table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name else '')) # ------------------------------------------ read header ------------------------------------------ @@ -86,18 +87,12 @@ for name in filenames: table.data_readArray() - coords = [{},{},{}] - for i in xrange(len(table.data)): - for j in xrange(3): - coords[j][str(table.data[i,colCoord+j])] = True - grid = np.array(map(len,coords),'i') - size = grid/np.maximum(np.ones(3,'d'),grid-1.0)* \ - np.array([max(map(float,coords[0].keys()))-min(map(float,coords[0].keys())),\ - max(map(float,coords[1].keys()))-min(map(float,coords[1].keys())),\ - max(map(float,coords[2].keys()))-min(map(float,coords[2].keys())),\ - ],'d') # size from bounding box, corrected for cell-centeredness - - size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 equal to smallest among other spacings + coords = [np.unique(table.data[:,colCoord+i]) for i in xrange(3)] + mincorner = np.array(map(min,coords)) + maxcorner = np.array(map(max,coords)) + grid = np.array(map(len,coords),'i') + size = grid/np.maximum(np.ones(3,'d'), grid-1.0) * (maxcorner-mincorner) # size from edge to edge = 
dim * n/(n-1) + size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 equal to smallest among other spacings N = grid.prod() @@ -115,9 +110,8 @@ for name in filenames: # ------------------------------------------ output result ----------------------------------------- if len(stack) > 1: table.data = np.hstack(tuple(stack)) - table.data_writeArray('%.12g') + table.data_writeArray() # ------------------------------------------ output finalization ----------------------------------- table.close() # close ASCII tables - if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new diff --git a/processing/post/addDeformedConfiguration.py b/processing/post/addDeformedConfiguration.py index a45d6d80b..8e756bd5f 100755 --- a/processing/post/addDeformedConfiguration.py +++ b/processing/post/addDeformedConfiguration.py @@ -42,13 +42,14 @@ parser.set_defaults(coords = 'ipinitialcoord', # --- loop over input files ------------------------------------------------------------------------- -if filenames == []: filenames = ['STDIN'] +if filenames == []: filenames = [None] for name in filenames: - if not (name == 'STDIN' or os.path.exists(name)): continue - table = damask.ASCIItable(name = name, outname = name+'_tmp', - buffered = False) - table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else '')) + try: + table = damask.ASCIItable(name = name, + buffered = False) + except: continue + table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name else '')) # ------------------------------------------ read header ------------------------------------------ @@ -75,21 +76,21 @@ for name in filenames: table.data_readArray() - coords = [{},{},{}] - for i in xrange(len(table.data)): - for j in xrange(3): - coords[j][str(table.data[i,colCoord+j])] = True - grid = np.array(map(len,coords),'i') - size = grid/np.maximum(np.ones(3,'d'),grid-1.0)* \ - 
np.array([max(map(float,coords[0].keys()))-min(map(float,coords[0].keys())),\ - max(map(float,coords[1].keys()))-min(map(float,coords[1].keys())),\ - max(map(float,coords[2].keys()))-min(map(float,coords[2].keys())),\ - ],'d') # size from bounding box, corrected for cell-centeredness - - size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 equal to smallest among other spacings + coords = [np.unique(table.data[:,colCoord+i]) for i in xrange(3)] + mincorner = np.array(map(min,coords)) + maxcorner = np.array(map(max,coords)) + grid = np.array(map(len,coords),'i') + size = grid/np.maximum(np.ones(3,'d'), grid-1.0) * (maxcorner-mincorner) # size from edge to edge = dim * n/(n-1) + size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 equal to smallest among other spacings N = grid.prod() - + + if N != len(table.data): errors.append('data count {} does not match grid {}x{}x{}.'.format(N,*grid)) + if errors != []: + table.croak(errors) + table.close(dismiss = True) + continue + # ------------------------------------------ assemble header --------------------------------------- table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) @@ -107,9 +108,8 @@ for name in filenames: # ------------------------------------------ output result ----------------------------------------- if len(stack) > 1: table.data = np.hstack(tuple(stack)) - table.data_writeArray('%.12g') + table.data_writeArray() # ------------------------------------------ output finalization ----------------------------------- table.close() # close ASCII tables - if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new diff --git a/processing/post/addDeterminant.py b/processing/post/addDeterminant.py index f07490982..dc25e6303 100755 --- a/processing/post/addDeterminant.py +++ b/processing/post/addDeterminant.py @@ -38,13 +38,14 @@ if options.tensor == None: # --- loop over input files 
------------------------------------------------------------------------- -if filenames == []: filenames = ['STDIN'] +if filenames == []: filenames = [None] for name in filenames: - if not (name == 'STDIN' or os.path.exists(name)): continue - table = damask.ASCIItable(name = name, outname = name+'_tmp', - buffered = False) - table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else '')) + try: + table = damask.ASCIItable(name = name, + buffered = False) + except: continue + table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name else '')) # ------------------------------------------ read header ------------------------------------------ @@ -90,4 +91,3 @@ for name in filenames: # ------------------------------------------ output finalization ----------------------------------- table.close() # close input ASCII table (works for stdin) - if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new diff --git a/processing/post/addGrainID.py b/processing/post/addGrainID.py index 462b9dbb4..96ed6459b 100755 --- a/processing/post/addGrainID.py +++ b/processing/post/addGrainID.py @@ -127,13 +127,14 @@ toRadians = math.pi/180.0 if options.degrees else 1.0 # --- loop over input files ------------------------------------------------------------------------- -if filenames == []: filenames = ['STDIN'] +if filenames == []: filenames = [None] for name in filenames: - if not (name == 'STDIN' or os.path.exists(name)): continue - table = damask.ASCIItable(name = name, outname = name+'_tmp', - buffered = False) - table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else '')) + try: + table = damask.ASCIItable(name = name, + buffered = False) + except: continue + table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name else '')) # ------------------------------------------ read header ------------------------------------------- @@ -296,4 +297,3 @@ for name in filenames: # ------------------------------------------ 
output finalization ----------------------------------- table.close() # close ASCII tables - if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new diff --git a/processing/post/addIPFcolor.py b/processing/post/addIPFcolor.py index 9d34d012c..a6c16ce1c 100755 --- a/processing/post/addIPFcolor.py +++ b/processing/post/addIPFcolor.py @@ -83,13 +83,14 @@ pole /= np.linalg.norm(pole) # --- loop over input files ------------------------------------------------------------------------ -if filenames == []: filenames = ['STDIN'] +if filenames == []: filenames = [None] for name in filenames: - if not (name == 'STDIN' or os.path.exists(name)): continue - table = damask.ASCIItable(name = name, outname = name+'_tmp', - buffered = False) - table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else '')) + try: + table = damask.ASCIItable(name = name, + buffered = False) + except: continue + table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name else '')) # ------------------------------------------ read header ------------------------------------------ @@ -135,4 +136,3 @@ for name in filenames: # ------------------------------------------ output finalization ----------------------------------- table.close() # close ASCII tables - if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new diff --git a/processing/post/addMapped.py b/processing/post/addMapped.py index 2105a6dfd..738b8746e 100755 --- a/processing/post/addMapped.py +++ b/processing/post/addMapped.py @@ -48,7 +48,8 @@ if options.map == None: if options.asciitable != None and os.path.isfile(options.asciitable): - mappedTable = damask.ASCIItable(name = options.asciitable,buffered = False, readonly = True) + mappedTable = damask.ASCIItable(name = options.asciitable, + buffered = False, readonly = True) mappedTable.head_read() # read ASCII header info of mapped table missing_labels = mappedTable.data_readArray(options.label) @@ -60,13 +61,14 @@ else: 
# --- loop over input files ------------------------------------------------------------------------- -if filenames == []: filenames = ['STDIN'] +if filenames == []: filenames = [None] for name in filenames: - if not (name == 'STDIN' or os.path.exists(name)): continue - table = damask.ASCIItable(name = name, outname = name+'_tmp', - buffered = False) - table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else '')) + try: + table = damask.ASCIItable(name = name, + buffered = False) + except: continue + table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name else '')) # ------------------------------------------ read header ------------------------------------------ @@ -100,6 +102,5 @@ for name in filenames: # ------------------------------------------ output finalization ----------------------------------- table.close() # close ASCII tables - if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new mappedTable.close() # close mapped input ASCII table diff --git a/processing/post/addMises.py b/processing/post/addMises.py index 9122b4181..56a8d38da 100755 --- a/processing/post/addMises.py +++ b/processing/post/addMises.py @@ -49,13 +49,14 @@ if len(options.stress+options.strain) == 0: # --- loop over input files ------------------------------------------------------------------------- -if filenames == []: filenames = ['STDIN'] +if filenames == []: filenames = [None] for name in filenames: - if not (name == 'STDIN' or os.path.exists(name)): continue - table = damask.ASCIItable(name = name, outname = name+'_tmp', - buffered = False) - table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else '')) + try: + table = damask.ASCIItable(name = name, + buffered = False) + except: continue + table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name else '')) # ------------------------------------------ read header ------------------------------------------ @@ -103,4 +104,3 @@ for name in filenames: # 
------------------------------------------ output finalization ----------------------------------- table.close() # close input ASCII table (works for stdin) - if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new diff --git a/processing/post/addNorm.py b/processing/post/addNorm.py index ccc5fdf80..ba4ba5a5c 100755 --- a/processing/post/addNorm.py +++ b/processing/post/addNorm.py @@ -49,13 +49,14 @@ if options.label == None: # --- loop over input files ------------------------------------------------------------------------- -if filenames == []: filenames = ['STDIN'] +if filenames == []: filenames = [None] for name in filenames: - if not (name == 'STDIN' or os.path.exists(name)): continue - table = damask.ASCIItable(name = name, outname = name+'_tmp', - buffered = False) - table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else '')) + try: + table = damask.ASCIItable(name = name, + buffered = False) + except: continue + table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name else '')) # ------------------------------------------ read header ------------------------------------------ @@ -99,4 +100,3 @@ for name in filenames: # ------------------------------------------ output finalization ----------------------------------- table.close() # close input ASCII table (works for stdin) - if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new diff --git a/processing/post/addOrientations.py b/processing/post/addOrientations.py index c93668324..c04b70231 100755 --- a/processing/post/addOrientations.py +++ b/processing/post/addOrientations.py @@ -94,13 +94,14 @@ r = damask.Quaternion().fromAngleAxis(toRadians*options.rotation[0],options.rota # --- loop over input files ------------------------------------------------------------------------ -if filenames == []: filenames = ['STDIN'] +if filenames == []: filenames = [None] for name in filenames: - if not (name == 'STDIN' or os.path.exists(name)): 
continue - table = damask.ASCIItable(name = name, outname = name+'_tmp', - buffered = False) - table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else '')) + try: + table = damask.ASCIItable(name = name, + buffered = False) + except: continue + table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name else '')) # ------------------------------------------ read header ------------------------------------------ @@ -157,4 +158,3 @@ for name in filenames: # ------------------------------------------ output finalization ----------------------------------- table.close() # close ASCII tables - if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new diff --git a/processing/post/addPK2.py b/processing/post/addPK2.py index 15c940b79..d05ec4ec4 100755 --- a/processing/post/addPK2.py +++ b/processing/post/addPK2.py @@ -36,13 +36,14 @@ parser.set_defaults(defgrad = 'f', # --- loop over input files ------------------------------------------------------------------------- -if filenames == []: filenames = ['STDIN'] +if filenames == []: filenames = [None] for name in filenames: - if not (name == 'STDIN' or os.path.exists(name)): continue - table = damask.ASCIItable(name = name, outname = name+'_tmp', - buffered = False) - table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else '')) + try: + table = damask.ASCIItable(name = name, + buffered = False) + except: continue + table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name else '')) # ------------------------------------------ read header ------------------------------------------ @@ -82,4 +83,3 @@ for name in filenames: # ------------------------------------------ output finalization ----------------------------------- table.close() # close input ASCII table (works for stdin) - if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new diff --git a/processing/post/addPole.py b/processing/post/addPole.py index a7261ec72..add8b877c 
100755 --- a/processing/post/addPole.py +++ b/processing/post/addPole.py @@ -81,15 +81,16 @@ toRadians = math.pi/180.0 if options.degrees else 1.0 pole = np.array(options.pole) pole /= np.linalg.norm(pole) -# --- loop over input files ------------------------------------------------------------------------ +# --- loop over input files ------------------------------------------------------------------------- -if filenames == []: filenames = ['STDIN'] +if filenames == []: filenames = [None] for name in filenames: - if not (name == 'STDIN' or os.path.exists(name)): continue - table = damask.ASCIItable(name = name, outname = name+'_tmp', - buffered = False) - table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else '')) + try: + table = damask.ASCIItable(name = name, + buffered = False) + except: continue + table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name else '')) # ------------------------------------------ read header ------------------------------------------ @@ -139,4 +140,3 @@ for name in filenames: # ------------------------------------------ output finalization ----------------------------------- table.close() # close ASCII tables - if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new diff --git a/processing/post/addQuaternions.py b/processing/post/addQuaternions.py index b395a0570..1ef209980 100755 --- a/processing/post/addQuaternions.py +++ b/processing/post/addQuaternions.py @@ -55,21 +55,17 @@ datainfo = {'len':4, if options.frame != None: datainfo['label'] += options.frame # --- loop over input files ------------------------------------------------------------------------- -if filenames == []: - filenames = ['STDIN'] + +if filenames == []: filenames = [None] for name in filenames: - if name == 'STDIN': - file = {'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr} - file['croak'].write('\033[1m'+scriptName+'\033[0m\n') - else: - if not os.path.exists(name): continue - file 
= {'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr} - file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n') + try: + table = damask.ASCIItable(name = name, + buffered = False) + except: continue + table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name else '')) - table = damask.ASCIItable(file['input'],file['output'],buffered=False) # make unbuffered ASCII_table table.head_read() # read ASCII header info - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) # --------------- figure out columns to process --------------------------------------------------- active = [] @@ -81,10 +77,11 @@ for name in filenames: active.append(label) column[label] = table.labels.index(key) # remember columns of requested data else: - file['croak'].write('column %s not found...\n'%label) + table.croak('column %s not found...'%label) # ------------------------------------------ assemble header --------------------------------------- + table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) table.labels_append(['Q_%i'%(i+1) for i in xrange(4)]) # extend ASCII header with new labels [1 real, 3 imaginary components] table.head_write() @@ -117,8 +114,4 @@ for name in filenames: # ------------------------------------------ output result ----------------------------------------- outputAlive and table.output_flush() # just in case of buffered ASCII table - table.input_close() # close input ASCII table (works for stdin) - table.output_close() # close output ASCII table (works for stdout) - if file['name'] != 'STDIN': - os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new - + table.close() # close ASCII tables diff --git a/processing/post/addSpectralDecomposition.py b/processing/post/addSpectralDecomposition.py index 113aca3c7..27ce5f3ec 100755 --- a/processing/post/addSpectralDecomposition.py +++ b/processing/post/addSpectralDecomposition.py @@ -31,13 +31,14 @@ if options.tensor == None: # --- loop 
over input files ------------------------------------------------------------------------- -if filenames == []: filenames = ['STDIN'] +if filenames == []: filenames = [None] for name in filenames: - if not (name == 'STDIN' or os.path.exists(name)): continue - table = damask.ASCIItable(name = name, outname = name+'_tmp', - buffered = False) - table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else '')) + try: + table = damask.ASCIItable(name = name, + buffered = False) + except: continue + table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name else '')) # ------------------------------------------ read header ------------------------------------------ @@ -85,4 +86,3 @@ for name in filenames: # ------------------------------------------ output finalization ----------------------------------- table.close() # close input ASCII table (works for stdin) - if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new diff --git a/processing/post/addStrainTensors.py b/processing/post/addStrainTensors.py index 3b9ae2ae6..2959ca83e 100755 --- a/processing/post/addStrainTensors.py +++ b/processing/post/addStrainTensors.py @@ -81,13 +81,14 @@ if options.defgrad == None: # --- loop over input files ------------------------------------------------------------------------- -if filenames == []: filenames = ['STDIN'] +if filenames == []: filenames = [None] for name in filenames: - if not (name == 'STDIN' or os.path.exists(name)): continue - table = damask.ASCIItable(name = name, outname = name+'_tmp', - buffered = False) - table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else '')) + try: + table = damask.ASCIItable(name = name, + buffered = False) + except: continue + table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name else '')) # ------------------------------------------ read header ------------------------------------------ @@ -161,4 +162,3 @@ for name in filenames: # 
------------------------------------------ output finalization ----------------------------------- table.close() # close ASCII tables - if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new diff --git a/processing/post/averageDown.py b/processing/post/averageDown.py index d766000fb..b30cbcc0a 100755 --- a/processing/post/averageDown.py +++ b/processing/post/averageDown.py @@ -3,6 +3,7 @@ import os,sys,string import numpy as np +import scipy.ndimage from optparse import OptionParser import damask @@ -19,131 +20,133 @@ Average each data block of size 'packing' into single values thus reducing the f """, version = scriptID) parser.add_option('-c','--coordinates', - dest='coords', - metavar='string', - help='column heading for coordinates [%default]') + dest = 'coords', + type = 'string', metavar = 'string', + help = 'column heading for coordinates [%default]') parser.add_option('-p','--packing', - dest='packing', - type='int', nargs=3, - metavar='int int int', - help='size of packed group [%default]') + dest = 'packing', + type = 'int', nargs = 3, metavar = 'int int int', + help = 'size of packed group [%default]') parser.add_option('--shift', - dest='shift', - type='int', nargs=3, - metavar='int int int', - help='shift vector of packing stencil [%default]') + dest = 'shift', + type = 'int', nargs = 3, metavar = 'int int int', + help = 'shift vector of packing stencil [%default]') parser.add_option('-g', '--grid', - dest='grid', - type='int', nargs=3, - metavar='int int int', - help='grid in x,y,z [autodetect]') -parser.add_option('-s', '--size', dest='size', type='float', nargs=3, metavar='float float float', - help='size in x,y,z [autodetect]') + dest = 'grid', + type = 'int', nargs = 3, metavar = 'int int int', + help = 'grid in x,y,z [autodetect]') +parser.add_option('-s', '--size', + dest = 'size', + type = 'float', nargs = 3, metavar = 'float float float', + help = 'size in x,y,z [autodetect]') parser.set_defaults(coords = 
'ipinitialcoord', packing = (2,2,2), shift = (0,0,0), grid = (0,0,0), - size = (0.0,0.0,0.0)) + size = (0.0,0.0,0.0), + ) (options,filenames) = parser.parse_args() -options.packing = np.array(options.packing) -options.shift = np.array(options.shift) +packing = np.array(options.packing,dtype = int) +shift = np.array(options.shift, dtype = int) -prefix = 'averagedDown%ix%ix%i_'%(options.packing[0],options.packing[1],options.packing[2]) -if np.any(options.shift != 0): - prefix += 'shift%+i%+i%+i_'%(options.shift[0],options.shift[1],options.shift[2]) +prefix = 'averagedDown{}x{}x{}_'.format(*packing) +if any(shift != 0): prefix += 'shift{:+}{:+}{:+}_'.format(*shift) -# --- loop over input files ------------------------------------------------------------------------- +# --- loop over input files ------------------------------------------------------------------------ -if filenames == []: filenames = ['STDIN'] +if filenames == []: filenames = [None] for name in filenames: - if not (name == 'STDIN' or os.path.exists(name)): continue - table = damask.ASCIItable(name = name, outname = prefix+name, - buffered = False) - table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else '')) + try: + table = damask.ASCIItable(name = name, + outname = os.path.join(os.path.dirname(name), + prefix+os.path.basename(name)) if name else name, + buffered = False) + except: continue + table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name else '')) -# ------------------------------------------ read header ------------------------------------------- +# ------------------------------------------ read header ------------------------------------------ table.head_read() -# --------------- figure out size and grid --------------------------------------------------------- - try: - elemCol = table.labels.index('elem') - locationCol = table.labels.index('1_%s'%options.coords) # columns containing location data - except ValueError: - try: - locationCol = 
table.labels.index('%s.x'%options.coords) # columns containing location data (legacy naming scheme) - except ValueError: - table.croak('no coordinate (1_%s/%s.x) and/or elem data found...\n'%(options.coords,options.coords)) - continue +# ------------------------------------------ sanity checks ---------------------------------------- - if (any(options.grid)==0 or any(options.size)==0.0): - coords = [{},{},{}] - while table.data_read(): # read next data line of ASCII table - for j in xrange(3): - coords[j][str(table.data[locationCol+j])] = True # remember coordinate along x,y,z - grid = np.array([len(coords[0]),\ - len(coords[1]),\ - len(coords[2]),],'i') # resolution is number of distinct coordinates found - size = grid/np.maximum(np.ones(3,'d'),grid-1.0)* \ - np.array([max(map(float,coords[0].keys()))-min(map(float,coords[0].keys())),\ - max(map(float,coords[1].keys()))-min(map(float,coords[1].keys())),\ - max(map(float,coords[2].keys()))-min(map(float,coords[2].keys())),\ - ],'d') # size from bounding box, corrected for cell-centeredness - origin = np.array([min(map(float,coords[0].keys())),\ - min(map(float,coords[1].keys())),\ - min(map(float,coords[2].keys())),\ - ],'d') - 0.5 * size / grid - else: - grid = np.array(options.grid,'i') - size = np.array(options.size,'d') - origin = np.zeros(3,'d') - - for i, res in enumerate(grid): - if res == 1: - options.packing[i] = 1 - options.shift[i] = 0 - mask = np.ones(3,dtype=bool) - mask[i]=0 - size[i] = min(size[mask]/grid[mask]) # third spacing equal to smaller of other spacing + errors = [] + remarks = [] + colCoord = None - packing = np.array(options.packing,'i') - shift = np.array(options.shift,'i') - downSized = np.maximum(np.ones(3,'i'),grid//packing) - outSize = np.ceil(np.array(grid,'d')/np.array(packing,'d')) - + if table.label_dimension(options.coords) != 3: errors.append('coordinates {} are not a vector.'.format(options.coords)) + else: colCoord = table.label_index(options.coords) + + if remarks != []: 
table.croak(remarks) + if errors != []: + table.croak(errors) + table.close(dismiss = True) + continue + + # ------------------------------------------ assemble header --------------------------------------- + + table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) table.head_write() -# ------------------------------------------ process data ------------------------------------------ - table.data_rewind() - data = np.zeros(outSize.tolist()+[len(table.labels)]) - p = np.zeros(3,'i') +# --------------- figure out size and grid --------------------------------------------------------- + + table.data_readArray() + + if (any(options.grid) == 0 or any(options.size) == 0.0): + coords = [np.unique(table.data[:,colCoord+i]) for i in xrange(3)] + mincorner = np.array(map(min,coords)) + maxcorner = np.array(map(max,coords)) + grid = np.array(map(len,coords),'i') + size = grid/np.maximum(np.ones(3,'d'), grid-1.0) * (maxcorner-mincorner) # size from edge to edge = dim * n/(n-1) + size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 equal to smallest among other spacings + delta = size/np.maximum(np.ones(3,'d'), grid) + origin = mincorner - 0.5*delta # shift from cell center to corner + + else: + grid = np.array(options.grid,'i') + size = np.array(options.size,'d') + origin = np.zeros(3,'d') + + packing = np.where(grid == 1,1,packing) # reset packing to 1 where grid==1 + shift = np.where(grid == 1,0,shift) # reset shift to 0 where grid==1 + packedGrid = np.maximum(np.ones(3,'i'),grid//packing) + + averagedDown = scipy.ndimage.filters.uniform_filter( \ + np.roll( + np.roll( + np.roll(table.data.reshape(list(grid)+[table.data.shape[1]],order = 'F'), + -shift[0],axis = 0), + -shift[1],axis = 1), + -shift[2],axis = 2), + size = list(packing) + [1], + mode = 'wrap', + origin = list(-(packing/2)) + [0])\ + [::packing[0],::packing[1],::packing[2],:].reshape((packedGrid.prod(),table.data.shape[1]),order = 'F') + - for p[2] in xrange(grid[2]): - 
for p[1] in xrange(grid[1]): - for p[0] in xrange(grid[0]): - d = ((p-shift)%grid)//packing - table.data_read() - data[d[0],d[1],d[2],:] += np.array(table.data_asFloat(),'d') # convert to np array - - data /= packing.prod() + table.data = averagedDown - elementSize = size/grid*packing - posOffset = (shift+[0.5,0.5,0.5])*elementSize - elem = 1 - for c in xrange(downSized[2]): - for b in xrange(downSized[1]): - for a in xrange(downSized[0]): - for i,x in enumerate([a,b,c]): - data[a,b,c,locationCol+i] = posOffset[i] + x*elementSize[i] + origin[i] - data[a,b,c,elemCol] = elem - table.data = data[a,b,c,:].tolist() - outputAlive = table.data_write() # output processed line - elem += 1 +#--- generate grid -------------------------------------------------------------------------------- + if colCoord: + x = (0.5 + shift[0] + np.arange(packedGrid[0],dtype=float))/packedGrid[0]*size[0] + origin[0] + y = (0.5 + shift[1] + np.arange(packedGrid[1],dtype=float))/packedGrid[1]*size[1] + origin[1] + z = (0.5 + shift[2] + np.arange(packedGrid[2],dtype=float))/packedGrid[2]*size[2] + origin[2] + + xx = np.tile( x, packedGrid[1]* packedGrid[2]) + yy = np.tile(np.repeat(y,packedGrid[0] ),packedGrid[2]) + zz = np.repeat(z,packedGrid[0]*packedGrid[1]) + + table.data[:,colCoord:colCoord+3] = np.squeeze(np.dstack((xx,yy,zz))) + +# ------------------------------------------ output result ----------------------------------------- + + table.data_writeArray() + # ------------------------------------------ output finalization ----------------------------------- table.close() # close ASCII tables diff --git a/processing/post/averageTable.py b/processing/post/averageTable.py index 5d1a704de..a394465b2 100755 --- a/processing/post/averageTable.py +++ b/processing/post/averageTable.py @@ -34,14 +34,15 @@ if options.label == None: # --- loop over input files ------------------------------------------------------------------------- -if filenames == []: filenames = ['STDIN'] +if filenames == []: 
filenames = [None] for name in filenames: - if not (name == 'STDIN' or os.path.exists(name)): continue - table = damask.ASCIItable(name = name, - outname = options.label+'_averaged_'+name, - buffered = False) - table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else '')) + try: + table = damask.ASCIItable(name = name, + outname = options.label+'_averaged_'+name if name else name, + buffered = False) + except: continue + table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name else '')) # ------------------------------------------ sanity checks --------------------------------------- diff --git a/processing/post/binXY.py b/processing/post/binXY.py index 82e524aa2..a65f815b2 100755 --- a/processing/post/binXY.py +++ b/processing/post/binXY.py @@ -90,10 +90,11 @@ if filenames == []: filenames = [None] for name in filenames: try: table = damask.ASCIItable(name = name, - outname = os.path.join(os.path.dirname(name), - 'binned-%s-%s_'%(options.data[0],options.data[1])+ \ - ('weighted-%s_'%(options.weight) if options.weight != None else '') + \ - os.path.basename(name)), buffered = False) + outname = os.path.join(os.path.dirname(name), + 'binned-{}-{}_'.format(*options.data)+ \ + ('weighted-{}_'.format(options.weight) if options.weight else '') + \ + os.path.basename(name)) if name else name, + buffered = False) except: continue table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name else '')) diff --git a/processing/post/imageData.py b/processing/post/imageData.py index 42f6bf092..1c4ee99d8 100755 --- a/processing/post/imageData.py +++ b/processing/post/imageData.py @@ -114,9 +114,9 @@ if filenames == []: filenames = [None] for name in filenames: try: table = damask.ASCIItable(name = name, - buffered = False, - labeled = options.label != None, - readonly = True) + buffered = False, + labeled = options.label != None, + readonly = True) except: continue table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name else '')) @@ -174,7 +174,7 
@@ for name in filenames: # ------------------------------------------ output result ----------------------------------------- - im.save(sys.stdout if name else + im.save(sys.stdout if not name else os.path.splitext(name)[0]+ \ ('' if options.label == None else '_'+options.label)+ \ '.png', diff --git a/processing/post/imageDataDeformed.py b/processing/post/imageDataDeformed.py index 4616b0183..42c913ec5 100755 --- a/processing/post/imageDataDeformed.py +++ b/processing/post/imageDataDeformed.py @@ -106,16 +106,16 @@ theColors = np.uint8(np.array(theMap.export(format='list',steps=256))*255) # --- loop over input files ------------------------------------------------------------------------- -if filenames == []: filenames = ['STDIN'] +if filenames == []: filenames = [None] for name in filenames: - if not (name == 'STDIN' or os.path.exists(name)): continue - table = damask.ASCIItable(name = name, - outname = None, - buffered = False, - labeled = options.label != None, - readonly = True) - table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else '')) + try: + table = damask.ASCIItable(name = name, + buffered = False, + labeled = options.label != None, + readonly = True) + except: continue + table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name else '')) # ------------------------------------------ read header ------------------------------------------ @@ -184,10 +184,9 @@ for name in filenames: # ------------------------------------------ output result ----------------------------------------- - im.save(sys.stdout if name == 'STDIN' else - os.path.splitext(name)[0]+ \ - ('' if options.label == None else '_'+options.label)+ \ - '.png', + im.save(os.path.splitext(name)[0]+ \ + ('_'+options.label if options.label else '')+ \ + '.png' if name else sys.stdout, format = "PNG") table.close() # close ASCII table diff --git a/processing/post/imageDataRGB.py b/processing/post/imageDataRGB.py index 0aa7f75da..1f4d7c4c6 100755 --- 
a/processing/post/imageDataRGB.py +++ b/processing/post/imageDataRGB.py @@ -74,16 +74,16 @@ if options.pixelsize > 1: (options.pixelsizex,options.pixelsizey) = [options.pix # --- loop over input files ------------------------------------------------------------------------- -if filenames == []: filenames = ['STDIN'] +if filenames == []: filenames = [None] for name in filenames: - if not (name == 'STDIN' or os.path.exists(name)): continue - table = damask.ASCIItable(name = name, - outname = None, - buffered = False, - labeled = options.label != None, - readonly = True) - table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else '')) + try: + table = damask.ASCIItable(name = name, + buffered = False, + labeled = options.label != None, + readonly = True) + except: continue + table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name else '')) # ------------------------------------------ read header ------------------------------------------ @@ -126,10 +126,9 @@ for name in filenames: # ------------------------------------------ output result ----------------------------------------- - im.save(sys.stdout if name == 'STDIN' else - os.path.splitext(name)[0]+ \ - ('' if options.label == None else '_'+options.label)+ \ - '.png', + im.save(os.path.splitext(name)[0]+ \ + ('_'+options.label if options.label else '')+ \ + '.png' if name else sys.stdout, format = "PNG") table.close() # close ASCII table diff --git a/processing/pre/geom_fromEuclideanDistance.py b/processing/pre/geom_fromEuclideanDistance.py index 90936a3db..e0652c3a0 100755 --- a/processing/pre/geom_fromEuclideanDistance.py +++ b/processing/pre/geom_fromEuclideanDistance.py @@ -131,8 +131,7 @@ for name in filenames: try: table = damask.ASCIItable(name = name, buffered = False, labeled = False, readonly = True) - except: - continue + except: continue table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name else '')) # --- interpret header 
---------------------------------------------------------------------------- @@ -173,8 +172,6 @@ for name in filenames: p[2]+1] = 1 convoluted[i,:,:,:] = ndimage.convolve(structure,stencil) -# distance = np.ones(info['grid'],'d') - convoluted = np.sort(convoluted,axis = 0) uniques = np.where(convoluted[0,1:-1,1:-1,1:-1] != 0, 1,0) # initialize unique value counter (exclude myself [= 0]) @@ -186,20 +183,15 @@ for name in filenames: for feature in feature_list: try: - table = damask.ASCIItable(outname = features[feature]['alias'][0]+'_'+name, + table = damask.ASCIItable(outname = features[feature]['alias'][0]+'_'+name if name else name, buffered = False, labeled = False) - except: - continue + except: continue table.croak(features[feature]['alias'][0]) distance = np.where(uniques >= features[feature]['aliens'],0.0,1.0) # seed with 0.0 when enough unique neighbor IDs are present distance = ndimage.morphology.distance_transform_edt(distance)*[options.scale]*3 -# for i in xrange(len(feature_list)): -# distance[i,:,:,:] = ndimage.morphology.distance_transform_edt(distance[i,:,:,:])*[options.scale]*3 - -# for i,feature in enumerate(feature_list): info['microstructures'] = int(math.ceil(distance.max())) #--- write header --------------------------------------------------------------------------------- @@ -215,7 +207,6 @@ for name in filenames: ]) table.labels_clear() table.head_write() - table.output_flush() # --- write microstructure information ------------------------------------------------------------ diff --git a/processing/pre/geom_fromMinimalSurface.py b/processing/pre/geom_fromMinimalSurface.py index c6c405d67..a35aa4961 100755 --- a/processing/pre/geom_fromMinimalSurface.py +++ b/processing/pre/geom_fromMinimalSurface.py @@ -79,8 +79,7 @@ for name in filenames: try: table = damask.ASCIItable(outname = name, buffered = False, labeled = False) - except: - continue + except: continue table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name else '')) @@ -126,6 
+125,7 @@ for name in filenames: table.head_write() #--- write data ----------------------------------------------------------------------------------- + X = options.periods*2.0*math.pi*(np.arange(options.grid[0])+0.5)/options.grid[0] Y = options.periods*2.0*math.pi*(np.arange(options.grid[1])+0.5)/options.grid[1] Z = options.periods*2.0*math.pi*(np.arange(options.grid[2])+0.5)/options.grid[2] diff --git a/processing/pre/geom_fromTable.py b/processing/pre/geom_fromTable.py index 429e6383e..acaf597f6 100755 --- a/processing/pre/geom_fromTable.py +++ b/processing/pre/geom_fromTable.py @@ -96,9 +96,10 @@ input = [options.eulers != None, options.c != None, options.matrix != None, options.quaternion != None, + options.microstructure != None, ] -if np.sum(input) != 1 and options.microstructure == None: +if np.sum(input) != 1: parser.error('need either microstructure label or exactly one orientation input format.') if options.axes != None and not set(options.axes).issubset(set(['x','+x','-x','y','+y','-y','z','+z','-z'])): parser.error('invalid axes {} {} {}.'.format(*options.axes)) @@ -107,6 +108,7 @@ if options.axes != None and not set(options.axes).issubset(set(['x','+x','-x','y ([options.a,options.b,options.c],[3,3,3],'frame'), (options.matrix,9,'matrix'), (options.quaternion,4,'quaternion'), + (options.microstructure,1,'microstructure'), ][np.where(input)[0][0]] # select input label that was requested toRadians = math.pi/180.0 if options.degrees else 1.0 # rescale degrees to radians options.tolerance *= toRadians # ensure angular tolerance in radians @@ -129,105 +131,121 @@ for name in filenames: # ------------------------------------------ sanity checks --------------------------------------- + coordDim = table.label_dimension(options.coordinates) + errors = [] - if not 3 >= table.label_dimension(options.coordinates) >= 2: # TODO need to deal with 3D case!! 
+ if not 3 >= coordDim >= 2: errors.append('coordinates {} need to have two or three dimensions.'.format(options.coordinates)) if not np.all(table.label_dimension(label) == dim): - errors.append('orientation {} needs to have dimension {}.'.format(label,dim)) + errors.append('input {} needs to have dimension {}.'.format(label,dim)) if options.phase != None and table.label_dimension(options.phase) != 1: errors.append('phase column {} is not scalar.'.format(options.phase)) - if errors == []: # so far no errors? - table.data_readArray([options.coordinates,label]+([] if options.phase == None else [options.phase])) - - if options.phase == None: - table.data = np.column_stack((table.data,np.ones(len(table.data)))) # add single phase if no phase column given - - coordsX = np.unique(table.data[:,0]) - coordsY = np.unique(table.data[:,1]) - nX = len(coordsX) - nY = len(coordsY) - dX = (coordsX[-1]-coordsX[0])/(nX-1) - dY = (coordsY[-1]-coordsY[0])/(nY-1) + if errors != []: + table.croak(errors) + table.close(dismiss = True) + continue - if nX*nY != len(table.data) \ - or np.any(np.abs(np.log10((coordsX[1:]-coordsX[:-1])/dX)) > 0.01) \ - or np.any(np.abs(np.log10((coordsY[1:]-coordsY[:-1])/dY)) > 0.01): - errors.append('data is not on square grid.') + table.data_readArray([options.coordinates,label]+([] if options.phase == None else [options.phase])) - if errors != []: + if coordDim == 2: + table.data = np.insert(table.data,2,np.zeros(len(table.data)),axis=1) # add zero z coordinate for two-dimensional input + if options.phase == None: + table.data = np.column_stack((table.data,np.ones(len(table.data)))) # add single phase if no phase column given + +# --------------- figure out size and grid --------------------------------------------------------- + + coords = [np.unique(table.data[:,i]) for i in xrange(3)] + mincorner = np.array(map(min,coords)) + maxcorner = np.array(map(max,coords)) + grid = np.array(map(len,coords),'i') + size = grid/np.maximum(np.ones(3,'d'), grid-1.0) 
* (maxcorner-mincorner) # size from edge to edge = dim * n/(n-1) + size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 equal to smallest among other spacings + delta = size/np.maximum(np.ones(3,'d'), grid) + origin = mincorner - 0.5*delta # shift from cell center to corner + + N = grid.prod() + + if N != len(table.data): + errors.append('data count {} does not match grid {}.'.format(len(table.data),' x '.join(map(repr,grid)))) + if np.any(np.abs(np.log10((coords[0][1:]-coords[0][:-1])/delta[0])) > 0.01) \ + or np.any(np.abs(np.log10((coords[1][1:]-coords[1][:-1])/delta[1])) > 0.01) \ + or np.any(np.abs(np.log10((coords[2][1:]-coords[2][:-1])/delta[2])) > 0.01): + errors.append('regular grid spacing {} violated.'.format(' x '.join(map(repr,delta)))) + + if errors != []: table.croak(errors) table.close(dismiss = True) continue # ------------------------------------------ process data ------------------------------------------ + + colOri = table.label_index(label)+(3-coordDim) # column(s) of orientation data (following 3 or 2 coordinates that were expanded to 3!) 
+ + if inputtype == 'microstructure': + microstructure = table.data[:,colOri] + nGrains = len(np.unique(microstructure)) + else: + colPhase = colOri + np.sum(dim) # column of phase data comes after orientation + index = np.lexsort((table.data[:,0],table.data[:,1],table.data[:,2])) # index of rank when sorting x fast, z slow + rank = np.argsort(index) # rank of index + KDTree = scipy.spatial.KDTree((table.data[:,:3]-mincorner) / delta) # build KDTree with dX = dY = dZ = 1 and origin 0,0,0 - colOri = table.label_index(label) # column(s) of orientation data - colPhase = colOri + np.sum(dim) # column of phase data comes after orientation - index = np.lexsort((table.data[:,0],table.data[:,1])) # index of rank when sorting x fast, y slow - rank = np.argsort(index) # rank of index - KDTree = scipy.spatial.KDTree((table.data[:,:2]-np.array([coordsX[0],coordsY[0]])) \ - / np.array([dX,dY])) # build KDTree with dX = dY = 1 - - microstructure = np.zeros(nX*nY,dtype = 'uint32') # initialize empty microstructure - symQuats = [] # empty list of sym equiv orientations - phases = [] # empty list of phase info - nGrains = 0 # counter for detected grains - myRank = 0 # rank of current grid point - for y in xrange(nY): - for x in xrange(nX): - if (myRank+1)%(nX*nY/100.) 
< 1: table.croak('.',False) - myData = table.data[index[myRank]] - mySym = options.symmetry[min(int(myData[colPhase]),len(options.symmetry))-1] # select symmetry from option (take last specified option for all with higher index) - if inputtype == 'eulers': - o = damask.Orientation(Eulers = np.array(map(float,myData[colOri:colOri+3]))*toRadians, - symmetry = mySym).reduced() - elif inputtype == 'matrix': - o = damask.Orientation(matrix = np.array([map(float,myData[colOri:colOri+9])]).reshape(3,3).transpose(), - symmetry = mySym).reduced() - elif inputtype == 'frame': - o = damask.Orientation(matrix = np.array([map(float,myData[colOri[0]:colOri[0]+3] + \ - myData[colOri[1]:colOri[1]+3] + \ - myData[colOri[2]:colOri[2]+3] - )]).reshape(3,3), - symmetry = mySym).reduced() - elif inputtype == 'quaternion': - o = damask.Orientation(quaternion = np.array(map(float,myData[colOri:colOri+4])), - symmetry = mySym).reduced() + microstructure = np.zeros(N,dtype = 'uint32') # initialize empty microstructure + symQuats = [] # empty list of sym equiv orientations + phases = [] # empty list of phase info + nGrains = 0 # counter for detected grains + myRank = 0 # rank of current grid point + for z in xrange(grid[2]): + for y in xrange(grid[1]): + for x in xrange(grid[0]): + if (myRank+1)%(N/100.) 
< 1: table.croak('.',False) + myData = table.data[index[myRank]] + mySym = options.symmetry[min(int(myData[colPhase]),len(options.symmetry))-1] # select symmetry from option (take last specified option for all with higher index) + if inputtype == 'eulers': + o = damask.Orientation(Eulers = myData[colOri:colOri+3]*toRadians, + symmetry = mySym).reduced() + elif inputtype == 'matrix': + o = damask.Orientation(matrix = myData[colOri:colOri+9].reshape(3,3).transpose(), + symmetry = mySym).reduced() + elif inputtype == 'frame': + o = damask.Orientation(matrix = np.hstack((myData[colOri[0]:colOri[0]+3], + myData[colOri[1]:colOri[1]+3], + myData[colOri[2]:colOri[2]+3], + )).reshape(3,3), + symmetry = mySym).reduced() + elif inputtype == 'quaternion': + o = damask.Orientation(quaternion = myData[colOri:colOri+4], + symmetry = mySym).reduced() - oInv = o.quaternion.conjugated() - neighbors = KDTree.query_ball_point([x,y], 3) # search points within radius - breaker = False + oInv = o.quaternion.conjugated() + neighbors = KDTree.query_ball_point([x,y,z], 3) # search points within radius + breaker = False - for n in neighbors: # check each neighbor - if myRank <= rank[n] or table.data[n,colPhase] != myData[colPhase]: continue # skip myself, anyone further ahead (cannot yet have a grain ID), and other phases - for q in symQuats[microstructure[rank[n]]-1]: - if abs((q*oInv).asAngleAxis()[0]) <= options.tolerance: # found existing orientation resembling me - microstructure[myRank] = microstructure[rank[n]] - breaker = True; break - if breaker: break + for n in neighbors: # check each neighbor + if myRank <= rank[n] or table.data[n,colPhase] != myData[colPhase]: continue # skip myself, anyone further ahead (cannot yet have a grain ID), and other phases + for q in symQuats[microstructure[rank[n]]-1]: + if abs((q*oInv).asAngleAxis()[0]) <= options.tolerance: # found existing orientation resembling me + microstructure[myRank] = microstructure[rank[n]] + breaker = True; break + if 
breaker: break - if microstructure[myRank] == 0: # no other orientation resembled me - nGrains += 1 # make new grain ... - microstructure[myRank] = nGrains # ... and assign to me - symQuats.append(o.equivalentQuaternions()) # store all symmetrically equivalent orientations for future comparison - phases.append(myData[colPhase]) # store phase info for future reporting + if microstructure[myRank] == 0: # no other orientation resembled me + nGrains += 1 # make new grain ... + microstructure[myRank] = nGrains # ... and assign to me + symQuats.append(o.equivalentQuaternions()) # store all symmetrically equivalent orientations for future comparison + phases.append(myData[colPhase]) # store phase info for future reporting - myRank += 1 + myRank += 1 - table.croak('') + table.croak('') # --- generate header ---------------------------------------------------------------------------- info = { - 'grid': np.array([nX,nY,1]), - 'size': np.array([coordsX[-1]-coordsX[0], - coordsY[-1]-coordsY[0], - min((coordsX[-1]-coordsX[0])/nX, - (coordsY[-1]-coordsY[0])/nY, - ) - ]), - 'origin': np.array([coordsX[0],coordsY[0],0.0]), + 'grid': grid, + 'size': size, + 'origin': origin, 'microstructures': nGrains, 'homogenization': options.homogenization, } @@ -243,29 +261,32 @@ for name in filenames: formatwidth = 1+int(math.log10(info['microstructures'])) - config_header = [''] - for i,phase in enumerate(phases): - config_header += ['[Grain%s]'%(str(i+1).zfill(formatwidth)), - 'crystallite %i'%options.crystallite, - '(constituent)\tphase %i\ttexture %s\tfraction 1.0'%(phase,str(i+1).rjust(formatwidth)), - ] + if inputtype == 'microstructure': + config_header = [] + else: + config_header = [''] + for i,phase in enumerate(phases): + config_header += ['[Grain%s]'%(str(i+1).zfill(formatwidth)), + 'crystallite %i'%options.crystallite, + '(constituent)\tphase %i\ttexture %s\tfraction 1.0'%(phase,str(i+1).rjust(formatwidth)), + ] - config_header += [''] - for i,quats in enumerate(symQuats): - 
config_header += ['[Grain%s]'%(str(i+1).zfill(formatwidth)), - 'axes\t%s %s %s'%tuple(options.axes) if options.axes != None else '', - '(gauss)\tphi1 %g\tPhi %g\tphi2 %g\tscatter 0.0\tfraction 1.0'%tuple(np.degrees(quats[0].asEulers())), - ] + config_header += [''] + for i,quats in enumerate(symQuats): + config_header += ['[Grain%s]'%(str(i+1).zfill(formatwidth)), + 'axes\t%s %s %s'%tuple(options.axes) if options.axes != None else '', + '(gauss)\tphi1 %g\tPhi %g\tphi2 %g\tscatter 0.0\tfraction 1.0'%tuple(np.degrees(quats[0].asEulers())), + ] table.labels_clear() table.info_clear() table.info_append([ scriptID + ' ' + ' '.join(sys.argv[1:]), - "grid\ta {grid[0]}\tb {grid[1]}\tc {grid[2]}".format(grid=info['grid']), - "size\tx {size[0]}\ty {size[1]}\tz {size[2]}".format(size=info['size']), - "origin\tx {origin[0]}\ty {origin[1]}\tz {origin[2]}".format(origin=info['origin']), - "homogenization\t{homog}".format(homog=info['homogenization']), - "microstructures\t{microstructures}".format(microstructures=info['microstructures']), + "grid\ta {}\tb {}\tc {}".format(*info['grid']), + "size\tx {}\ty {}\tz {}".format(*info['size']), + "origin\tx {}\ty {}\tz {}".format(*info['origin']), + "homogenization\t{}".format(info['homogenization']), + "microstructures\t{}".format(info['microstructures']), config_header, ]) table.head_write() diff --git a/processing/pre/geom_pack.py b/processing/pre/geom_pack.py index 78053ce84..c9be3c72f 100755 --- a/processing/pre/geom_pack.py +++ b/processing/pre/geom_pack.py @@ -22,13 +22,14 @@ compress geometry files with ranges "a to b" and/or multiples "n of x". 
# --- loop over input files ------------------------------------------------------------------------- -if filenames == []: filenames = ['STDIN'] +if filenames == []: filenames = [None] for name in filenames: - if not (name == 'STDIN' or os.path.exists(name)): continue - table = damask.ASCIItable(name = name, outname = name+'_tmp', - buffered = False, labeled = False) - table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else '')) + try: + table = damask.ASCIItable(name = name, + buffered = False, labeled = False) + except: continue + table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name else '')) # --- interpret header ---------------------------------------------------------------------------- @@ -63,7 +64,6 @@ for name in filenames: "microstructures\t{microstructures}".format(microstructures=info['microstructures']), ]) table.head_write() - table.output_flush() # --- write packed microstructure information ----------------------------------------------------- @@ -116,4 +116,3 @@ for name in filenames: # --- output finalization -------------------------------------------------------------------------- table.close() # close ASCII table - if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new diff --git a/processing/pre/geom_rescale.py b/processing/pre/geom_rescale.py index 4cb809dfc..05d615990 100755 --- a/processing/pre/geom_rescale.py +++ b/processing/pre/geom_rescale.py @@ -41,13 +41,14 @@ parser.set_defaults(renumber = False, # --- loop over input files ------------------------------------------------------------------------- -if filenames == []: filenames = ['STDIN'] +if filenames == []: filenames = [None] for name in filenames: - if not (name == 'STDIN' or os.path.exists(name)): continue - table = damask.ASCIItable(name = name, outname = name+'_tmp', - buffered = False, labeled = False) - table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else '')) + try: + table = 
damask.ASCIItable(name = name, + buffered = False, labeled = False) + except: continue + table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name else '')) # --- interpret header ---------------------------------------------------------------------------- @@ -144,7 +145,6 @@ for name in filenames: ]) table.labels_clear() table.head_write() - table.output_flush() # --- write microstructure information ------------------------------------------------------------ @@ -155,4 +155,3 @@ for name in filenames: # --- output finalization -------------------------------------------------------------------------- table.close() # close ASCII table - if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new diff --git a/processing/pre/geom_toTable.py b/processing/pre/geom_toTable.py index 25909a1f4..3e9f42a72 100755 --- a/processing/pre/geom_toTable.py +++ b/processing/pre/geom_toTable.py @@ -31,7 +31,7 @@ parser.set_defaults(position = 'pos', # --- loop over input files ------------------------------------------------------------------------- -if filenames == []: filenames = ['STDIN'] +if filenames == []: filenames = [None] for name in filenames: try: diff --git a/processing/pre/geom_translate.py b/processing/pre/geom_translate.py index 61898589d..b0fef381a 100755 --- a/processing/pre/geom_translate.py +++ b/processing/pre/geom_translate.py @@ -44,13 +44,14 @@ for i in xrange(len(options.substitute)/2): # --- loop over input files ------------------------------------------------------------------------- -if filenames == []: filenames = ['STDIN'] +if filenames == []: filenames = [None] for name in filenames: - if not (name == 'STDIN' or os.path.exists(name)): continue - table = damask.ASCIItable(name = name, outname = name+'_tmp', - buffered = False, labeled = False) - table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else '')) + try: + table = damask.ASCIItable(name = name, + buffered = False, labeled = False) + except: 
continue + table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name else '')) # --- interpret header ---------------------------------------------------------------------------- @@ -111,7 +112,6 @@ for name in filenames: "microstructures\t{microstructures}".format(microstructures=newInfo['microstructures']), ]) table.head_write() - table.output_flush() # --- write microstructure information ------------------------------------------------------------ @@ -122,4 +122,3 @@ for name in filenames: # --- output finalization -------------------------------------------------------------------------- table.close() # close ASCII table - if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new diff --git a/processing/pre/geom_unpack.py b/processing/pre/geom_unpack.py index c06dd5dfe..671d7b5ff 100755 --- a/processing/pre/geom_unpack.py +++ b/processing/pre/geom_unpack.py @@ -29,13 +29,14 @@ parser.set_defaults(oneD = False, # --- loop over input files ------------------------------------------------------------------------- -if filenames == []: filenames = ['STDIN'] +if filenames == []: filenames = [None] for name in filenames: - if not (name == 'STDIN' or os.path.exists(name)): continue - table = damask.ASCIItable(name = name, outname = name+'_tmp', - buffered = False, labeled = False) - table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else '')) + try: + table = damask.ASCIItable(name = name, + buffered = False, labeled = False) + except: continue + table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name else '')) # --- interpret header ---------------------------------------------------------------------------- @@ -70,7 +71,6 @@ for name in filenames: "microstructures\t{microstructures}".format(microstructures=info['microstructures']), ]) table.head_write() - table.output_flush() # --- write microstructure information ------------------------------------------------------------ @@ -83,4 +83,3 @@ for name in 
filenames: #--- output finalization -------------------------------------------------------------------------- table.close() # close ASCII table - if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new diff --git a/processing/pre/geom_vicinityOffset.py b/processing/pre/geom_vicinityOffset.py index f8a198de5..74c4377ff 100755 --- a/processing/pre/geom_vicinityOffset.py +++ b/processing/pre/geom_vicinityOffset.py @@ -38,13 +38,14 @@ parser.set_defaults(vicinity = 1, # --- loop over input files ------------------------------------------------------------------------- -if filenames == []: filenames = ['STDIN'] +if filenames == []: filenames = [None] for name in filenames: - if not (name == 'STDIN' or os.path.exists(name)): continue - table = damask.ASCIItable(name = name, outname = name+'_tmp', - buffered = False, labeled = False) - table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else '')) + try: + table = damask.ASCIItable(name = name, + buffered = False, labeled = False) + except: continue + table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name else '')) # --- interpret header ---------------------------------------------------------------------------- @@ -103,7 +104,6 @@ for name in filenames: "microstructures\t{microstructures}".format(microstructures=newInfo['microstructures']), ]) table.head_write() - table.output_flush() # --- write microstructure information ------------------------------------------------------------ @@ -114,4 +114,3 @@ for name in filenames: # --- output finalization -------------------------------------------------------------------------- table.close() # close ASCII table - if name != 'STDIN': os.rename(name+'_tmp',name) # overwrite old one with tmp new diff --git a/processing/pre/hybridIA_linODFsampling.py b/processing/pre/hybridIA_linODFsampling.py index 205986caf..408b0b73b 100755 --- a/processing/pre/hybridIA_linODFsampling.py +++ b/processing/pre/hybridIA_linODFsampling.py @@ 
-280,18 +280,19 @@ methods = [options.algorithm] # --- loop over input files ------------------------------------------------------------------------- -if filenames == []: filenames = ['STDIN'] +if filenames == []: filenames = [None] for name in filenames: - if not (name == 'STDIN' or os.path.exists(name)): continue - table = damask.ASCIItable(name = name, outname = None, - buffered = False, readonly = True) - table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else '')) + try: + table = damask.ASCIItable(name = name, + buffered = False, readonly = True) + except: continue + table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name else '')) randomSeed = int(os.urandom(4).encode('hex'), 16) if options.randomSeed == None else options.randomSeed # random seed per file for second phase random.seed(randomSeed) -# ------------------------------------------ read header --------------------------------------- +# ------------------------------------------ read header ------------------------------------------ table.head_read() @@ -305,26 +306,19 @@ for name in filenames: table.close(dismiss = True) continue -# ------------------------------------------ read data --------------------------------------- +# ------------------------------------------ read data -------------------------------------------- binnedODF = table.data_readArray(labels) # --------------- figure out limits (left/right), delta, and interval ----------------------------- + ODF = {} limits = np.array([np.min(table.data,axis=0), np.max(table.data,axis=0)]) ODF['limit'] = np.radians(limits[1,:]) + ODF['center'] = 0.0 if all(limits[0,:]<1e-8) else 0.5 # vertex or cell centered - if all(limits[0,:]<1e-8): # vertex centered - ODF['center'] = 0.0 - else: # cell centered - ODF['center'] = 0.5 - - eulers = [{},{},{}] - for i in xrange(table.data.shape[0]): - for j in xrange(3): - eulers[j][str(table.data[i,j]])] = True # remember eulers along phi1, Phi, and phi2 - ODF['interval'] = 
np.array([len(eulers[0]),len(eulers[1]),len(eulers[2]),],'i') # steps are number of distict values + ODF['interval'] = np.array(map(len,[np.unique(table.data[:,i]) for i in xrange(3)]),'i') # steps are number of distict values ODF['nBins'] = ODF['interval'].prod() ODF['delta'] = np.radians(np.array(limits[1,0:3]-limits[0,0:3])/(ODF['interval']-1)) diff --git a/processing/pre/mentat_spectralBox.py b/processing/pre/mentat_spectralBox.py index bf443e703..08e51f14c 100755 --- a/processing/pre/mentat_spectralBox.py +++ b/processing/pre/mentat_spectralBox.py @@ -177,109 +177,90 @@ def initial_conditions(homogenization,microstructures): return cmds -#------------------------------------------------------------------------------------------------- -def parse_geomFile(content,homog): -#------------------------------------------------------------------------------------------------- - (skip,key) = content[0].split()[:2] - if key[:4].lower() == 'head': - skip = int(skip)+1 - else: - skip = 0 - - grid = [0,0,0] - size = [0.0,0.0,0.0] - homog = 0 - - for line in content[:skip]: - data = line.split() - if data[0].lower() == 'grid': - grid = map(int,data[2:8:2]) - if data[0].lower() == 'size': - size = map(float,data[2:8:2]) - if data[0].lower() == 'homogenization': - homog = int(data[1]) - - microstructures = [] - for line in content[skip:]: - for word in line.split(): - microstructures.append(int(word)) - - return (grid,size,homog,microstructures) - #-------------------------------------------------------------------------------------------------- # MAIN #-------------------------------------------------------------------------------------------------- parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """ -Generate MSC.Marc FE hexahedral mesh from spectral description file. +Generate MSC.Marc FE hexahedral mesh from geom file. 
""", version = scriptID) -parser.add_option('-p', '--port', type='int',dest='port',metavar='int', - help='Mentat connection port [%default]') -parser.add_option('--homogenization', dest='homogenization', type='int', metavar = 'int', - help='homogenization index to be used [%default]') -parser.set_defaults(port = None) -parser.set_defaults(homogenization = 1) +parser.add_option('-p', '--port', + dest = 'port', + type = 'int', metavar = 'int', + help = 'Mentat connection port [%default]') +parser.add_option('--homogenization', + dest = 'homogenization', + type = 'int', metavar = 'int', + help = 'homogenization index to be used [auto]') + +parser.set_defaults(port = None, + homogenization = None, +) (options, filenames) = parser.parse_args() +if options.port: + try: + from py_mentat import * + except: + parser.error('no valid Mentat release found.') -#--- setup file handles -------------------------------------------------------------------------- -files = [] -if filenames == []: - files.append({'name':'STDIN', - 'input':sys.stdin, - 'output':sys.stdout, - 'croak':sys.stderr, - }) -else: - for name in filenames: - if os.path.exists(name): - files.append({'name':name, - 'input':open(name), - 'output':open(name+'_tmp','w'), - 'croak':sys.stdout, - }) +# --- loop over input files ------------------------------------------------------------------------- -try: - from py_mentat import * -except: - file['croak'].write('no valid Mentat release found') - if options.port != None: sys.exit(-1) +if filenames == []: filenames = [None] -#--- loop over input files ------------------------------------------------------------------------ -for file in files: - file['croak'].write('\033[1m' + scriptName + '\033[0m: ' + (file['name'] if file['name'] != 'STDIN' else '') + '\n') +for name in filenames: + try: + table = damask.ASCIItable(name = name, + outname = os.path.splitext(name)[0]+'.proc' if name else name, + buffered = False, labeled = False) + except: continue + 
table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name else '')) + +# --- interpret header ---------------------------------------------------------------------------- + + table.head_read() + info,extra_header = table.head_getGeom() + if options.homogenization: info['homogenization'] = options.homogenization + + table.croak(['grid a b c: %s'%(' x '.join(map(str,info['grid']))), + 'size x y z: %s'%(' x '.join(map(str,info['size']))), + 'origin x y z: %s'%(' : '.join(map(str,info['origin']))), + 'homogenization: %i'%info['homogenization'], + 'microstructures: %i'%info['microstructures'], + ]) + + errors = [] + if np.any(info['grid'] < 1): errors.append('invalid grid a b c.') + if np.any(info['size'] <= 0.0): errors.append('invalid size x y z.') + if errors != []: + table.croak(errors) + table.close(dismiss = True) + continue + +# --- read data ------------------------------------------------------------------------------------ + + microstructure = table.microstructure_read(info['grid']).reshape(info['grid'],order='F') # read microstructure - content = file['input'].readlines() - - (grid,size,homog,microstructures) = parse_geomFile(content, options.homogenization) - -#--- report --------------------------------------------------------------------------------------- - file['croak'].write('grid a b c: %s\n'%(' x '.join(map(str,grid))) + - 'size x y z: %s\n'%(' x '.join(map(str,size))) + - 'homogenization: %i\n'%homog + - 'microstructures: %i\n\n'%(len(list(set(microstructures))))) - cmds = [\ init(), - mesh(grid,size), + mesh(info['grid'],info['size']), material(), geometry(), - initial_conditions(homog,microstructures), + initial_conditions(info['homogenization'],microstructure), '*identify_sets', '*show_model', '*redraw', ] outputLocals = {} - if (options.port != None): + if options.port: py_connect('',options.port) output(cmds,outputLocals,'Mentat') py_disconnect() else: - output(cmds,outputLocals,file['output']) - if file['name'] != 'STDIN': - 
file['output'].close() - os.rename(file['name']+'_tmp',os.path.splitext(file['name'])[0] +'.proc') + output(cmds,outputLocals,table.__IO__['out']) # bad hack into internals of table class... + + table.close() diff --git a/processing/pre/seeds_check.py b/processing/pre/seeds_check.py index 775b85a89..48c51ecdd 100755 --- a/processing/pre/seeds_check.py +++ b/processing/pre/seeds_check.py @@ -34,13 +34,14 @@ parser.set_defaults(size = [0.0,0.0,0.0], # --- loop over input files ------------------------------------------------------------------------- -if filenames == []: filenames = ['STDIN'] +if filenames == []: filenames = [None] for name in filenames: - if not (name == 'STDIN' or os.path.exists(name)): continue - table = damask.ASCIItable(name = name, outname = None, - buffered = False, readonly = True) - table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else '')) + try: + table = damask.ASCIItable(name = name, + buffered = False, readonly = True) + except: continue + table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name else '')) # --- interpret header ---------------------------------------------------------------------------- @@ -62,13 +63,13 @@ for name in filenames: and np.all(info['grid'] < 1): errors.append('invalid size x y z.') else: for i in xrange(3): - if info['size'][i] <= 0.0: # any invalid size? - info['size'][i] = float(info['grid'][i])/max(info['grid']) # normalize to grid + if info['size'][i] <= 0.0: # any invalid size? 
+ info['size'][i] = float(info['grid'][i])/max(info['grid']) # normalize to grid remarks.append('rescaling size {} to {}...'.format({0:'x',1:'y',2:'z'}[i],info['size'][i])) if table.label_dimension(options.position) != 3: errors.append('columns "{}" have dimension {}'.format(options.position, table.label_dimension(options.position))) if remarks != []: table.croak(remarks) - if errors != []: + if errors != []: table.croak(errors) table.close(dismiss=True) continue @@ -81,14 +82,14 @@ for name in filenames: coords = table.data[:,:3] # assign coordinates grain = table.data[:,3].astype('i') if hasGrains else 1+np.arange(len(coords),dtype='i') # assign grains -# grainIDs = np.unique(grain).astype('i') # find all grainIDs present +# grainIDs = np.unique(grain).astype('i') # find all grainIDs present # --- generate grid -------------------------------------------------------------------------------- grid = vtk.vtkUnstructuredGrid() pts = vtk.vtkPoints() -# --- process microstructure information -------------------------------------------------------------- +# --- process microstructure information ----------------------------------------------------------- IDs = vtk.vtkIntArray() IDs.SetNumberOfComponents(1) @@ -106,7 +107,17 @@ for name in filenames: # --- write data ----------------------------------------------------------------------------------- - if name == 'STDIN': + if name: + (dir,filename) = os.path.split(name) + writer = vtk.vtkXMLUnstructuredGridWriter() + writer.SetDataModeToBinary() + writer.SetCompressorTypeToZLib() + writer.SetFileName(os.path.join(dir,'seeds_'+os.path.splitext(filename)[0] + +'.'+writer.GetDefaultFileExtension())) + if vtk.VTK_MAJOR_VERSION <= 5: writer.SetInput(grid) + else: writer.SetInputData(grid) + writer.Write() + else: writer = vtk.vtkUnstructuredGridWriter() writer.WriteToOutputStringOn() writer.SetFileTypeToASCII() @@ -115,15 +126,5 @@ for name in filenames: else: writer.SetInputData(grid) writer.Write() 
sys.stdout.write(writer.GetOutputString()[0:writer.GetOutputStringLength()]) - else: - (dir,filename) = os.path.split(name) - writer = vtk.vtkXMLUnstructuredGridWriter() - writer.SetDataModeToBinary() - writer.SetCompressorTypeToZLib() - writer.SetFileName(os.path.join(dir,'seeds_'+os.path.splitext(filename)[0] - +'.'+writer.GetDefaultFileExtension())) - if vtk.VTK_MAJOR_VERSION <= 5: writer.SetInput(grid) - else: writer.SetInputData(grid) - writer.Write() table.close() diff --git a/processing/pre/seeds_fromPokes.py b/processing/pre/seeds_fromPokes.py index c19d2a6c7..bfa7337ea 100755 --- a/processing/pre/seeds_fromPokes.py +++ b/processing/pre/seeds_fromPokes.py @@ -49,13 +49,15 @@ parser.set_defaults(x = False, # --- loop over output files ------------------------------------------------------------------------- -if filenames == []: filenames = ['STDIN'] +if filenames == []: filenames = [None] for name in filenames: - if not (name == 'STDIN' or os.path.exists(name)): continue - table = damask.ASCIItable(name = name, outname = name+'_tmp', - buffered = False, labeled = False) - table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name != 'STDIN' else '')) + try: + table = damask.ASCIItable(name = name, + outname = os.path.splitext(name)[0]+'_poked_{}.seeds'.format(options.N) if name else name, + buffered = False, labeled = False) + except: continue + table.croak('\033[1m'+scriptName+'\033[0m'+(': '+name if name else '')) # --- interpret header ---------------------------------------------------------------------------- @@ -91,7 +93,7 @@ for name in filenames: Ny = int(options.N/math.sqrt(options.N*info['size'][0]/info['size'][1])) Nz = int((max(options.z)-min(options.z))/info['size'][2]*info['grid'][2]) - table.croak('poking {0} x {1} x {2}...'.format(Nx,Ny,Nz)) + table.croak('poking {} x {} x {}...'.format(Nx,Ny,Nz)) seeds = np.zeros((Nx*Ny*Nz,4),'d') grid = np.zeros(3,'i') @@ -125,11 +127,12 @@ for name in filenames: table.info_clear() 
table.info_append(extra_header+[ scriptID + ' ' + ' '.join(sys.argv[1:]), - "grid\ta {grid[0]}\tb {grid[1]}\tc {grid[2]}".format(grid=newInfo['grid']), - "size\tx {size[0]}\ty {size[1]}\tz {size[2]}".format(size=newInfo['size']), - "origin\tx {origin[0]}\ty {origin[1]}\tz {origin[2]}".format(origin=info['origin']), - "homogenization\t{homog}".format(homog=info['homogenization']), - "microstructures\t{microstructures}".format(microstructures=newInfo['microstructures']), + "poking\ta {}\tb {}\tc {}".format(Nx,Ny,Nz), + "grid\ta {}\tb {}\tc {}".format(*newInfo['grid']), + "size\tx {}\ty {}\tz {}".format(*newInfo['size']), + "origin\tx {}\ty {}\tz {}".format(*info['origin']), + "homogenization\t{}".format(info['homogenization']), + "microstructures\t{}".format(newInfo['microstructures']), ]) table.labels_clear() table.labels_append(['{dim}_{label}'.format(dim = 1+i,label = options.position) for i in range(3)]+['microstructure']) @@ -144,5 +147,3 @@ for name in filenames: # --- output finalization -------------------------------------------------------------------------- table.close() # close ASCII table - if name != 'STDIN': - os.rename(name+'_tmp',os.path.splitext(name)[0] + '_poked_%ix%ix%i.seeds'%(Nx,Ny,Nz)) diff --git a/processing/pre/seeds_fromRandom.py b/processing/pre/seeds_fromRandom.py index ea01f74f7..be2320d47 100755 --- a/processing/pre/seeds_fromRandom.py +++ b/processing/pre/seeds_fromRandom.py @@ -152,15 +152,15 @@ for name in filenames: if not options.selective: - seeds = np.zeros((3,options.N),dtype=float) # seed positions array + seeds = np.zeros((3,options.N),dtype='d') # seed positions array gridpoints = random.sample(range(gridSize),options.N) # create random permutation of all grid positions and choose first N seeds[0,:] = (np.mod(gridpoints ,options.grid[0])\ - +np.random.random()) /options.grid[0] + +np.random.random(options.N)) /options.grid[0] seeds[1,:] = (np.mod(gridpoints// options.grid[0] ,options.grid[1])\ - +np.random.random()) 
/options.grid[1] + +np.random.random(options.N)) /options.grid[1] seeds[2,:] = (np.mod(gridpoints//(options.grid[1]*options.grid[0]),options.grid[2])\ - +np.random.random()) /options.grid[2] + +np.random.random(options.N)) /options.grid[2] else: diff --git a/processing/pre/seeds_fromTable.py b/processing/pre/seeds_fromTable.py index d35b17c29..9bfbd2578 100755 --- a/processing/pre/seeds_fromTable.py +++ b/processing/pre/seeds_fromTable.py @@ -56,7 +56,7 @@ if options.blacklist != None: options.blacklist = map(int,options.blacklist) # --- loop over input files ------------------------------------------------------------------------- -if filenames == []: filenames = ['STDIN'] +if filenames == []: filenames = [None] for name in filenames: try: