From c8b3f08743c433c93bb15f14ef4ea74a413742c9 Mon Sep 17 00:00:00 2001 From: Test User Date: Tue, 12 Apr 2016 04:20:06 +0200 Subject: [PATCH 01/20] updated version information after successful test of v2.0.0-95-gd278d86 --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 7d1fa0440..03f85d164 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -v2.0.0-93-g0ddc29d +v2.0.0-95-gd278d86 From 8b27de7d163e0da8075e1dd6e1e441b7c6a86364 Mon Sep 17 00:00:00 2001 From: Pratheek Shanthraj Date: Tue, 12 Apr 2016 11:05:01 +0200 Subject: [PATCH 02/20] fix bug in MPI output --- code/DAMASK_spectral.f90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/code/DAMASK_spectral.f90 b/code/DAMASK_spectral.f90 index 45451df08..e999bf5dc 100644 --- a/code/DAMASK_spectral.f90 +++ b/code/DAMASK_spectral.f90 @@ -427,7 +427,7 @@ program DAMASK_spectral ! prepare MPI parallel out (including opening of file) allocate(outputSize(worldsize), source = 0_MPI_OFFSET_KIND) outputSize(worldrank+1) = size(materialpoint_results,kind=MPI_OFFSET_KIND)*int(pReal,MPI_OFFSET_KIND) - call MPI_allreduce(MPI_IN_PLACE,outputSize,worldsize,MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get total output size over each process + call MPI_allreduce(MPI_IN_PLACE,outputSize,worldsize,MPI_LONG,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get total output size over each process if(ierr /=0_pInt) call IO_error(894_pInt, ext_msg='MPI_allreduce') call MPI_file_open(PETSC_COMM_WORLD, & trim(getSolverWorkingDirectoryName())//trim(getSolverJobName())//'.spectralOut', & From 284c2783e2ee10413d887d751978c0f6480cf38c Mon Sep 17 00:00:00 2001 From: Test User Date: Tue, 12 Apr 2016 16:19:57 +0200 Subject: [PATCH 03/20] updated version information after successful test of v2.0.0-97-g8b27de7 --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 03f85d164..c271e0939 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -v2.0.0-95-gd278d86 +v2.0.0-97-g8b27de7 From 6e2ca7d59579d7e3ddfd166615db22cab461ed2d Mon Sep 17 00:00:00 2001 From: Philip Eisenlohr Date: Wed, 13 Apr 2016 17:48:17 -0400 Subject: [PATCH 04/20] Removed hard requirement of 3D dataset. Fills in necessary dimensions. 
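Coordinates given with one or two dimensions are now accepted and padded
with zeros up to the required three. A minimal sketch of the padding idiom
used below (assuming table.data already holds the coordinate columns as a
NumPy array):

    import numpy as np
    if len(table.data.shape) < 2: table.data.shape += (1,)      # expand single column to 2D shape
    if table.data.shape[1] < 3:                                 # fewer than three coordinates per point?
      table.data = np.hstack((table.data,
                              np.zeros((table.data.shape[0],
                                        3-table.data.shape[1]),dtype='f')))  # fill coords up to 3D with zeros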
--- processing/post/vtk_rectilinearGrid.py | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/processing/post/vtk_rectilinearGrid.py b/processing/post/vtk_rectilinearGrid.py index a00d60185..38deabf3a 100755 --- a/processing/post/vtk_rectilinearGrid.py +++ b/processing/post/vtk_rectilinearGrid.py @@ -38,9 +38,9 @@ parser.set_defaults(position ='ipinitialcoord', if filenames == []: filenames = [None] for name in filenames: - try: - table = damask.ASCIItable(name = name, - buffered = False, readonly = True) + try: table = damask.ASCIItable(name = name, + buffered = False, + readonly = True) except: continue damask.util.report(scriptName,name) @@ -48,10 +48,13 @@ for name in filenames: table.head_read() - errors = [] - if table.label_dimension(options.position) != 3: - errors.append('coordinates {} are not a vector.'.format(options.position)) + remarks = [] + errors = [] + coordDim = table.label_dimension(options.position) + if not 3 >= coordDim >= 1: errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.position)) + elif coordDim < 3: remarks.append('appending {} dimensions to coordinates "{}"...'.format(3-coordDim,options.position)) + if remarks != []: damask.util.croak(remarks) if errors != []: damask.util.croak(errors) table.close(dismiss=True) @@ -60,6 +63,11 @@ for name in filenames: # --------------- figure out size and grid --------------------------------------------------------- table.data_readArray(options.position) + if len(table.data.shape) < 2: table.data.shape += (1,) # expand to 2D shape + if table.data.shape[1] < 3: + table.data = np.hstack((table.data, + np.zeros((table.data.shape[0], + 3-table.data.shape[1]),dtype='f'))) # fill coords up to 3D with zeros coords = [np.unique(table.data[:,i]) for i in xrange(3)] if options.mode == 'cell': From feae2164fc18bbbe04decb616b998055b151edf0 Mon Sep 17 00:00:00 2001 From: Philip Eisenlohr Date: Wed, 13 Apr 2016 17:48:49 -0400 Subject: [PATCH 05/20] polishing and now explicitly closing input table. --- processing/post/vtk_addRectilinearGridData.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/processing/post/vtk_addRectilinearGridData.py b/processing/post/vtk_addRectilinearGridData.py index 3ca54d84e..6b78950c7 100755 --- a/processing/post/vtk_addRectilinearGridData.py +++ b/processing/post/vtk_addRectilinearGridData.py @@ -83,9 +83,9 @@ damask.util.croak('{}: {} points and {} cells...'.format(options.vtk,Npoints,Nce if filenames == []: filenames = [None] for name in filenames: - try: - table = damask.ASCIItable(name = name, - buffered = False, readonly = True) + try: table = damask.ASCIItable(name = name, + buffered = False, + readonly = True) except: continue damask.util.report(scriptName, name) @@ -133,6 +133,8 @@ for name in filenames: elif datatype == 'vector': VTKarray[me].InsertNextTuple3(*map(float,theData)) elif datatype == 'scalar': VTKarray[me].InsertNextValue(float(theData[0])) + table.input_close() # close input ASCII table + # ------------------------------------------ add data --------------------------------------- for datatype,labels in active.items(): # loop over scalar,color From 0b09ce31db22b9f50280fecbb35a4366fad04fff Mon Sep 17 00:00:00 2001 From: Philip Eisenlohr Date: Wed, 13 Apr 2016 17:49:48 -0400 Subject: [PATCH 06/20] changed coordinate option to be consistent with rectilinearGrid. 
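The former -d/--deformed option (default 'ipdeformedcoord') becomes
-c/--coordinates with default 'pos', matching vtk_rectilinearGrid, so a
typical call (file name hypothetical) now reads

    vtk_pointcloud.py --coordinates pos data.txt

instead of passing --deformed.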
--- processing/post/vtk_pointcloud.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/processing/post/vtk_pointcloud.py b/processing/post/vtk_pointcloud.py index 7701421fc..f35911135 100755 --- a/processing/post/vtk_pointcloud.py +++ b/processing/post/vtk_pointcloud.py @@ -18,12 +18,12 @@ Produce a VTK point cloud dataset based on coordinates given in an ASCIItable. """, version = scriptID) -parser.add_option('-d', '--deformed', - dest = 'deformed', +parser.add_option('-c', '--coordinates', + dest = 'pos', type = 'string', metavar = 'string', - help = 'deformed coordinate label [%default]') + help = 'coordinate label [%default]') -parser.set_defaults(deformed = 'ipdeformedcoord' +parser.set_defaults(pos = 'pos' ) (options, filenames) = parser.parse_args() @@ -46,9 +46,9 @@ for name in filenames: errors = [] remarks = [] - coordDim = table.label_dimension(options.deformed) - if not 3 >= coordDim >= 1: errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.deformed)) - elif coordDim < 3: remarks.append('appending {} dimensions to coordinates "{}"...'.format(3-coordDim,options.deformed)) + coordDim = table.label_dimension(options.pos) + if not 3 >= coordDim >= 1: errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.pos)) + elif coordDim < 3: remarks.append('appending {} dimensions to coordinates "{}"...'.format(3-coordDim,options.pos)) if remarks != []: damask.util.croak(remarks) if errors != []: @@ -58,7 +58,7 @@ for name in filenames: # ------------------------------------------ process data --------------------------------------- - table.data_readArray(options.deformed) + table.data_readArray(options.pos) if len(table.data.shape) < 2: table.data.shape += (1,) # expand to 2D shape if table.data.shape[1] < 3: table.data = np.hstack((table.data, From ea0bc80af997a3cf7df5df0af43100b9b7f5c201 Mon Sep 17 00:00:00 2001 From: Philip Eisenlohr Date: Wed, 13 Apr 2016 18:03:40 -0400 Subject: [PATCH 07/20] modernized to reflect rectilinearGrid processing. Now supports vector data and in-place adding. --- processing/post/vtk_addPointcloudData.py | 211 ++++++++++++++--------- 1 file changed, 125 insertions(+), 86 deletions(-) diff --git a/processing/post/vtk_addPointcloudData.py b/processing/post/vtk_addPointcloudData.py index 95b91d8ee..602e69307 100755 --- a/processing/post/vtk_addPointcloudData.py +++ b/processing/post/vtk_addPointcloudData.py @@ -1,8 +1,9 @@ #!/usr/bin/env python # -*- coding: UTF-8 no BOM -*- -import os,sys,vtk +import os,vtk import damask +from collections import defaultdict from optparse import OptionParser scriptName = os.path.splitext(os.path.basename(__file__))[0] @@ -17,125 +18,163 @@ Add scalar and RGB tuples from ASCIItable to existing VTK point cloud (.vtp). 
""", version = scriptID) -parser.add_option('-v', '--vtk', dest='vtk', \ +parser.add_option( '--vtk', + dest = 'vtk', + type = 'string', metavar = 'string', help = 'VTK file name') +parser.add_option( '--inplace', + dest = 'inplace', + action = 'store_true', + help = 'modify VTK file in-place') +parser.add_option('-r', '--render', + dest = 'render', + action = 'store_true', + help = 'open output in VTK render window') parser.add_option('-s', '--scalar', dest='scalar', action='extend', \ help = 'scalar values') +parser.add_option('-v', '--vector', + dest = 'vector', + action = 'extend', metavar = '', + help = 'vector value label(s)') parser.add_option('-c', '--color', dest='color', action='extend', \ help = 'RGB color tuples') -parser.set_defaults(scalar = []) -parser.set_defaults(color = []) +parser.set_defaults(scalar = [], + vector = [], + color = [], + inplace = False, + render = False, +) (options, filenames) = parser.parse_args() -datainfo = { # list of requested labels per datatype - 'scalar': {'len':1, - 'label':[]}, - 'color': {'len':3, - 'label':[]}, - } - -if not os.path.exists(options.vtk): - parser.error('VTK file does not exist'); sys.exit() +if not options.vtk: parser.error('No VTK file specified.') +if not os.path.exists(options.vtk): parser.error('VTK file does not exist.') reader = vtk.vtkXMLPolyDataReader() reader.SetFileName(options.vtk) reader.Update() -Npoints = reader.GetNumberOfPoints() -Ncells = reader.GetNumberOfCells() +Npoints = reader.GetNumberOfPoints() +Ncells = reader.GetNumberOfCells() Nvertices = reader.GetNumberOfVerts() -Polydata = reader.GetOutput() +Polydata = reader.GetOutput() if Npoints != Ncells or Npoints != Nvertices: - parser.error('Number of points, cells, and vertices in VTK differ from each other'); sys.exit() -if options.scalar is not None: datainfo['scalar']['label'] += options.scalar -if options.color is not None: datainfo['color']['label'] += options.color + parser.error('Number of points, cells, and vertices in VTK differ from each other.') -# ------------------------------------------ setup file handles --------------------------------------- +damask.util.croak('{}: {} points, {} vertices, and {} cells...'.format(options.vtk,Npoints,Nvertices,Ncells)) -files = [] -if filenames == []: - files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr}) -else: - for name in filenames: - if os.path.exists(name): - files.append({'name':name, 'input':open(name), 'output':sys.stderr, 'croak':sys.stderr}) +# --- loop over input files ------------------------------------------------------------------------- -#--- loop over input files ------------------------------------------------------------------------ -for file in files: - if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n') - else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n') +if filenames == []: filenames = [None] - table = damask.ASCIItable(file['input'],file['output'],False) # make unbuffered ASCII_table - table.head_read() # read ASCII header info +for name in filenames: + try: table = damask.ASCIItable(name = name, + buffered = False, + readonly = True) + except: continue + damask.util.report(scriptName, name) -# --------------- figure out columns to process - active = {} - column = {} +# --- interpret header ---------------------------------------------------------------------------- - array = {} + table.head_read() + + remarks = [] + errors = [] + VTKarray = {} + active = defaultdict(list) - for 
datatype,info in datainfo.items(): - for label in info['label']: - foundIt = False - for key in ['1_'+label,label]: - if key in table.labels: - foundIt = True - if datatype not in active: active[datatype] = [] - if datatype not in column: column[datatype] = {} - if datatype not in array: array[datatype] = {} - active[datatype].append(label) - column[datatype][label] = table.labels.index(key) # remember columns of requested data - if datatype == 'scalar': - array[datatype][label] = vtk.vtkDoubleArray() - array[datatype][label].SetNumberOfComponents(1) - array[datatype][label].SetName(label) - elif datatype == 'color': - array[datatype][label] = vtk.vtkUnsignedCharArray() - array[datatype][label].SetNumberOfComponents(3) - array[datatype][label].SetName(label) - if not foundIt: - file['croak'].write('column %s not found...\n'%label) - + for datatype,dimension,label in [['scalar',1,options.scalar], + ['vector',3,options.vector], + ['color',3,options.color], + ]: + for i,dim in enumerate(table.label_dimension(label)): + me = label[i] + if dim == -1: remarks.append('{} "{}" not found...'.format(datatype,me)) + elif dim > dimension: remarks.append('"{}" not of dimension {}...'.format(me,dimension)) + else: + remarks.append('adding {} "{}"...'.format(datatype,me)) + active[datatype].append(me) + + if datatype in ['scalar','vector']: VTKarray[me] = vtk.vtkDoubleArray() + elif datatype == 'color': VTKarray[me] = vtk.vtkUnsignedCharArray() + + VTKarray[me].SetNumberOfComponents(dimension) + VTKarray[me].SetName(label[i]) + + if remarks != []: damask.util.croak(remarks) + if errors != []: + damask.util.croak(errors) + table.close(dismiss = True) + continue + # ------------------------------------------ process data --------------------------------------- - while table.data_read(): # read next data line of ASCII table + while table.data_read(): # read next data line of ASCII table - for datatype,labels in active.items(): # loop over scalar,color - for label in labels: # loop over all requested items - theData = table.data[column[datatype][label]:\ - column[datatype][label]+datainfo[datatype]['len']] # read strings - if datatype == 'color': - theData = map(lambda x: int(255.*float(x)),theData) - array[datatype][label].InsertNextTuple3(theData[0],theData[1],theData[2],) - elif datatype == 'scalar': - array[datatype][label].InsertNextValue(float(theData[0])) + for datatype,labels in active.items(): # loop over scalar,color + for me in labels: # loop over all requested items + theData = [table.data[i] for i in table.label_indexrange(me)] # read strings + if datatype == 'color': VTKarray[me].InsertNextTuple3(*map(lambda x: int(255.*float(x)),theData)) + elif datatype == 'vector': VTKarray[me].InsertNextTuple3(*map(float,theData)) + elif datatype == 'scalar': VTKarray[me].InsertNextValue(float(theData[0])) table.input_close() # close input ASCII table # ------------------------------------------ add data --------------------------------------- - for datatype,labels in active.items(): # loop over scalar,color + damask.util.croak('adding now...') + for datatype,labels in active.items(): # loop over scalar,color + damask.util.croak('type {}'.format(datatype)) + if datatype == 'color': - Polydata.GetPointData().SetScalars(array[datatype][labels[0]]) - Polydata.GetCellData().SetScalars(array[datatype][labels[0]]) - for label in labels: # loop over all requested items - Polydata.GetPointData().AddArray(array[datatype][label]) - Polydata.GetCellData().AddArray(array[datatype][label]) + 
Polydata.GetPointData().SetScalars(VTKarray[active['color'][0]]) + Polydata.GetCellData().SetScalars(VTKarray[active['color'][0]]) + for me in labels: # loop over all requested items + damask.util.croak('label {}'.format(me)) + Polydata.GetPointData().AddArray(VTKarray[me]) + Polydata.GetCellData().AddArray(VTKarray[me]) + + damask.util.croak('...done.') Polydata.Modified() - if vtk.VTK_MAJOR_VERSION <= 5: - Polydata.Update() + if vtk.VTK_MAJOR_VERSION <= 5: Polydata.Update() # ------------------------------------------ output result --------------------------------------- -writer = vtk.vtkXMLPolyDataWriter() -writer.SetDataModeToBinary() -writer.SetCompressorTypeToZLib() -writer.SetFileName(os.path.splitext(options.vtk)[0]+'_added.vtp') -if vtk.VTK_MAJOR_VERSION <= 5: - writer.SetInput(Polydata) -else: - writer.SetInputData(Polydata) -writer.Write() + writer = vtk.vtkXMLPolyDataWriter() + writer.SetDataModeToBinary() + writer.SetCompressorTypeToZLib() + writer.SetFileName(os.path.splitext(options.vtk)[0]+('.vtp' if options.inplace else '_added.vtp')) + if vtk.VTK_MAJOR_VERSION <= 5: writer.SetInput(Polydata) + else: writer.SetInputData(Polydata) + writer.Write() + +# ------------------------------------------ render result --------------------------------------- + +if options.render: + mapper = vtk.vtkDataSetMapper() + mapper.SetInputData(Polydata) + actor = vtk.vtkActor() + actor.SetMapper(mapper) + +# Create the graphics structure. The renderer renders into the +# render window. The render window interactor captures mouse events +# and will perform appropriate camera or actor manipulation +# depending on the nature of the events. + + ren = vtk.vtkRenderer() + + renWin = vtk.vtkRenderWindow() + renWin.AddRenderer(ren) + + ren.AddActor(actor) + ren.SetBackground(1, 1, 1) + renWin.SetSize(200, 200) + + iren = vtk.vtkRenderWindowInteractor() + iren.SetRenderWindow(renWin) + + iren.Initialize() + renWin.Render() + iren.Start() From 1994b5a4c12955a030651e567a5b02b4f8cd4054 Mon Sep 17 00:00:00 2001 From: Philip Eisenlohr Date: Wed, 13 Apr 2016 19:29:04 -0400 Subject: [PATCH 08/20] removed debug messages --- processing/post/vtk_addPointcloudData.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/processing/post/vtk_addPointcloudData.py b/processing/post/vtk_addPointcloudData.py index 602e69307..58190de27 100755 --- a/processing/post/vtk_addPointcloudData.py +++ b/processing/post/vtk_addPointcloudData.py @@ -123,20 +123,14 @@ for name in filenames: # ------------------------------------------ add data --------------------------------------- - damask.util.croak('adding now...') for datatype,labels in active.items(): # loop over scalar,color - damask.util.croak('type {}'.format(datatype)) - if datatype == 'color': Polydata.GetPointData().SetScalars(VTKarray[active['color'][0]]) Polydata.GetCellData().SetScalars(VTKarray[active['color'][0]]) for me in labels: # loop over all requested items - damask.util.croak('label {}'.format(me)) Polydata.GetPointData().AddArray(VTKarray[me]) Polydata.GetCellData().AddArray(VTKarray[me]) - damask.util.croak('...done.') - Polydata.Modified() if vtk.VTK_MAJOR_VERSION <= 5: Polydata.Update() From 170d377092b749a460bbbde1c3cfb5b73c0b5e5a Mon Sep 17 00:00:00 2001 From: Philip Eisenlohr Date: Wed, 13 Apr 2016 19:33:46 -0400 Subject: [PATCH 09/20] much improved algorithm to speed up grain identification. 
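Instead of first assigning grain IDs and then repeatedly reshuffling them
among similar orientations, each point now queries its kd-tree neighborhood
once, collects every already identified grain within the disorientation
threshold as a candidate, and directly merges all candidates into the
largest one. A final pass condenses the surviving identifiers into
consecutive numbers; roughly (variable names as in the script):

    grainIDs = np.where(np.array(memberCounts) > 0)[0]           # identify "live" grain identifiers
    packingMap = dict(zip(list(grainIDs),range(len(grainIDs))))  # map to condense into consecutive IDs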
--- processing/post/addGrainID.py | 118 +++++++++++++--------------------- 1 file changed, 44 insertions(+), 74 deletions(-) diff --git a/processing/post/addGrainID.py b/processing/post/addGrainID.py index 7eb48f286..a250c197c 100755 --- a/processing/post/addGrainID.py +++ b/processing/post/addGrainID.py @@ -5,7 +5,6 @@ import numpy as np import damask from optparse import OptionParser from scipy import spatial -from collections import defaultdict scriptName = os.path.splitext(os.path.basename(__file__))[0] scriptID = ' '.join([scriptName,damask.version]) @@ -23,7 +22,7 @@ parser.add_option('-r', '--radius', parser.add_option('-d', '--disorientation', dest = 'disorientation', type = 'float', metavar = 'float', - help = 'disorientation threshold per grain [%default] (degrees)') + help = 'disorientation threshold in degrees [%default]') parser.add_option('-s', '--symmetry', dest = 'symmetry', type = 'string', metavar = 'string', @@ -61,7 +60,8 @@ parser.add_option('-p', '--position', type = 'string', metavar = 'string', help = 'spatial position of voxel [%default]') -parser.set_defaults(symmetry = 'cubic', +parser.set_defaults(disorientation = 5, + symmetry = 'cubic', coords = 'pos', degrees = False, ) @@ -86,17 +86,16 @@ if np.sum(input) != 1: parser.error('needs exactly one input format.') (options.matrix,9,'matrix'), (options.quaternion,4,'quaternion'), ][np.where(input)[0][0]] # select input label that was requested -toRadians = np.pi/180.0 if options.degrees else 1.0 # rescale degrees to radians -cos_disorientation = np.cos(options.disorientation/2.*toRadians) +toRadians = np.pi/180.0 if options.degrees else 1.0 # rescale degrees to radians +cos_disorientation = np.cos(np.radians(options.disorientation/2.)) # cos of half the disorientation angle # --- loop over input files ------------------------------------------------------------------------- if filenames == []: filenames = [None] for name in filenames: - try: - table = damask.ASCIItable(name = name, - buffered = False) + try: table = damask.ASCIItable(name = name, + buffered = False) except: continue damask.util.report(scriptName,name) @@ -109,8 +108,10 @@ for name in filenames: errors = [] remarks = [] - if table.label_dimension(options.coords) != 3: errors.append('coordinates {} are not a vector.'.format(options.coords)) - if not np.all(table.label_dimension(label) == dim): errors.append('input {} does not have dimension {}.'.format(label,dim)) + if not 3 >= table.label_dimension(options.coords) >= 1: + errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.coords)) + if not np.all(table.label_dimension(label) == dim): + errors.append('input {} does not have dimension {}.'.format(label,dim)) else: column = table.label_index(label) if remarks != []: damask.util.croak(remarks) @@ -122,8 +123,10 @@ for name in filenames: # ------------------------------------------ assemble header --------------------------------------- table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) - table.labels_append('grainID_{}@{}'.format(label, - options.disorientation if options.degrees else np.degrees(options.disorientation))) # report orientation source and disorientation in degrees + table.labels_append('grainID_{}@{:g}'.format('+'.join(label) + if isinstance(label, (list,tuple)) + else label, + options.disorientation)) # report orientation source and disorientation table.head_write() # ------------------------------------------ process data ------------------------------------------ @@ -162,7 +165,7 @@ for 
name in filenames: time_delta = (time.clock()-tick) * (len(grainID) - p) / p bg.set_message('(%02i:%02i:%02i) processing point %i of %i (grain count %i)...'\ - %(time_delta//3600,time_delta%3600//60,time_delta%60,p,len(grainID),len(orientations))) + %(time_delta//3600,time_delta%3600//60,time_delta%60,p,len(grainID),np.count_nonzero(memberCounts))) if inputtype == 'eulers': o = damask.Orientation(Eulers = np.array(map(float,table.data[column:column+3]))*toRadians, @@ -179,84 +182,51 @@ for name in filenames: o = damask.Orientation(quaternion = np.array(map(float,table.data[column:column+4])), symmetry = options.symmetry).reduced() - matched = False + matched = False + alreadyChecked = {} + candidates = [] + bestDisorientation = damask.Quaternion([0,0,0,1]) # initialize to 180 deg rotation as worst case -# check against last matched needs to be really picky. best would be to exclude jumps across the poke (checking distance between last and me?) -# when walking through neighborhood first check whether grainID of that point has already been tested, if yes, skip! - - if matchedID != -1: # has matched before? - matched = (o.quaternion.conjugated() * orientations[matchedID].quaternion).w > cos_disorientation - - if not matched: - alreadyChecked = {} - bestDisorientation = damask.Quaternion([0,0,0,1]) # initialize to 180 deg rotation as worst case - for i in kdtree.query_ball_point(kdtree.data[p],options.radius): # check all neighboring points - gID = grainID[i] - if gID != -1 and gID not in alreadyChecked: # indexed point belonging to a grain not yet tested? - alreadyChecked[gID] = True # remember not to check again - disorientation = o.disorientation(orientations[gID],SST = False)[0] # compare against other orientation - if disorientation.quaternion.w > cos_disorientation and \ - disorientation.quaternion.w >= bestDisorientation.w: # within threshold and betterthan current best? + for i in kdtree.query_ball_point(kdtree.data[p],options.radius): # check all neighboring points + gID = grainID[i] + if gID != -1 and gID not in alreadyChecked: # indexed point belonging to a grain not yet tested? + alreadyChecked[gID] = True # remember not to check again + disorientation = o.disorientation(orientations[gID],SST = False)[0] # compare against other orientation + if disorientation.quaternion.w > cos_disorientation: # within threshold ... + candidates.append(gID) # remember as potential candidate + if disorientation.quaternion.w >= bestDisorientation.w: # ... and better than current best? matched = True matchedID = gID # remember that grain bestDisorientation = disorientation.quaternion - if not matched: # no match -> new grain found - memberCounts += [1] # start new membership counter + if matched: # did match existing grain + memberCounts[matchedID] += 1 + if len(candidates) > 1: # ambiguity in grain identification? 
+ largestGrain = sorted(candidates,key=lambda x:memberCounts[x])[-1] # find largest among potential candidate grains + matchedID = largestGrain + for c in [c for c in candidates if c != largestGrain]: # loop over smaller candidates + memberCounts[largestGrain] += memberCounts[c] # reassign member count of smaller to largest + memberCounts[c] = 0 + grainID = np.where(np.in1d(grainID,candidates), largestGrain, grainID) # relabel grid points of smaller candidates as largest one + + else: # no match -> new grain found orientations += [o] # initialize with current orientation + memberCounts += [1] # start new membership counter matchedID = g g += 1 # increment grain counter - else: # did match existing grain - memberCounts[matchedID] += 1 - grainID[p] = matchedID # remember grain index assigned to point p += 1 # increment point - bg.set_message('identifying similar orientations among {} grains...'.format(len(orientations))) - - memberCounts = np.array(memberCounts) - similarOrientations = [[] for i in xrange(len(orientations))] - - for i,orientation in enumerate(orientations[:-1]): # compare each identified orientation... - for j in xrange(i+1,len(orientations)): # ...against all others that were defined afterwards - if orientation.disorientation(orientations[j],SST = False)[0].quaternion.w > cos_disorientation: # similar orientations in both grainIDs? - similarOrientations[i].append(j) # remember in upper triangle... - similarOrientations[j].append(i) # ...and lower triangle of matrix - - if similarOrientations[i] != []: - bg.set_message('grainID {} is as: {}'.format(i,' '.join(map(str,similarOrientations[i])))) - - stillShifting = True - while stillShifting: - stillShifting = False - tick = time.clock() - - for p,gID in enumerate(grainID): # walk through all points - if p > 0 and p % 1000 == 0: - - time_delta = (time.clock()-tick) * (len(grainID) - p) / p - bg.set_message('(%02i:%02i:%02i) shifting ID of point %i out of %i (grain count %i)...' - %(time_delta//3600,time_delta%3600//60,time_delta%60,p,len(grainID),len(orientations))) - if similarOrientations[gID] != []: # orientation of my grainID is similar to someone else? - similarNeighbors = defaultdict(int) # frequency of neighboring grainIDs sharing my orientation - for i in kdtree.query_ball_point(kdtree.data[p],options.radius): # check all neighboring point - if grainID[i] in similarOrientations[gID]: # neighboring point shares my orientation? 
- similarNeighbors[grainID[i]] += 1 # remember its grainID - if similarNeighbors != {}: # found similar orientation(s) in neighborhood - candidates = np.array([gID]+similarNeighbors.keys()) # possible replacement grainIDs for me - grainID[p] = candidates[np.argsort(memberCounts[candidates])[-1]] # adopt ID that is most frequent in overall dataset - memberCounts[gID] -= 1 # my former ID loses one fellow - memberCounts[grainID[p]] += 1 # my new ID gains one fellow - bg.set_message('{}:{} --> {}'.format(p,gID,grainID[p])) # report switch of grainID - stillShifting = True - + grainIDs = np.where(np.array(memberCounts) > 0)[0] # identify "live" grain identifiers + packingMap = dict(zip(list(grainIDs),range(len(grainIDs)))) # map to condense into consecutive IDs + table.data_rewind() outputAlive = True p = 0 while outputAlive and table.data_read(): # read next data line of ASCII table - table.data_append(1+grainID[p]) # add grain ID + table.data_append(1+packingMap[grainID[p]]) # add (condensed) grain ID outputAlive = table.data_write() # output processed line p += 1 From dbfd107b480e135cdfdea50661002b69ee27da36 Mon Sep 17 00:00:00 2001 From: Philip Eisenlohr Date: Thu, 14 Apr 2016 17:43:19 -0400 Subject: [PATCH 10/20] Add deformed configuration of given initial coordinates. Operates on periodic three-dimensional x,y,z-ordered data sets. Replaces 3Dvisualize..! --- processing/post/addDisplacements.py | 222 ++++++++++++++++++++++++++++ 1 file changed, 222 insertions(+) create mode 100755 processing/post/addDisplacements.py diff --git a/processing/post/addDisplacements.py b/processing/post/addDisplacements.py new file mode 100755 index 000000000..3c3456b91 --- /dev/null +++ b/processing/post/addDisplacements.py @@ -0,0 +1,222 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 no BOM -*- + +import os,sys,math +import numpy as np +import scipy.ndimage +from optparse import OptionParser +import damask + +scriptName = os.path.splitext(os.path.basename(__file__))[0] +scriptID = ' '.join([scriptName,damask.version]) + + +#-------------------------------------------------------------------------------------------------- +def cell2node(cellData,grid): + + nodeData = 0.0 + datalen = np.array(cellData.shape[3:]).prod() + + for i in xrange(datalen): + node = scipy.ndimage.convolve(cellData.reshape(tuple(grid)+(datalen,))[...,i], + np.ones((2,2,2))/8., # 2x2x2 neighborhood of cells + mode = 'wrap', + origin = -1, # offset to have cell origin as center + ) # now averaged at cell origins + node = np.append(node,node[np.newaxis,0,:,:,...],axis=0) # wrap along z + node = np.append(node,node[:,0,np.newaxis,:,...],axis=1) # wrap along y + node = np.append(node,node[:,:,0,np.newaxis,...],axis=2) # wrap along x + + nodeData = node[...,np.newaxis] if i==0 else np.concatenate((nodeData,node[...,np.newaxis]),axis=-1) + + return nodeData + +#-------------------------------------------------------------------------------------------------- +def displacementAvgFFT(F,grid,size,nodal=False,transformed=False): + """calculate average cell center (or nodal) displacement for deformation gradient field specified in each grid cell""" + if nodal: + x, y, z = np.meshgrid(np.linspace(0,size[0],1+grid[0]), + np.linspace(0,size[1],1+grid[1]), + np.linspace(0,size[2],1+grid[2]), + indexing = 'ij') + else: + x, y, z = np.meshgrid(np.linspace(0,size[0],grid[0],endpoint=False), + np.linspace(0,size[1],grid[1],endpoint=False), + np.linspace(0,size[2],grid[2],endpoint=False), + indexing = 'ij') + + origCoords = 
np.concatenate((z[:,:,:,None],y[:,:,:,None],x[:,:,:,None]),axis = 3) + + F_fourier = F if transformed else np.fft.rfftn(F,axes=(0,1,2)) # transform or use provided data + Favg = np.real(F_fourier[0,0,0,:,:])/grid.prod() # take zero freq for average + avgDisplacement = np.einsum('ml,ijkl->ijkm',Favg-np.eye(3),origCoords) # dX = Favg.X + + return avgDisplacement + +#-------------------------------------------------------------------------------------------------- +def displacementFluctFFT(F,grid,size,nodal=False,transformed=False): + """calculate cell center (or nodal) displacement for deformation gradient field specified in each grid cell""" + integrator = 0.5j * size / math.pi + + kk, kj, ki = np.meshgrid(np.where(np.arange(grid[2])>grid[2]//2,np.arange(grid[2])-grid[2],np.arange(grid[2])), + np.where(np.arange(grid[1])>grid[1]//2,np.arange(grid[1])-grid[1],np.arange(grid[1])), + np.arange(grid[0]//2+1), + indexing = 'ij') + k_s = np.concatenate((ki[:,:,:,None],kj[:,:,:,None],kk[:,:,:,None]),axis = 3) + k_sSquared = np.einsum('...l,...l',k_s,k_s) + k_sSquared[0,0,0] = 1.0 # ignore global average frequency + +#-------------------------------------------------------------------------------------------------- +# integration in Fourier space + + displacement_fourier = -np.einsum('ijkml,ijkl,l->ijkm', + F if transformed else np.fft.rfftn(F,axes=(0,1,2)), + k_s, + integrator, + ) / k_sSquared[...,np.newaxis] + +#-------------------------------------------------------------------------------------------------- +# backtransformation to real space + + displacement = np.fft.irfftn(displacement_fourier,grid,axes=(0,1,2)) + + return cell2node(displacement,grid) if nodal else displacement + + +# -------------------------------------------------------------------- +# MAIN +# -------------------------------------------------------------------- + +parser = OptionParser(option_class=damask.extendableOption, usage='%prog options file[s]', description = """ +Add deformed configuration of given initial coordinates. +Operates on periodic three-dimensional x,y,z-ordered data sets. 
+ +""", version = scriptID) + +parser.add_option('-f', '--defgrad', + dest = 'defgrad', + metavar = 'string', + help = 'column label of deformation gradient [%default]') +parser.add_option('-c', '--coordinates', + dest = 'coords', + metavar = 'string', + help = 'column label of coordinates [%default]') +parser.add_option('--nodal', + dest = 'nodal', + action = 'store_true', + help = 'output nodal (not cell-centered) displacements') + +parser.set_defaults(defgrad = 'f', + coords = 'ipinitialcoord', + nodal = False, + ) + +(options,filenames) = parser.parse_args() + +# --- loop over input files ------------------------------------------------------------------------- + +if filenames == []: filenames = [None] + +for name in filenames: + try: table = damask.ASCIItable(name = name, + outname = (os.path.splitext(name)[0]+ + '_nodal'+ + os.path.splitext(name)[1]) if (options.nodal and name) else None, + buffered = False) + except: continue + damask.util.report(scriptName,name) + +# ------------------------------------------ read header ------------------------------------------ + + table.head_read() + +# ------------------------------------------ sanity checks ---------------------------------------- + + errors = [] + remarks = [] + + if table.label_dimension(options.defgrad) != 9: + errors.append('deformation gradient "{}" is not a 3x3 tensor.'.format(options.defgrad)) + + coordDim = table.label_dimension(options.coords) + if not 3 >= coordDim >= 1: errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.coords)) + elif coordDim < 3: remarks.append('appending {} dimensions to coordinates "{}"...'.format(3-coordDim,options.coords)) + + if remarks != []: damask.util.croak(remarks) + if errors != []: + damask.util.croak(errors) + table.close(dismiss=True) + continue + +# --------------- figure out size and grid --------------------------------------------------------- + + table.data_readArray([options.defgrad,options.coords]) + table.data_rewind() + + if len(table.data.shape) < 2: table.data.shape += (1,) # expand to 2D shape + if table.data[:,9:].shape[1] < 3: + table.data = np.hstack((table.data, + np.zeros((table.data.shape[0], + 3-table.data[:,9:].shape[1]),dtype='f'))) # fill coords up to 3D with zeros + + if remarks != []: damask.util.croak(remarks) + if errors != []: + damask.util.croak(errors) + table.close(dismiss = True) + continue + +# --------------- figure out size and grid --------------------------------------------------------- + + coords = [np.unique(table.data[:,9+i]) for i in xrange(3)] + mincorner = np.array(map(min,coords)) + maxcorner = np.array(map(max,coords)) + grid = np.array(map(len,coords),'i') + size = grid/np.maximum(np.ones(3,'d'), grid-1.0) * (maxcorner-mincorner) # size from edge to edge = dim * n/(n-1) + size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 set to smallest among other spacings + + N = grid.prod() + + if N != len(table.data): errors.append('data count {} does not match grid {}x{}x{}.'.format(N,*grid)) + if errors != []: + damask.util.croak(errors) + table.close(dismiss = True) + continue + +# ------------------------------------------ process data ------------------------------------------ + + F_fourier = np.fft.rfftn(table.data[:,:9].reshape(grid[2],grid[1],grid[0],3,3),axes=(0,1,2)) # perform transform only once... 
+ + displacement = displacementFluctFFT(F_fourier,grid,size,options.nodal,transformed=True) + avgDisplacement = displacementAvgFFT (F_fourier,grid,size,options.nodal,transformed=True) + +# ------------------------------------------ assemble header --------------------------------------- + + if options.nodal: + table.info_clear() + table.labels_clear() + + table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) + table.labels_append((['{}_pos' .format(i+1) for i in xrange(3)] if options.nodal else []) + + ['{}_avg({}).{}' .format(i+1,options.defgrad,options.coords) for i in xrange(3)] + + ['{}_fluct({}).{}'.format(i+1,options.defgrad,options.coords) for i in xrange(3)] ) + table.head_write() + +# ------------------------------------------ output data ------------------------------------------- + + zrange = np.linspace(0,size[2],1+grid[2]) if options.nodal else xrange(grid[2]) + yrange = np.linspace(0,size[1],1+grid[1]) if options.nodal else xrange(grid[1]) + xrange = np.linspace(0,size[0],1+grid[0]) if options.nodal else xrange(grid[0]) + + for i,z in enumerate(zrange): + for j,y in enumerate(yrange): + for k,x in enumerate(xrange): + if options.nodal: table.data_clear() + else: table.data_read() + table.data_append([x,y,z] if options.nodal else []) + table.data_append(list(avgDisplacement[i,j,k,:])) + table.data_append(list( displacement[i,j,k,:])) + table.data_write() + +# ------------------------------------------ output finalization ----------------------------------- + + table.close() # close ASCII tables From 01ba11448c5e991378e1ce4039fec3cc651ca181 Mon Sep 17 00:00:00 2001 From: Philip Eisenlohr Date: Fri, 15 Apr 2016 07:41:24 -0400 Subject: [PATCH 11/20] small polishing of remark output regarding <3D vectors --- processing/post/addDisplacements.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/processing/post/addDisplacements.py b/processing/post/addDisplacements.py index 3c3456b91..98d5c3291 100755 --- a/processing/post/addDisplacements.py +++ b/processing/post/addDisplacements.py @@ -139,8 +139,12 @@ for name in filenames: errors.append('deformation gradient "{}" is not a 3x3 tensor.'.format(options.defgrad)) coordDim = table.label_dimension(options.coords) - if not 3 >= coordDim >= 1: errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.coords)) - elif coordDim < 3: remarks.append('appending {} dimensions to coordinates "{}"...'.format(3-coordDim,options.coords)) + if not 3 >= coordDim >= 1: + errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.coords)) + elif coordDim < 3: + remarks.append('appending {} dimension{} to coordinates "{}"...'.format(3-coordDim, + 's' if coordDim < 2 else '', + options.coords)) if remarks != []: damask.util.croak(remarks) if errors != []: From f040842ed4418bf5776747090548282906f9964e Mon Sep 17 00:00:00 2001 From: Philip Eisenlohr Date: Fri, 15 Apr 2016 07:41:55 -0400 Subject: [PATCH 12/20] superseded by addDisplacements --- processing/post/addDeformedConfiguration.py | 164 -------------------- 1 file changed, 164 deletions(-) delete mode 100755 processing/post/addDeformedConfiguration.py diff --git a/processing/post/addDeformedConfiguration.py b/processing/post/addDeformedConfiguration.py deleted file mode 100755 index 3fb39ee0d..000000000 --- a/processing/post/addDeformedConfiguration.py +++ /dev/null @@ -1,164 +0,0 @@ -#!/usr/bin/env python -# -*- coding: UTF-8 no BOM -*- - -import os,sys,math -import numpy as np -from optparse import 
OptionParser -import damask - -scriptName = os.path.splitext(os.path.basename(__file__))[0] -scriptID = ' '.join([scriptName,damask.version]) - -#-------------------------------------------------------------------------------------------------- -def deformedCoordsFFT(F,undeformed=False): - - wgt = 1.0/grid.prod() - integrator = np.array([0.+1.j,0.+1.j,0.+1.j],'c16') * size/ 2.0 / math.pi - step = size/grid - - F_fourier = np.fft.rfftn(F,axes=(0,1,2)) - coords_fourier = np.zeros(F_fourier.shape[0:4],'c16') - - if undeformed: - Favg=np.eye(3) - else: - Favg=np.real(F_fourier[0,0,0,:,:])*wgt -#-------------------------------------------------------------------------------------------------- -# integration in Fourier space - k_s = np.zeros([3],'i') - for i in xrange(grid[2]): - k_s[2] = i - if(i > grid[2]//2 ): k_s[2] = k_s[2] - grid[2] - for j in xrange(grid[1]): - k_s[1] = j - if(j > grid[1]//2 ): k_s[1] = k_s[1] - grid[1] - for k in xrange(grid[0]//2+1): - k_s[0] = k - for m in xrange(3): - coords_fourier[i,j,k,m] = sum(F_fourier[i,j,k,m,0:3]*k_s*integrator) - if (any(k_s != 0)): - coords_fourier[i,j,k,0:3] /= -sum(k_s*k_s) - -#-------------------------------------------------------------------------------------------------- -# add average to scaled fluctuation and put (0,0,0) on (0,0,0) - coords = np.fft.irfftn(coords_fourier,F.shape[0:3],axes=(0,1,2)) - - offset_coords = np.dot(F[0,0,0,:,:],step/2.0) - scaling*coords[0,0,0,0:3] - for z in xrange(grid[2]): - for y in xrange(grid[1]): - for x in xrange(grid[0]): - coords[z,y,x,0:3] = scaling*coords[z,y,x,0:3] \ - + offset_coords \ - + np.dot(Favg,step*np.array([x,y,z])) - - return coords - -# -------------------------------------------------------------------- -# MAIN -# -------------------------------------------------------------------- - -parser = OptionParser(option_class=damask.extendableOption, usage='%prog options file[s]', description = """ -Add deformed configuration of given initial coordinates. -Operates on periodic three-dimensional x,y,z-ordered data sets. 
- -""", version = scriptID) - -parser.add_option('-f', '--defgrad',dest='defgrad', metavar = 'string', - help='heading of deformation gradient columns [%default]') -parser.add_option('--reference', dest='undeformed', action='store_true', - help='map results to reference (undeformed) average configuration [%default]') -parser.add_option('--scaling', dest='scaling', action='extend', metavar = '', - help='scaling of fluctuation') -parser.add_option('-u', '--unitlength', dest='unitlength', type='float', metavar = 'float', - help='set unit length for 2D model [%default]') -parser.add_option('--coordinates', dest='coords', metavar='string', - help='column heading for coordinates [%default]') - -parser.set_defaults(defgrad = 'f') -parser.set_defaults(coords = 'ipinitialcoord') -parser.set_defaults(scaling = []) -parser.set_defaults(undeformed = False) -parser.set_defaults(unitlength = 0.0) - -(options,filenames) = parser.parse_args() - -options.scaling += [1.0 for i in xrange(max(0,3-len(options.scaling)))] -scaling = map(float, options.scaling) - - -# --- loop over input files ------------------------------------------------------------------------- - -if filenames == []: filenames = [None] - -for name in filenames: - try: - table = damask.ASCIItable(name = name, - buffered = False) - except: continue - damask.util.report(scriptName,name) - -# ------------------------------------------ read header ------------------------------------------ - - table.head_read() - -# ------------------------------------------ sanity checks ---------------------------------------- - - errors = [] - remarks = [] - - if table.label_dimension(options.coords) != 3: errors.append('coordinates {} are not a vector.'.format(options.coords)) - else: colCoord = table.label_index(options.coords) - - if table.label_dimension(options.defgrad) != 9: errors.append('deformation gradient {} is not a tensor.'.format(options.defgrad)) - else: colF = table.label_index(options.defgrad) - - if remarks != []: damask.util.croak(remarks) - if errors != []: - damask.util.croak(errors) - table.close(dismiss = True) - continue - -# --------------- figure out size and grid --------------------------------------------------------- - - table.data_readArray() - - coords = [np.unique(table.data[:,colCoord+i]) for i in xrange(3)] - mincorner = np.array(map(min,coords)) - maxcorner = np.array(map(max,coords)) - grid = np.array(map(len,coords),'i') - size = grid/np.maximum(np.ones(3,'d'), grid-1.0) * (maxcorner-mincorner) # size from edge to edge = dim * n/(n-1) - size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 set to smallest among other spacings - - N = grid.prod() - - if N != len(table.data): errors.append('data count {} does not match grid {}x{}x{}.'.format(N,*grid)) - if errors != []: - damask.util.croak(errors) - table.close(dismiss = True) - continue - -# ------------------------------------------ assemble header --------------------------------------- - - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) - for coord in xrange(3): - label = '{}_{}_{}'.format(coord+1,options.defgrad,options.coords) - if np.any(scaling) != 1.0: label+='_{}_{}_{}'.format(scaling) - if options.undeformed: label+='_undeformed' - table.labels_append([label]) # extend ASCII header with new labels - table.head_write() - -# ------------------------------------------ read deformation gradient field ----------------------- - centroids = deformedCoordsFFT(table.data[:,colF:colF+9].reshape(grid[2],grid[1],grid[0],3,3), - 
options.undeformed) -# ------------------------------------------ process data ------------------------------------------ - table.data_rewind() - for z in xrange(grid[2]): - for y in xrange(grid[1]): - for x in xrange(grid[0]): - table.data_read() - table.data_append(list(centroids[z,y,x,:])) - table.data_write() - -# ------------------------------------------ output finalization ----------------------------------- - - table.close() # close ASCII tables \ No newline at end of file From 3474800db1241a22fb99213d6d30e3409594f961 Mon Sep 17 00:00:00 2001 From: Philip Eisenlohr Date: Fri, 15 Apr 2016 14:04:10 -0400 Subject: [PATCH 13/20] switched coordinates default from "ipinitialcoords" (ugh) to "pos" --- processing/post/vtk_rectilinearGrid.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/processing/post/vtk_rectilinearGrid.py b/processing/post/vtk_rectilinearGrid.py index 38deabf3a..6f1228ad8 100755 --- a/processing/post/vtk_rectilinearGrid.py +++ b/processing/post/vtk_rectilinearGrid.py @@ -24,11 +24,11 @@ parser.add_option('-m', '--mode', type = 'choice', choices = ['cell','point'], help = 'cell-centered or point-centered coordinates ') parser.add_option('-c', '--coordinates', - dest = 'position', + dest = 'coords', type = 'string', metavar = 'string', help = 'coordinate label [%default]') -parser.set_defaults(position ='ipinitialcoord', - mode ='cell' +parser.set_defaults(coords = 'pos', + mode = 'cell' ) (options, filenames) = parser.parse_args() @@ -50,9 +50,9 @@ for name in filenames: remarks = [] errors = [] - coordDim = table.label_dimension(options.position) - if not 3 >= coordDim >= 1: errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.position)) - elif coordDim < 3: remarks.append('appending {} dimensions to coordinates "{}"...'.format(3-coordDim,options.position)) + coordDim = table.label_dimension(options.coords) + if not 3 >= coordDim >= 1: errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.coords)) + elif coordDim < 3: remarks.append('appending {} dimensions to coordinates "{}"...'.format(3-coordDim,options.coords)) if remarks != []: damask.util.croak(remarks) if errors != []: @@ -62,7 +62,7 @@ for name in filenames: # --------------- figure out size and grid --------------------------------------------------------- - table.data_readArray(options.position) + table.data_readArray(options.coords) if len(table.data.shape) < 2: table.data.shape += (1,) # expand to 2D shape if table.data.shape[1] < 3: table.data = np.hstack((table.data, @@ -109,7 +109,7 @@ for name in filenames: writer.SetDataModeToBinary() writer.SetCompressorTypeToZLib() writer.SetFileName(os.path.join(directory,os.path.splitext(filename)[0] \ - +'_{}({})'.format(options.position, options.mode) \ + +'_{}({})'.format(options.coords, options.mode) \ +'.'+writer.GetDefaultFileExtension())) else: writer = vtk.vtkDataSetWriter() From cbe9afb2b3b1f8353842d6233f1d2043be4c7026 Mon Sep 17 00:00:00 2001 From: Philip Eisenlohr Date: Fri, 15 Apr 2016 14:06:01 -0400 Subject: [PATCH 14/20] removed --mode option. script figures on its own. 
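Whether the data is point- or cell-centered is now inferred by counting the
data lines of the ASCII table and comparing against the point and cell
count of the VTK grid, along the lines of

    if   datacount == Npoints: mode = 'point'
    elif datacount == Ncells:  mode = 'cell'
    else:                      damask.util.croak('Data count is incompatible with grid...')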
--- processing/post/vtk_addRectilinearGridData.py | 28 +++++++++++-------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/processing/post/vtk_addRectilinearGridData.py b/processing/post/vtk_addRectilinearGridData.py index 6b78950c7..d54bb4cf4 100755 --- a/processing/post/vtk_addRectilinearGridData.py +++ b/processing/post/vtk_addRectilinearGridData.py @@ -30,10 +30,6 @@ parser.add_option('-r', '--render', dest = 'render', action = 'store_true', help = 'open output in VTK render window') -parser.add_option('-m', '--mode', - dest = 'mode', - type = 'choice', metavar = 'string', choices = ['cell', 'point'], - help = 'cell-centered or point-centered data') parser.add_option('-s', '--scalar', dest = 'scalar', action = 'extend', metavar = '', @@ -56,7 +52,6 @@ parser.set_defaults(scalar = [], (options, filenames) = parser.parse_args() -if not options.mode: parser.error('No data mode specified.') if not options.vtk: parser.error('No VTK file specified.') if not os.path.exists(options.vtk): parser.error('VTK file does not exist.') @@ -124,8 +119,11 @@ for name in filenames: # ------------------------------------------ process data --------------------------------------- + datacount = 0 + while table.data_read(): # read next data line of ASCII table - + + datacount += 1 # count data lines for datatype,labels in active.items(): # loop over scalar,color for me in labels: # loop over all requested items theData = [table.data[i] for i in table.label_indexrange(me)] # read strings @@ -133,17 +131,25 @@ for name in filenames: elif datatype == 'vector': VTKarray[me].InsertNextTuple3(*map(float,theData)) elif datatype == 'scalar': VTKarray[me].InsertNextValue(float(theData[0])) - table.input_close() # close input ASCII table + table.close() # close input ASCII table # ------------------------------------------ add data --------------------------------------- + if datacount == Npoints: mode = 'point' + elif datacount == Ncells: mode = 'cell' + else: + damask.util.croak('Data count is incompatible with grid...') + continue + + damask.util.croak('{} mode...'.format(mode)) + for datatype,labels in active.items(): # loop over scalar,color if datatype == 'color': - if options.mode == 'cell': rGrid.GetCellData().SetScalars(VTKarray[active['color'][0]]) - elif options.mode == 'point': rGrid.GetPointData().SetScalars(VTKarray[active['color'][0]]) + if mode == 'cell': rGrid.GetCellData().SetScalars(VTKarray[active['color'][0]]) + elif mode == 'point': rGrid.GetPointData().SetScalars(VTKarray[active['color'][0]]) for me in labels: # loop over all requested items - if options.mode == 'cell': rGrid.GetCellData().AddArray(VTKarray[me]) - elif options.mode == 'point': rGrid.GetPointData().AddArray(VTKarray[me]) + if mode == 'cell': rGrid.GetCellData().AddArray(VTKarray[me]) + elif mode == 'point': rGrid.GetPointData().AddArray(VTKarray[me]) rGrid.Modified() if vtk.VTK_MAJOR_VERSION <= 5: rGrid.Update() From 8ac40ced5a3b355db1fb4099c94b4f24fb478576 Mon Sep 17 00:00:00 2001 From: Philip Eisenlohr Date: Fri, 15 Apr 2016 18:23:35 -0400 Subject: [PATCH 15/20] coordinates of nodal or cell-centered values are always labelled "pos" --- processing/post/postResults.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/processing/post/postResults.py b/processing/post/postResults.py index baa306003..95b9eabf8 100755 --- a/processing/post/postResults.py +++ b/processing/post/postResults.py @@ -1003,11 +1003,8 @@ fileOpen = False assembleHeader = True header = [] standard = ['inc'] + \ - {True: 
['time'], - False:[]}[options.time] + \ - ['elem','node','ip','grain'] + \ - {True: ['1_nodeinitialcoord','2_nodeinitialcoord','3_nodeinitialcoord'], - False:['1_ipinitialcoord','2_ipinitialcoord','3_ipinitialcoord']}[options.nodalScalar != []] + ['time'] if options.time else [] + \ + ['elem','node','ip','grain','1_pos','2_pos','3_pos'] # --------------------------- loop over positions -------------------------------- From 7567aae7c08da7763fa71e05f1e308ffdfdcb1ac Mon Sep 17 00:00:00 2001 From: Philip Eisenlohr Date: Fri, 15 Apr 2016 18:25:56 -0400 Subject: [PATCH 16/20] now deals with 1D, 2D, 3D. speed up of grid detection. "pos" as default coordinate label. --- processing/post/addEuclideanDistance.py | 51 +++++++++++++------------ 1 file changed, 27 insertions(+), 24 deletions(-) diff --git a/processing/post/addEuclideanDistance.py b/processing/post/addEuclideanDistance.py index 7f32abb6b..5299c4d27 100755 --- a/processing/post/addEuclideanDistance.py +++ b/processing/post/addEuclideanDistance.py @@ -89,19 +89,20 @@ Add column(s) containing Euclidean distance to grain structural features: bounda """, version = scriptID) parser.add_option('-c','--coordinates', dest='coords', metavar='string', - help='column heading for coordinates [%default]') + help='column label of coordinates [%default]') parser.add_option('-i','--identifier', dest='id', metavar = 'string', - help='heading of column containing grain identifier [%default]') + help='column label of grain identifier [%default]') parser.add_option('-t','--type', dest = 'type', action = 'extend', metavar = '', help = 'feature type {%s} '%(', '.join(map(lambda x:'/'.join(x['names']),features))) ) parser.add_option('-n','--neighborhood',dest='neighborhood', choices = neighborhoods.keys(), metavar = 'string', help = 'type of neighborhood [neumann] {%s}'%(', '.join(neighborhoods.keys()))) -parser.add_option('-s', '--scale', dest = 'scale', type = 'float', metavar='float', +parser.add_option('-s', '--scale', dest = 'scale', type = 'float', metavar = 'float', help = 'voxel size [%default]') -parser.set_defaults(coords = 'ipinitialcoord') -parser.set_defaults(id = 'texture') -parser.set_defaults(neighborhood = 'neumann') -parser.set_defaults(scale = 1.0) +parser.set_defaults(coords = 'pos', + id = 'texture', + neighborhood = 'neumann', + scale = 1.0, + ) (options,filenames) = parser.parse_args() @@ -125,10 +126,8 @@ for i,feature in enumerate(features): if filenames == []: filenames = [None] for name in filenames: - try: - table = damask.ASCIItable(name = name, buffered = False) - except: - continue + try: table = damask.ASCIItable(name = name, buffered = False) + except: continue damask.util.report(scriptName,name) # ------------------------------------------ read header ------------------------------------------ @@ -141,9 +140,11 @@ for name in filenames: remarks = [] column = {} - if table.label_dimension(options.coords) != 3: errors.append('coordinates {} are not a vector.'.format(options.coords)) + coordDim = table.label_dimension(options.coords) + if not 3 >= coordDim >= 1: + errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.coords)) else: coordCol = table.label_index(options.coords) - + if table.label_dimension(options.id) != 1: errors.append('grain identifier {} not found.'.format(options.id)) else: idCol = table.label_index(options.id) @@ -164,18 +165,20 @@ for name in filenames: table.data_readArray() - coords = [{},{},{}] - for i in xrange(len(table.data)): - for j in xrange(3): - 
coords[j][str(table.data[i,coordCol+j])] = True - grid = np.array(map(len,coords),'i') - size = grid/np.maximum(np.ones(3,'d'),grid-1.0)* \ - np.array([max(map(float,coords[0].keys()))-min(map(float,coords[0].keys())),\ - max(map(float,coords[1].keys()))-min(map(float,coords[1].keys())),\ - max(map(float,coords[2].keys()))-min(map(float,coords[2].keys())),\ - ],'d') # size from bounding box, corrected for cell-centeredness + coords = [np.unique(table.data[:,coordCol+i]) for i in xrange(coordDim)] + mincorner = np.array(map(min,coords)) + maxcorner = np.array(map(max,coords)) + grid = np.array(map(len,coords)+[1]*(3-len(coords)),'i') - size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 set to smallest among other spacings + N = grid.prod() + + if N != len(table.data): errors.append('data count {} does not match grid '.format(N) + + 'x'.join(map(str,grid)) + + '.') + if errors != []: + damask.util.croak(errors) + table.close(dismiss = True) + continue # ------------------------------------------ process value field ----------------------------------- From 81f6530f5254bb98c044c8ac5b9dabe0a58ea51a Mon Sep 17 00:00:00 2001 From: Philip Eisenlohr Date: Fri, 15 Apr 2016 18:27:23 -0400 Subject: [PATCH 17/20] changed default label of coordinates to "pos" --- processing/post/addCurl.py | 10 +-- processing/post/addDisplacements.py | 5 +- processing/post/addDivergence.py | 8 +-- processing/post/addGradient.py | 14 +++-- processing/post/averageDown.py | 15 ++--- processing/post/blowUp.py | 95 ++++++++++++++--------------- 6 files changed, 73 insertions(+), 74 deletions(-) diff --git a/processing/post/addCurl.py b/processing/post/addCurl.py index d8b1ee025..683fc0631 100755 --- a/processing/post/addCurl.py +++ b/processing/post/addCurl.py @@ -71,18 +71,18 @@ Deals with both vector- and tensor-valued fields. parser.add_option('-c','--coordinates', dest = 'coords', - type = 'string', metavar='string', - help = 'column heading for coordinates [%default]') + type = 'string', metavar = 'string', + help = 'column label of coordinates [%default]') parser.add_option('-v','--vector', dest = 'vector', action = 'extend', metavar = '', - help = 'heading of columns containing vector field values') + help = 'column label(s) of vector field values') parser.add_option('-t','--tensor', dest = 'tensor', action = 'extend', metavar = '', - help = 'heading of columns containing tensor field values') + help = 'column label(s) of tensor field values') -parser.set_defaults(coords = 'ipinitialcoord', +parser.set_defaults(coords = 'pos', ) (options,filenames) = parser.parse_args() diff --git a/processing/post/addDisplacements.py b/processing/post/addDisplacements.py index 98d5c3291..b73994bde 100755 --- a/processing/post/addDisplacements.py +++ b/processing/post/addDisplacements.py @@ -88,8 +88,9 @@ def displacementFluctFFT(F,grid,size,nodal=False,transformed=False): # -------------------------------------------------------------------- parser = OptionParser(option_class=damask.extendableOption, usage='%prog options file[s]', description = """ -Add deformed configuration of given initial coordinates. +Add displacments resulting from deformation gradient field. Operates on periodic three-dimensional x,y,z-ordered data sets. +Outputs at cell centers or cell nodes (into separate file). 
""", version = scriptID) @@ -107,7 +108,7 @@ parser.add_option('--nodal', help = 'output nodal (not cell-centered) displacements') parser.set_defaults(defgrad = 'f', - coords = 'ipinitialcoord', + coords = 'pos', nodal = False, ) diff --git a/processing/post/addDivergence.py b/processing/post/addDivergence.py index aadaceabf..8d58367ac 100755 --- a/processing/post/addDivergence.py +++ b/processing/post/addDivergence.py @@ -58,17 +58,17 @@ Deals with both vector- and tensor-valued fields. parser.add_option('-c','--coordinates', dest = 'coords', type = 'string', metavar = 'string', - help = 'column heading for coordinates [%default]') + help = 'column label of coordinates [%default]') parser.add_option('-v','--vector', dest = 'vector', action = 'extend', metavar = '', - help = 'heading of columns containing vector field values') + help = 'column label(s) of vector field values') parser.add_option('-t','--tensor', dest = 'tensor', action = 'extend', metavar = '', - help = 'heading of columns containing tensor field values') + help = 'column label(s) of tensor field values') -parser.set_defaults(coords = 'ipinitialcoord', +parser.set_defaults(coords = 'pos', ) (options,filenames) = parser.parse_args() diff --git a/processing/post/addGradient.py b/processing/post/addGradient.py index 555e587de..4d136c8b9 100755 --- a/processing/post/addGradient.py +++ b/processing/post/addGradient.py @@ -9,7 +9,9 @@ import damask scriptName = os.path.splitext(os.path.basename(__file__))[0] scriptID = ' '.join([scriptName,damask.version]) +#-------------------------------------------------------------------------------------------------- def gradFFT(geomdim,field): + grid = np.array(np.shape(field)[2::-1]) N = grid.prod() # field size n = np.array(np.shape(field)[3:]).prod() # data size @@ -17,7 +19,7 @@ def gradFFT(geomdim,field): elif n == 1: dataType = 'scalar' field_fourier = np.fft.fftpack.rfftn(field,axes=(0,1,2)) - grad_fourier = np.zeros(field_fourier.shape+(3,),'c16') + grad_fourier = np.zeros(field_fourier.shape+(3,),'c16') # differentiation in Fourier space k_s = np.zeros([3],'i') @@ -61,17 +63,17 @@ Deals with both vector- and scalar fields. 
From 24d029c7ed51bab68c21cfd4954d5bf19eec3ebb Mon Sep 17 00:00:00 2001
From: Philip Eisenlohr
Date: Fri, 15 Apr 2016 18:54:09 -0400
Subject: [PATCH 18/20] speed up (x 5 to 10) identification of most frequent
 microstructure index

---
 processing/pre/geom_clean.py | 14 +++++---------
 1 file changed, 5 insertions(+), 9 deletions(-)

diff --git a/processing/pre/geom_clean.py b/processing/pre/geom_clean.py
index 18caf68b9..2bef9dc08 100755
--- a/processing/pre/geom_clean.py
+++ b/processing/pre/geom_clean.py
@@ -6,15 +6,12 @@ import numpy as np
 import damask
 from scipy import ndimage
 from optparse import OptionParser
-from collections import defaultdict
 
 scriptName = os.path.splitext(os.path.basename(__file__))[0]
 scriptID   = ' '.join([scriptName,damask.version])
 
 def mostFrequent(arr):
-  d = defaultdict(int)
-  for i in arr: d[i] += 1
-  return sorted(d.iteritems(), key=lambda x: x[1], reverse=True)[0][0]                              # return value of most frequent microstructure
+  return np.argmax(np.bincount(arr))
 
 
 #--------------------------------------------------------------------------------------------------
@@ -43,10 +40,9 @@ parser.set_defaults(stencil = 3,
 if filenames == []: filenames = [None]
 
 for name in filenames:
-  try:
-    table = damask.ASCIItable(name = name,
-                              buffered = False,
-                              labeled = False)
+  try:    table = damask.ASCIItable(name = name,
+                                    buffered = False,
+                                    labeled = False)
   except: continue
   damask.util.report(scriptName,name)
 
@@ -72,7 +68,7 @@ for name in filenames:
 
 # --- read data ------------------------------------------------------------------------------------
 
-  microstructure = table.microstructure_read(info['grid']).reshape(info['grid'],order='F')          # read microstructure
+  microstructure = table.microstructure_read(info['grid']).reshape(info['grid'],order='F')          # read microstructure
 
 # --- do work ------------------------------------------------------------------------------------
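The 5- to 10-fold speed-up quoted in PATCH 18/20 comes from replacing a per-element Python counting loop with numpy's bincount, which histograms non-negative integers in compiled code before argmax picks the most frequent one. A self-contained comparison of the two variants from the diff above (window contents invented for illustration):

    import numpy as np
    from collections import defaultdict

    def mostFrequent_old(arr):                   # removed variant: one dict update per entry
      d = defaultdict(int)
      for i in arr: d[i] += 1
      return sorted(d.iteritems(), key=lambda x: x[1], reverse=True)[0][0]

    def mostFrequent_new(arr):                   # new variant: histogram + argmax, all in C
      return np.argmax(np.bincount(arr))

    window = np.random.randint(0, 20, 27)        # e.g. one 3x3x3 stencil neighborhood
    print mostFrequent_new(window)
    # caveats: bincount requires non-negative integers (true for microstructure indices),
    # and on ties the two variants may pick different, equally frequent values
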
""", version = scriptID) -parser.add_option('-c','--coordinates', dest='coords', metavar='string', - help='column heading for coordinates [%default]') -parser.add_option('-p','--packing', dest='packing', type='int', nargs=3, metavar='int int int', - help='dimension of packed group [%default]') -parser.add_option('-g','--grid', dest='resolution', type='int', nargs=3, metavar='int int int', - help='resolution in x,y,z [autodetect]') -parser.add_option('-s','--size', dest='dimension', type='float', nargs=3, metavar='int int int', - help='dimension in x,y,z [autodetect]') -parser.set_defaults(coords = 'ipinitialcoord') -parser.set_defaults(packing = (2,2,2)) -parser.set_defaults(grid = (0,0,0)) -parser.set_defaults(size = (0.0,0.0,0.0)) +parser.add_option('-c','--coordinates', + dest = 'coords', metavar = 'string', + help = 'column label of coordinates [%default]') +parser.add_option('-p','--packing', + dest = 'packing', type = 'int', nargs = 3, metavar = 'int int int', + help = 'dimension of packed group [%default]') +parser.add_option('-g','--grid', + dest = 'resolution', type = 'int', nargs = 3, metavar = 'int int int', + help = 'resolution in x,y,z [autodetect]') +parser.add_option('-s','--size', + dest = 'dimension', type = 'float', nargs = 3, metavar = 'int int int', + help = 'dimension in x,y,z [autodetect]') +parser.set_defaults(coords = 'pos', + packing = (2,2,2), + grid = (0,0,0), + size = (0.0,0.0,0.0), + ) (options,filenames) = parser.parse_args() - options.packing = np.array(options.packing) -prefix = 'blowUp%ix%ix%i_'%(options.packing[0],options.packing[1],options.packing[2]) +prefix = 'blowUp{}x{}x{}_'.format(*options.packing) # --- loop over input files ------------------------------------------------------------------------- if filenames == []: filenames = [None] for name in filenames: - try: - table = damask.ASCIItable(name = name, - outname = os.path.join(os.path.dirname(name), - prefix+ \ - os.path.basename(name)) if name else name, - buffered = False) + try: table = damask.ASCIItable(name = name, + outname = os.path.join(os.path.dirname(name), + prefix+os.path.basename(name)) if name else name, + buffered = False) except: continue damask.util.report(scriptName,name) # ------------------------------------------ read header ------------------------------------------ table.head_read() - errors = [] # ------------------------------------------ sanity checks ---------------------------------------- - if table.label_dimension(options.coords) != 3: - damask.util.croak('coordinates {} are not a vector.'.format(options.coords)) + errors = [] + remarks = [] + + if table.label_dimension(options.coords) != 3: errors.append('coordinates {} are not a vector.'.format(options.coords)) + else: colCoord = table.label_index(options.coords) + + colElem = table.label_index('elem') + + if remarks != []: damask.util.croak(remarks) + if errors != []: + damask.util.croak(errors) table.close(dismiss = True) continue - else: - coordCol = table.label_index(options.coords) - - -# ------------------------------------------ assemble header -------------------------------------- - - table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) # --------------- figure out size and grid --------------------------------------------------------- - table.data_readArray() - - coords = [{},{},{}] - for i in xrange(len(table.data)): - for j in xrange(3): - coords[j][str(table.data[i,coordCol+j])] = True - grid = np.array(map(len,coords),'i') - size = grid/np.maximum(np.ones(3,'d'),grid-1.0)* \ - 
np.array([max(map(float,coords[0].keys()))-min(map(float,coords[0].keys())),\ - max(map(float,coords[1].keys()))-min(map(float,coords[1].keys())),\ - max(map(float,coords[2].keys()))-min(map(float,coords[2].keys())),\ - ],'d') # size from bounding box, corrected for cell-centeredness - - size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 set to smallest among other spacings + table.data_readArray(options.coords) + coords = [np.unique(table.data[:,i]) for i in xrange(3)] + mincorner = np.array(map(min,coords)) + maxcorner = np.array(map(max,coords)) + grid = np.array(map(len,coords),'i') + size = grid/np.maximum(np.ones(3,'d'), grid-1.0) * (maxcorner-mincorner) # size from edge to edge = dim * n/(n-1) + size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 set to smallest among other spacings packing = np.array(options.packing,'i') outSize = grid*packing -# ------------------------------------------ assemble header --------------------------------------- +# ------------------------------------------ assemble header -------------------------------------- + + table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) table.head_write() # ------------------------------------------ process data ------------------------------------------- + table.data_rewind() data = np.zeros(outSize.tolist()+[len(table.labels)]) p = np.zeros(3,'i') @@ -107,15 +106,15 @@ for name in filenames: table.data_read() data[d[0]:d[0]+packing[0], d[1]:d[1]+packing[1], - d[2]:d[2]+packing[2], + d[2]:d[2]+packing[2], : ] = np.tile(np.array(table.data_asFloat(),'d'),packing.tolist()+[1]) # tile to match blowUp voxel size elementSize = size/grid/packing elem = 1 for c in xrange(outSize[2]): for b in xrange(outSize[1]): for a in xrange(outSize[0]): - data[a,b,c,coordCol:coordCol+3] = [a+0.5,b+0.5,c+0.5]*elementSize - data[a,b,c,table.label_index('elem')] = elem + data[a,b,c,colCoord:colCoord+3] = [a+0.5,b+0.5,c+0.5]*elementSize + if colElem != -1: data[a,b,c,colElem] = elem table.data = data[a,b,c,:].tolist() outputAlive = table.data_write() # output processed line elem += 1 From 24d029c7ed51bab68c21cfd4954d5bf19eec3ebb Mon Sep 17 00:00:00 2001 From: Philip Eisenlohr Date: Fri, 15 Apr 2016 18:54:09 -0400 Subject: [PATCH 18/20] speed up (x 5 to 10) identification of most frequent microstructure index --- processing/pre/geom_clean.py | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/processing/pre/geom_clean.py b/processing/pre/geom_clean.py index 18caf68b9..2bef9dc08 100755 --- a/processing/pre/geom_clean.py +++ b/processing/pre/geom_clean.py @@ -6,15 +6,12 @@ import numpy as np import damask from scipy import ndimage from optparse import OptionParser -from collections import defaultdict scriptName = os.path.splitext(os.path.basename(__file__))[0] scriptID = ' '.join([scriptName,damask.version]) def mostFrequent(arr): - d = defaultdict(int) - for i in arr: d[i] += 1 - return sorted(d.iteritems(), key=lambda x: x[1], reverse=True)[0][0] # return value of most frequent microstructure + return np.argmax(np.bincount(arr)) #-------------------------------------------------------------------------------------------------- @@ -43,10 +40,9 @@ parser.set_defaults(stencil = 3, if filenames == []: filenames = [None] for name in filenames: - try: - table = damask.ASCIItable(name = name, - buffered = False, - labeled = False) + try: table = damask.ASCIItable(name = name, + buffered = False, + labeled = False) except: continue 
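In PATCH 19/20 above, geom files need no coordinate detection at all: grid, size, and origin are taken from the geom header, and the rectilinear axes follow as evenly spaced cell boundaries. A sketch of that construction with an invented header dictionary (the real values come from table.head_getGeom()):

    import numpy as np

    info = {'origin': [0.0, 0.0, 0.0],           # hypothetical geom header data
            'size':   [1.0, 1.0, 0.5],
            'grid':   [4, 4, 2]}

    # grid[i] cells need grid[i]+1 boundaries along axis i
    coords = [np.linspace(info['origin'][i],
                          info['origin'][i] + info['size'][i],
                          num = info['grid'][i] + 1,
                          endpoint = True) for i in xrange(3)]
    print coords[2]                              # --> [ 0.    0.25  0.5 ]
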
From 13e214fe1849d48ef3b2e3f2ee85e1072ce13c38 Mon Sep 17 00:00:00 2001
From: Philip Eisenlohr
Date: Mon, 18 Apr 2016 15:08:29 -0400
Subject: [PATCH 20/20] added support for legacy VTK format

---
 processing/post/vtk_addPointcloudData.py | 23 ++++++++++++++++-------
 1 file changed, 16 insertions(+), 7 deletions(-)

diff --git a/processing/post/vtk_addPointcloudData.py b/processing/post/vtk_addPointcloudData.py
index 58190de27..419995949 100755
--- a/processing/post/vtk_addPointcloudData.py
+++ b/processing/post/vtk_addPointcloudData.py
@@ -51,13 +51,22 @@ parser.set_defaults(scalar = [],
 if not options.vtk:                 parser.error('No VTK file specified.')
 if not os.path.exists(options.vtk): parser.error('VTK file does not exist.')
 
-reader = vtk.vtkXMLPolyDataReader()
-reader.SetFileName(options.vtk)
-reader.Update()
-Npoints   = reader.GetNumberOfPoints()
-Ncells    = reader.GetNumberOfCells()
-Nvertices = reader.GetNumberOfVerts()
-Polydata  = reader.GetOutput()
+if os.path.splitext(options.vtk)[1] == '.vtp':
+  reader = vtk.vtkXMLPolyDataReader()
+  reader.SetFileName(options.vtk)
+  reader.Update()
+  Polydata = reader.GetOutput()
+elif os.path.splitext(options.vtk)[1] == '.vtk':
+  reader = vtk.vtkGenericDataObjectReader()
+  reader.SetFileName(options.vtk)
+  reader.Update()
+  Polydata = reader.GetPolyDataOutput()
+else:
+  parser.error('Unsupported VTK file type extension.')
+
+Npoints   = Polydata.GetNumberOfPoints()
+Ncells    = Polydata.GetNumberOfCells()
+Nvertices = Polydata.GetNumberOfVerts()
 
 if Npoints != Ncells or Npoints != Nvertices:
   parser.error('Number of points, cells, and vertices in VTK differ from each other.')
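PATCH 20/20 dispatches on the file extension because XML polydata (.vtp) and legacy (.vtk) files require different VTK reader classes, and only the legacy path goes through GetPolyDataOutput() to extract polydata from the generic reader's output. A standalone sketch of the same dispatch (input file name invented):

    import os
    import vtk

    def readPolydata(fname):
      ext = os.path.splitext(fname)[1]
      if ext == '.vtp':                          # XML format
        reader = vtk.vtkXMLPolyDataReader()
      elif ext == '.vtk':                        # legacy format
        reader = vtk.vtkGenericDataObjectReader()
      else:
        raise ValueError('unsupported VTK file type extension')
      reader.SetFileName(fname)
      reader.Update()
      return reader.GetOutput() if ext == '.vtp' else reader.GetPolyDataOutput()

    polydata = readPolydata('points.vtk')        # hypothetical input file
    print polydata.GetNumberOfPoints()
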