diff --git a/processing/post/addCalculation.py b/processing/post/addCalculation.py
index b31629bec..753e48950 100755
--- a/processing/post/addCalculation.py
+++ b/processing/post/addCalculation.py
@@ -67,7 +67,7 @@ for file in files:
   for label,formula in zip(options.labels,options.formulas):
     interpolator = []
-    for column in re.findall(r'#(.+?)#',formula):                   # loop over column labels in formula
+    for column in re.findall(r'#(.+?)#',formula):                                               # loop over column labels in formula
       formula = formula.replace('#'+column+'#','%f')
       if column in specials:
         interpolator += ['specials["%s"]'%column]
@@ -87,7 +87,7 @@ for file in files:
     if label not in brokenFormula:
       evaluator[label] = "'" + formula + "'%(" + ','.join(interpolator) + ")"
 
-# ------------------------------------------ calculate one result to get length of labels ------
+# ------------------------------------------ calculate one result to get length of labels ---------
   table.data_read()
   labelLen = {}
   for label in options.labels:
@@ -102,23 +102,20 @@ for file in files:
       table.labels_append(label)
     else:
       table.labels_append(['%i_%s'%(i+1,label) for i in xrange(labelLen[label])])
-
   table.head_write()
 
 # ------------------------------------------ process data ---------------------------------------
   outputAlive = True
   table.data_rewind()
-
-  while outputAlive and table.data_read():                          # read next data line of ASCII table
-
-    specials['_row_'] += 1                                          # count row
+  while outputAlive and table.data_read():                                                      # read next data line of ASCII table
+    specials['_row_'] += 1                                                                      # count row
     for label in options.labels:
       table.data_append(unravel(eval(eval(evaluator[label]))))
-    outputAlive = table.data_write()                                # output processed line
+    outputAlive = table.data_write()                                                            # output processed line
 
 # ------------------------------------------ output result ---------------------------------------
-  outputAlive and table.output_flush()                              # just in case of buffered ASCII table
+  outputAlive and table.output_flush()                                                          # just in case of buffered ASCII table
 
-  file['input'].close()                                             # close input ASCII table
+  file['input'].close()                                                                         # close input ASCII table (works for stdin)
+  file['output'].close()                                                                        # close output ASCII table (works for stdout)
   if file['name'] != 'STDIN':
-    file['output'].close()                                          # close output ASCII table
-    os.rename(file['name']+'_tmp',file['name'])                     # overwrite old one with tmp new
+    os.rename(file['name']+'_tmp',file['name'])                                                 # overwrite old one with tmp new
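Note: the hunks above leave addCalculation's formula machinery untouched: each '#column#' reference in a --formulas expression becomes a '%f' placeholder, and the double eval first builds the concrete formula string from the current row, then evaluates it. A minimal standalone sketch of that pattern (hypothetical column names, not DAMASK's exact code):

import re

formula = '#stress#/#area#'                        # hypothetical formula with column references
for column in re.findall(r'#(.+?)#',formula):      # loop over column labels in formula
  formula = formula.replace('#'+column+'#','%f')   # -> '%f/%f'

row = {'stress':12.0, 'area':4.0}                  # stand-in for one ASCII-table data line
print eval(formula%(row['stress'],row['area']))    # prints 3.0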
diff --git a/processing/post/addCauchy.py b/processing/post/addCauchy.py
index 51d662746..40ee2c879 100755
--- a/processing/post/addCauchy.py
+++ b/processing/post/addCauchy.py
@@ -1,7 +1,8 @@
 #!/usr/bin/env python
 # -*- coding: UTF-8 no BOM -*-
 
-import os,re,sys,math,numpy,string
+import os,re,sys,math,string
+import numpy as np
 from collections import defaultdict
 from optparse import OptionParser
 import damask
@@ -24,7 +25,6 @@ parser.add_option('-f','--defgrad', dest='defgrad', type='string', metavar='
                   help='heading of columns containing deformation gradient [%default]')
 parser.add_option('-p','--stress', dest='stress', type='string', metavar='string', \
                   help='heading of columns containing first Piola--Kirchhoff stress [%default]')
-
 parser.set_defaults(defgrad = 'f')
 parser.set_defaults(stress = 'p')
 
@@ -33,7 +33,7 @@ parser.set_defaults(stress = 'p')
 if options.defgrad == None or options.stress == None:
   parser.error('missing data column...')
 
-datainfo = {                                                        # list of requested labels per datatype
+datainfo = {                                                                                    # list of requested labels per datatype
              'defgrad':    {'mandatory': True,
                             'len':9,
                             'label':[]},
@@ -46,7 +46,6 @@ datainfo = { # lis
 datainfo['defgrad']['label'].append(options.defgrad)
 datainfo['stress']['label'].append(options.stress)
 
-
 # ------------------------------------------ setup file handles ---------------------------------------
 files = []
 if filenames == []:
@@ -61,8 +60,8 @@ for file in files:
   if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
   else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
 
-  table = damask.ASCIItable(file['input'],file['output'],False)     # make unbuffered ASCII_table
-  table.head_read()                                                 # read ASCII header info
+  table = damask.ASCIItable(file['input'],file['output'],False)                                 # make unbuffered ASCII_table
+  table.head_read()                                                                             # read ASCII header info
   table.info_append(string.replace(scriptID,'\n','\\n') + '\t' + ' '.join(sys.argv[1:]))
 
   active = defaultdict(list)
@@ -75,36 +74,34 @@ for file in files:
                  False:'%s'}[info['len']>1]%label
       if key not in table.labels:
         file['croak'].write('column %s not found...\n'%key)
-        missingColumns |= info['mandatory']                         # break if label is mandatory
+        missingColumns |= info['mandatory']                                                     # break if label is mandatory
       else:
         active[datatype].append(label)
-        column[datatype][label] = table.labels.index(key)           # remember columns of requested data
+        column[datatype][label] = table.labels.index(key)                                       # remember columns of requested data
 
   if missingColumns:
     continue
-
+# ------------------------------------------ assemble header ---------------------------------------
   table.labels_append(['%i_Cauchy'%(i+1)
-                       for i in xrange(datainfo['stress']['len'])]) # extend ASCII header with new labels
-
-# ------------------------------------------ assemble header ---------------------------------------
+                       for i in xrange(datainfo['stress']['len'])])                             # extend ASCII header with new labels
   table.head_write()
 
-# ------------------------------------------ process data ---------------------------------------
+# ------------------------------------------ process data ---------------------------------------
   outputAlive = True
-  while outputAlive and table.data_read():                          # read next data line of ASCII table
-
-    F = numpy.array(map(float,table.data[column['defgrad'][active['defgrad'][0]]:
-                                         column['defgrad'][active['defgrad'][0]]+datainfo['defgrad']['len']]),'d').reshape(3,3)
-    P = numpy.array(map(float,table.data[column['stress'][active['stress'][0]]:
-                                         column['stress'][active['stress'][0]]+datainfo['stress']['len']]),'d').reshape(3,3)
+  table.data_rewind()
+  while outputAlive and table.data_read():                                                      # read next data line of ASCII table
+    F = np.array(map(float,table.data[column['defgrad'][active['defgrad'][0]]:
+                                      column['defgrad'][active['defgrad'][0]]+datainfo['defgrad']['len']]),'d').reshape(3,3)
+    P = np.array(map(float,table.data[column['stress'][active['stress'][0]]:
+                                      column['stress'][active['stress'][0]]+datainfo['stress']['len']]),'d').reshape(3,3)
 
-    table.data_append(list(1.0/numpy.linalg.det(F)*numpy.dot(P,F.T).reshape(9)))                # [Cauchy] = (1/det(F)) * [P].[F_transpose]
-    outputAlive = table.data_write()                                # output processed line
+    table.data_append(list(1.0/np.linalg.det(F)*np.dot(P,F.T).reshape(9)))                      # [Cauchy] = (1/det(F)) * [P].[F_transpose]
+    outputAlive = table.data_write()                                                            # output processed line
 
-# ------------------------------------------ output result ---------------------------------------
-  table.output_flush()                                              # just in case of buffered ASCII table
+# ------------------------------------------ output result ---------------------------------------
+  outputAlive and table.output_flush()                                                          # just in case of buffered ASCII table
 
-  file['input'].close()                                             # close input ASCII table
+  file['input'].close()                                                                         # close input ASCII table (works for stdin)
+  file['output'].close()                                                                        # close output ASCII table (works for stdout)
   if file['name'] != 'STDIN':
-    file['output'].close()                                          # close output ASCII table
-    os.rename(file['name']+'_tmp',file['name'])                     # overwrite old one with tmp new
+    os.rename(file['name']+'_tmp',file['name'])                                                 # overwrite old one with tmp new
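Note: the per-row computation in addCauchy is unchanged apart from the numpy -> np rename: the Cauchy stress follows from the deformation gradient F and first Piola--Kirchhoff stress P as sigma = (1/det(F)) * P . F^T, exactly as the inline comment states. A quick self-contained check with made-up values:

import numpy as np

F = np.eye(3) + 0.01*np.random.rand(3,3)           # hypothetical deformation gradient
P = np.random.rand(3,3)                            # hypothetical 1st Piola--Kirchhoff stress
sigma = 1.0/np.linalg.det(F)*np.dot(P,F.T)         # [Cauchy] = (1/det(F)) * [P].[F_transpose]
print sigma.reshape(9)                             # the nine columns appended per table row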
-""" + string.replace('$Id$','\n','\\n') +""", version = string.replace(scriptID,'\n','\\n') ) - parser.add_option('--no-shape','-s', dest='noShape', action='store_false', \ help='do not calcuate shape mismatch [%default]') parser.add_option('--no-volume','-v', dest='noVolume', action='store_false', \ help='do not calculate volume mismatch [%default]') -parser.add_option('-c','--coordinates', dest='coords', type='string',\ +parser.add_option('-c','--coordinates', dest='coords', type='string', metavar='string', \ help='column heading for coordinates [%default]') -parser.add_option('-f','--deformation', dest='F', action='extend', type='string', \ - help='heading(s) of columns containing deformation tensor values %default') - +parser.add_option('-f','--deformation', dest='defgrad', type='string', metavar='string ', \ + help='column heading for coordinates [%defgrad]') parser.set_defaults(noVolume = False) parser.set_defaults(noShape = False) parser.set_defaults(coords = 'ip') -parser.set_defaults(F = 'f') +parser.set_defaults(defgrad = 'f') (options,filenames) = parser.parse_args() - -datainfo = { # list of requested labels per datatype - 'F': {'len':9, - 'label':[]}, +datainfo = { # list of requested labels per datatype + 'defgrad': {'len':9, + 'label':[]}, } -if options.F != None: datainfo['F']['label'] += options.F - -# ------------------------------------------ setup file handles --------------------------------------- +datainfo['defgrad']['label'].append(options.defgrad) +# ------------------------------------------ setup file handles ------------------------------------- files = [] if filenames == []: - files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout}) + files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr}) else: for name in filenames: if os.path.exists(name): - files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w')}) - -# ------------------------------------------ loop over input files --------------------------------------- + files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr}) +#--- loop over input files ------------------------------------------------------------------------ for file in files: - if file['name'] != 'STDIN': print file['name'], + if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n') + else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n') - table = damask.ASCIItable(file['input'],file['output'],False) # make unbuffered ASCII_table - table.head_read() # read ASCII header info - table.info_append(string.replace('$Id$','\n','\\n') + \ - '\t' + ' '.join(sys.argv[1:])) + table = damask.ASCIItable(file['input'],file['output'],False) # make unbuffered ASCII_table + table.head_read() # read ASCII header info + table.info_append(string.replace(scriptID,'\n','\\n') + '\t' + ' '.join(sys.argv[1:])) -# --------------- figure out dimension and resolution +# --------------- figure out dimension and resolution -------------------------------------------------- try: - locationCol = table.labels.index('%s.x'%options.coords) # columns containing location data + locationCol = table.labels.index('%s.x'%options.coords) # columns containing location data except ValueError: - print 'no coordinate data found...' 
+    file['croak'].write('no coordinate data found...\n'%key)
     continue
 
+  active = defaultdict(list)
+  column = defaultdict(dict)
+  missingColumns = False
+
+  for datatype,info in datainfo.items():
+    for label in info['label']:
+      key = '1_%s'%label
+      if key not in table.labels:
+        file['croak'].write('column %s not found...\n'%key)
+        missingColumns = True
+      else:
+        active[datatype].append(label)
+        column[datatype][label] = table.labels.index(key)                                       # remember columns of requested data
+        column = table.labels.index(key)
+
+  if missingColumns:
+    continue
+
+# --------------- figure out dimension and resolution ---------------------------------------------
   grid = [{},{},{}]
-  while table.data_read():                                          # read next data line of ASCII table
+  while table.data_read():                                                                      # read next data line of ASCII table
     for j in xrange(3):
-      grid[j][str(table.data[locationCol+j])] = True                # remember coordinate along x,y,z
-  res = numpy.array([len(grid[0]),\
-                     len(grid[1]),\
-                     len(grid[2]),],'i')                            # resolution is number of distinct coordinates found
-  geomdim = res/numpy.maximum(numpy.ones(3,'d'),res-1.0)* \
-            numpy.array([max(map(float,grid[0].keys()))-min(map(float,grid[0].keys())),\
-                         max(map(float,grid[1].keys()))-min(map(float,grid[1].keys())),\
-                         max(map(float,grid[2].keys()))-min(map(float,grid[2].keys())),\
-                        ],'d')                                      # dimension from bounding box, corrected for cell-centeredness
+      grid[j][str(table.data[locationCol+j])] = True                                            # remember coordinate along x,y,z
+  res = np.array([len(grid[0]),\
+                  len(grid[1]),\
+                  len(grid[2]),],'i')                                                           # resolution is number of distinct coordinates found
+  geomdim = res/np.maximum(np.ones(3,'d'),res-1.0)* \
+            np.array([max(map(float,grid[0].keys()))-min(map(float,grid[0].keys())),\
+                      max(map(float,grid[1].keys()))-min(map(float,grid[1].keys())),\
+                      max(map(float,grid[2].keys()))-min(map(float,grid[2].keys())),\
+                     ],'d')                                                                     # dimension from bounding box, corrected for cell-centeredness
   if res[2] == 1:
     geomdim[2] = min(geomdim[:2]/res[:2])
-
   N = res.prod()
-  print '\t%s @ %s'%(geomdim,res)
-
-
-# --------------- figure out columns to process
-
-  key = '1_%s' %options.F
-  if key not in table.labels:
-    sys.stderr.write('column %s not found...\n'%key)
-  else:
-    F = numpy.array([0.0 for i in xrange(N*9)]).reshape([3,3]+list(res))
-    if not options.noShape:  table.labels_append(['shapeMismatch(%s)' %options.F])
-    if not options.noVolume: table.labels_append(['volMismatch(%s)'%options.F])
-    column = table.labels.index(key)
-
-# ------------------------------------------ assemble header ---------------------------------------
+# ------------------------------------------ assemble header ---------------------------------------
+  if not options.noShape:  table.labels_append(['shapeMismatch(%s)' %options.defgrad])
+  if not options.noVolume: table.labels_append(['volMismatch(%s)'%options.defgrad])
   table.head_write()
 
 # ------------------------------------------ read deformation gradient field -----------------------
-  table.data_rewind()
-
+  F = np.array([0.0 for i in xrange(N*9)]).reshape([3,3]+list(res))
   idx = 0
-  while table.data_read():                                          # read next data line of ASCII table
-    (x,y,z) = location(idx,res)                                     # figure out (x,y,z) position from line count
+  while table.data_read():                                                                      # read next data line of ASCII table
+    (x,y,z) = damask.gridLocation(idx,res)                                                      # figure out (x,y,z) position from line count
     idx += 1
-    F[0:3,0:3,x,y,z] = numpy.array(map(float,table.data[column:column+9]),'d').reshape(3,3)
+    F[0:3,0:3,x,y,z] = np.array(map(float,table.data[column:column+9]),'d').reshape(3,3)
 
   Favg = damask.core.math.tensorAvg(F)
-
-  if (res[0]%2 != 0 or res[1]%2 != 0 or (res[2] != 1 and res[2]%2 !=0)):
-    print 'using linear reconstruction for uneven resolution'
-    centres = damask.core.mesh.deformedCoordsLin(geomdim,F,Favg)
-  else:
-    centres = damask.core.mesh.deformedCoordsFFT(geomdim,F,1.0,Favg)
+  centres = damask.core.mesh.deformedCoordsFFT(geomdim,F,Favg,[1.0,1.0,1.0])
 
   nodes = damask.core.mesh.nodesAroundCentres(geomdim,Favg,centres)
   if not options.noShape:  shapeMismatch  = damask.core.mesh.shapeMismatch( geomdim,F,nodes,centres)
   if not options.noVolume: volumeMismatch = damask.core.mesh.volumeMismatch(geomdim,F,nodes)
 
-# ------------------------------------------ process data ---------------------------------------
-
+# ------------------------------------------ process data ---------------------------------------
   table.data_rewind()
+  outputAlive = True
   idx = 0
-  while table.data_read():                                          # read next data line of ASCII table
-    (x,y,z) = location(idx,res)                                     # figure out (x,y,z) position from line count
+  while outputAlive and table.data_read():                                                      # read next data line of ASCII table
+    (x,y,z) = damask.gridLocation(idx,res )                                                     # figure out (x,y,z) position from line count
     idx += 1
     if not options.noShape:  table.data_append( shapeMismatch[x,y,z])
     if not options.noVolume: table.data_append(volumeMismatch[x,y,z])
-    table.data_write()                                              # output processed line
+    outputAlive = table.data_write()                                                            # output processed line
 
 # ------------------------------------------ output result ---------------------------------------
+  outputAlive and table.output_flush()                                                          # just in case of buffered ASCII table
 
-  table.output_flush()                                              # just in case of buffered ASCII table
-
-  file['input'].close()                                             # close input ASCII table
+  file['input'].close()                                                                         # close input ASCII table (works for stdin)
+  file['output'].close()                                                                        # close output ASCII table (works for stdout)
   if file['name'] != 'STDIN':
-    file['output'].close                                            # close output ASCII table
-    os.rename(file['name']+'_tmp',file['name'])                     # overwrite old one with tmp new
+    os.rename(file['name']+'_tmp',file['name'])                                                 # overwrite old one with tmp new
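Note: the resolution detection kept (and realigned) above is the same in all grid-based scripts: distinct coordinate values are collected per axis and counted. A minimal self-contained version of that idea, with made-up cell-centre coordinates:

import numpy as np

coords = [(0.5,0.5,0.5),(1.5,0.5,0.5),(0.5,1.5,0.5),(1.5,1.5,0.5)]  # hypothetical cell centres
grid = [{},{},{}]
for c in coords:
  for j in xrange(3):
    grid[j][str(c[j])] = True                      # remember coordinate along x,y,z
res = np.array([len(g) for g in grid],'i')         # resolution = number of distinct coordinates
print res                                          # prints [2 2 1]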
diff --git a/processing/post/addCurl.py b/processing/post/addCurl.py
index 2e6b17dbf..ebf3cbbd1 100755
--- a/processing/post/addCurl.py
+++ b/processing/post/addCurl.py
@@ -1,59 +1,33 @@
 #!/usr/bin/env python
 # -*- coding: UTF-8 no BOM -*-
 
-import os,re,sys,math,string,numpy,damask
-from optparse import OptionParser, Option
-
-# -----------------------------
-class extendableOption(Option):
-# -----------------------------
-# used for definition of new option parser action 'extend', which enables to take multiple option arguments
-# taken from online tutorial http://docs.python.org/library/optparse.html
-
-  ACTIONS = Option.ACTIONS + ("extend",)
-  STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
-  TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
-  ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)
-
-  def take_action(self, action, dest, opt, value, values, parser):
-    if action == "extend":
-      lvalue = value.split(",")
-      values.ensure_value(dest, []).extend(lvalue)
-    else:
-      Option.take_action(self, action, dest, opt, value, values, parser)
-
-def location(idx,res):
-  return ( idx  % res[0], \
-         ( idx // res[0]) % res[1], \
-         ( idx // res[0] // res[1]) % res[2] )
-
-def index(location,res):
-  return ( location[0] % res[0] + \
-         ( location[1] % res[1]) * res[0] + \
-         ( location[2] % res[2]) * res[1] * res[0] )
-
+import os,re,sys,math,string
+import numpy as np
+from optparse import OptionParser
+import damask
 
+scriptID = '$Id$'
+scriptName = scriptID.split()[1]
 # --------------------------------------------------------------------
 #                                MAIN
 # --------------------------------------------------------------------
 
-parser = OptionParser(option_class=extendableOption, usage='%prog options [file[s]]', description = """
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
 Add column(s) containing curl of requested column(s).
 Operates on periodic ordered three-dimensional data sets.
 Deals with both vector- and tensor-valued fields.
 
-""" + string.replace('$Id$','\n','\\n')
+""", version=string.replace('$Id$','\n','\\n')
 )
 
-parser.add_option('-c','--coordinates', dest='coords', type='string',\
+parser.add_option('-c','--coordinates', dest='coords', type='string', metavar='string', \
                   help='column heading for coordinates [%default]')
-parser.add_option('-v','--vector', dest='vector', action='extend', type='string', \
+parser.add_option('-v','--vector', dest='vector', action='extend', type='string', metavar='', \
                   help='heading of columns containing vector field values')
-parser.add_option('-t','--tensor', dest='tensor', action='extend', type='string', \
+parser.add_option('-t','--tensor', dest='tensor', action='extend', type='string', metavar='', \
                   help='heading of columns containing tensor field values')
-
 parser.set_defaults(coords = 'ip')
 parser.set_defaults(vector = [])
 parser.set_defaults(tensor = [])
@@ -63,7 +37,7 @@
 if len(options.vector) + len(options.tensor) == 0:
   parser.error('no data column specified...')
 
-datainfo = {                                                        # list of requested labels per datatype
+datainfo = {                                                                                    # list of requested labels per datatype
              'vector': {'len':3,
                         'label':[]},
              'tensor': {'len':9,
@@ -73,43 +47,40 @@ datainfo = { # lis
 if options.vector != None: datainfo['vector']['label'] += options.vector
 if options.tensor != None: datainfo['tensor']['label'] += options.tensor
 
-# ------------------------------------------ setup file handles ---------------------------------------
-
+# ------------------------------------------ setup file handles ------------------------------------
 files = []
 if filenames == []:
-  files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout})
+  files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
 else:
   for name in filenames:
     if os.path.exists(name):
-      files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w')})
-
-
-# ------------------------------------------ loop over input files ---------------------------------------
+      files.append({'name':name, 'input':open(name), 'output':open(name+'_tmp','w'), 'croak':sys.stderr})
 
+#--- loop over input files ------------------------------------------------------------------------
 for file in files:
-  if file['name'] != 'STDIN': print file['name'],
+  if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
+  else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
 
-  table = damask.ASCIItable(file['input'],file['output'],False)     # make unbuffered ASCII_table
-  table.head_read()                                                 # read ASCII header info
-  table.info_append(string.replace('$Id$','\n','\\n') + \
-                    '\t' + ' '.join(sys.argv[1:]))
+  table = damask.ASCIItable(file['input'],file['output'],False)                                 # make unbuffered ASCII_table
+  table.head_read()                                                                             # read ASCII header info
+  table.info_append(string.replace(scriptID,'\n','\\n') + '\t' + ' '.join(sys.argv[1:]))
 
-# --------------- figure out dimension and resolution
+# --------------- figure out dimension and resolution ----------------------------------------------
   try:
-    locationCol = table.labels.index('%s.x'%options.coords)         # columns containing location data
+    locationCol = table.labels.index('%s.x'%options.coords)                                     # columns containing location data
   except ValueError:
-    print 'no coordinate data found...'
+    file['croak'].write('no coordinate data found...\n'%key)
     continue
 
   grid = [{},{},{}]
   while table.data_read():                                          # read next data line of ASCII table
     for j in xrange(3):
       grid[j][str(table.data[locationCol+j])] = True                # remember coordinate along x,y,z
-  resolution = numpy.array([len(grid[0]),\
+  resolution = np.array([len(grid[0]),\
                          len(grid[1]),\
                          len(grid[2]),],'i')                        # resolution is number of distinct coordinates found
-  dimension = resolution/numpy.maximum(numpy.ones(3,'d'),resolution-1.0)* \
-              numpy.array([max(map(float,grid[0].keys()))-min(map(float,grid[0].keys())),\
+  dimension = resolution/np.maximum(np.ones(3,'d'),resolution-1.0)* \
+              np.array([max(map(float,grid[0].keys()))-min(map(float,grid[0].keys())),\
                          max(map(float,grid[1].keys()))-min(map(float,grid[1].keys())),\
                          max(map(float,grid[2].keys()))-min(map(float,grid[2].keys())),\
                          ],'d')                                     # dimension from bounding box, corrected for cell-centeredness
@@ -117,8 +88,6 @@ for file in files:
     dimension[2] = min(dimension[:2]/resolution[:2])
 
   N = resolution.prod()
-  print '\t%s @ %s'%(dimension,resolution)
-
 # --------------- figure out columns to process
 
   active = {}
@@ -140,59 +109,54 @@ for file in files:
       if datatype not in values: values[datatype] = {}
       if datatype not in curl:   curl[datatype] = {}
       active[datatype].append(label)
-      column[datatype][label] = table.labels.index(key)             # remember columns of requested data
-      values[datatype][label] = numpy.array([0.0 for i in xrange(N*datainfo[datatype]['len'])]).\
+      column[datatype][label] = table.labels.index(key)                                         # remember columns of requested data
+      values[datatype][label] = np.array([0.0 for i in xrange(N*datainfo[datatype]['len'])]).\
                                           reshape(list(resolution)+[datainfo[datatype]['len']//3,3])
-      curl[datatype][label] = numpy.array([0.0 for i in xrange(N*datainfo[datatype]['len'])]).\
+      curl[datatype][label] = np.array([0.0 for i in xrange(N*datainfo[datatype]['len'])]).\
                                           reshape(list(resolution)+[datainfo[datatype]['len']//3,3])
       table.labels_append(['%i_curlFFT(%s)'%(i+1,label)
-                           for i in xrange(datainfo[datatype]['len'])])  # extend ASCII header with new labels
+                           for i in xrange(datainfo[datatype]['len'])])                         # extend ASCII header with new labels
 
 # ------------------------------------------ assemble header ---------------------------------------
-
   table.head_write()
 
-# ------------------------------------------ read value field ---------------------------------------
-
+# ------------------------------------------ read value field --------------------------------------
   table.data_rewind()
-
   idx = 0
-  while table.data_read():                                          # read next data line of ASCII table
-    (x,y,z) = location(idx,resolution)                              # figure out (x,y,z) position from line count
+  while table.data_read():                                                                      # read next data line of ASCII table
+    (x,y,z) = damask.gridLocation(idx,resolution)                                               # figure out (x,y,z) position from line count
     idx += 1
-    for datatype,labels in active.items():                          # loop over vector,tensor
-      for label in labels:                                          # loop over all requested curls
-        values[datatype][label][x,y,z] = numpy.array(
-                  map(float,table.data[column[datatype][label]:
-                                       column[datatype][label]+datainfo[datatype]['len']]),'d').reshape(datainfo[datatype]['len']//3,3)
+    for datatype,labels in active.items():                                                      # loop over vector,tensor
+      for label in labels:                                                                      # loop over all requested curls
+        values[datatype][label][x,y,z] = np.array(
+                  map(float,table.data[column[datatype][label]:
+                                       column[datatype][label]+datainfo[datatype]['len']]),'d') \
+                  .reshape(datainfo[datatype]['len']//3,3)
 
-# ------------------------------------------ process value field ---------------------------------------
-
-  for datatype,labels in active.items():                            # loop over vector,tensor
-    for label in labels:                                            # loop over all requested curls
+# ------------------------------------------ process value field -----------------------------------
+  for datatype,labels in active.items():                                                        # loop over vector,tensor
+    for label in labels:                                                                        # loop over all requested curls
       curl[datatype][label] = damask.core.math.curlFFT(dimension,values[datatype][label])
 
-# ------------------------------------------ process data ---------------------------------------
-
+# ------------------------------------------ process data ---------------------------------------
   table.data_rewind()
+  outputAlive = True
   idx = 0
-  while table.data_read():                                          # read next data line of ASCII table
-    (x,y,z) = location(idx,resolution)                              # figure out (x,y,z) position from line count
+  while outputAlive and table.data_read():                                                      # read next data line of ASCII table
+    (x,y,z) = damask.gridLocation(idx,resolution)                                               # figure out (x,y,z) position from line count
     idx += 1
-
-    for datatype,labels in active.items():                          # loop over vector,tensor
-      for label in labels:                                          # loop over all requested norms
+    for datatype,labels in active.items():                                                      # loop over vector,tensor
+      for label in labels:                                                                      # loop over all requested norms
        table.data_append(list(curl[datatype][label][x,y,z].reshape(datainfo[datatype]['len'])))
-    table.data_write()                                              # output processed line
+    outputAlive = table.data_write()                                                            # output processed line
 
 # ------------------------------------------ output result ---------------------------------------
+  outputAlive and table.output_flush()                                                          # just in case of buffered ASCII table
 
-  table.output_flush()                                              # just in case of buffered ASCII table
-
-  file['input'].close()                                             # close input ASCII table
+  file['input'].close()                                                                         # close input ASCII table (works for stdin)
+  file['output'].close()                                                                        # close output ASCII table (works for stdout)
   if file['name'] != 'STDIN':
-    file['output'].close                                            # close output ASCII table
-    os.rename(file['name']+'_tmp',file['name'])                     # overwrite old one with tmp new
+    os.rename(file['name']+'_tmp',file['name'])                                                 # overwrite old one with tmp new
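Note: after this series all four scripts converge on one I/O skeleton. Distilled below as a sketch, using only the damask.ASCIItable calls exercised in the hunks above; 'process' and 'transform' are hypothetical names standing in for each script's per-row work, not library code:

import os,sys
import damask

def process(files,transform):                      # 'files' dicts as assembled in each script
  for file in files:
    file['croak'].write(file['name']+'\n')         # status to stderr keeps stdout clean for piping
    table = damask.ASCIItable(file['input'],file['output'],False)  # unbuffered ASCII table
    table.head_read()                              # read ASCII header info
    table.head_write()                             # write (possibly extended) header
    outputAlive = True
    table.data_rewind()
    while outputAlive and table.data_read():       # read next data line of ASCII table
      transform(table)                             # e.g. table.data_append(derived values)
      outputAlive = table.data_write()             # stop early if downstream pipe closes
    outputAlive and table.output_flush()           # just in case of buffered ASCII table
    file['input'].close()                          # works for stdin
    file['output'].close()                         # works for stdout
    if file['name'] != 'STDIN':
      os.rename(file['name']+'_tmp',file['name'])  # overwrite old one with tmp new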