Python 3 compatibility

This commit is contained in:
Martin Diehl 2016-10-24 21:16:29 +02:00
parent a84e7310f5
commit 8a94f55a2e
59 changed files with 328 additions and 339 deletions

View File

@ -84,7 +84,7 @@ class ASCIItable():
# ------------------------------------------------------------------
def _quote(self,
what):
"""quote empty or white space-containing output"""
"""Quote empty or white space-containing output"""
import re
return '{quote}{content}{quote}'.format(
@ -107,7 +107,7 @@ class ASCIItable():
# ------------------------------------------------------------------
def output_write(self,
what):
"""aggregate a single row (string) or list of (possibly containing further lists of) rows into output"""
"""Aggregate a single row (string) or list of (possibly containing further lists of) rows into output"""
if not isinstance(what, (str, unicode)):
try:
for item in what: self.output_write(item)
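Note that the retained `isinstance(what, (str, unicode))` check still names `unicode`, which Python 3 removed; the usual shim (the same `except(NameError)` guard that already appears further down in the H5Table file) aliases it once near the imports. A minimal sketch, not itself part of this commit:

    try:                # Python 2: 'unicode' is a builtin, nothing to do
        unicode
    except NameError:   # Python 3: alias it so the isinstance checks keep working
        unicode = str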
@ -147,7 +147,7 @@ class ASCIItable():
# ------------------------------------------------------------------
def head_read(self):
"""
get column labels
Get column labels
by either reading the first row or,
if keyword "head[*]" is present, the last line of the header
@ -200,7 +200,7 @@ class ASCIItable():
# ------------------------------------------------------------------
def head_write(self,
header = True):
"""write current header information (info + labels)"""
"""Write current header information (info + labels)"""
head = ['{}\theader'.format(len(self.info)+self.__IO__['labeled'])] if header else []
head.append(self.info)
if self.__IO__['labeled']: head.append('\t'.join(map(self._quote,self.tags)))
@ -209,7 +209,7 @@ class ASCIItable():
# ------------------------------------------------------------------
def head_getGeom(self):
"""interpret geom header"""
"""Interpret geom header"""
identifiers = {
'grid': ['a','b','c'],
'size': ['x','y','z'],
@ -249,7 +249,7 @@ class ASCIItable():
# ------------------------------------------------------------------
def head_putGeom(self,info):
"""translate geometry description to header"""
"""Translate geometry description to header"""
self.info_append([
"grid\ta {}\tb {}\tc {}".format(*info['grid']),
"size\tx {}\ty {}\tz {}".format(*info['size']),
@ -262,7 +262,7 @@ class ASCIItable():
def labels_append(self,
what,
reset = False):
"""add item or list to existing set of labels (and switch on labeling)"""
"""Add item or list to existing set of labels (and switch on labeling)"""
if not isinstance(what, (str, unicode)):
try:
for item in what: self.labels_append(item)
@ -276,7 +276,7 @@ class ASCIItable():
# ------------------------------------------------------------------
def labels_clear(self):
"""delete existing labels and switch to no labeling"""
"""Delete existing labels and switch to no labeling"""
self.tags = []
self.__IO__['labeled'] = False
@ -285,7 +285,7 @@ class ASCIItable():
tags = None,
raw = False):
"""
tell abstract labels.
Tell abstract labels.
"x" for "1_x","2_x",... unless raw output is requested.
operates on object tags or given list.
@ -322,7 +322,7 @@ class ASCIItable():
def label_index(self,
labels):
"""
tell index of column label(s).
Tell index of column label(s).
return numpy array if asked for list of labels.
transparently deals with label positions implicitly given as numbers or their headings given as strings.
@ -363,7 +363,7 @@ class ASCIItable():
def label_dimension(self,
labels):
"""
tell dimension (length) of column label(s).
Tell dimension (length) of column label(s).
return numpy array if asked for list of labels.
transparently deals with label positions implicitly given as numbers or their headings given as strings.
@ -417,7 +417,7 @@ class ASCIItable():
def label_indexrange(self,
labels):
"""
tell index range for given label(s).
Tell index range for given label(s).
return numpy array if asked for list of labels.
transparently deals with label positions implicitly given as numbers or their headings given as strings.
@ -434,7 +434,7 @@ class ASCIItable():
# ------------------------------------------------------------------
def info_append(self,
what):
"""add item or list to existing set of infos"""
"""Add item or list to existing set of infos"""
if not isinstance(what, (str, unicode)):
try:
for item in what: self.info_append(item)
@ -445,7 +445,7 @@ class ASCIItable():
# ------------------------------------------------------------------
def info_clear(self):
"""delete any info block"""
"""Delete any info block"""
self.info = []
# ------------------------------------------------------------------
@ -458,7 +458,7 @@ class ASCIItable():
# ------------------------------------------------------------------
def data_skipLines(self,
count):
"""wind forward by count number of lines"""
"""Wind forward by count number of lines"""
for i in range(count):
alive = self.data_read()
@ -468,7 +468,7 @@ class ASCIItable():
def data_read(self,
advance = True,
respectLabels = True):
"""read next line (possibly buffered) and parse it into data array"""
"""Read next line (possibly buffered) and parse it into data array"""
import shlex
self.line = self.__IO__['readBuffer'].pop(0) if len(self.__IO__['readBuffer']) > 0 \
@ -490,7 +490,7 @@ class ASCIItable():
# ------------------------------------------------------------------
def data_readArray(self,
labels = []):
"""read whole data of all (given) labels as numpy array"""
"""Read whole data of all (given) labels as numpy array"""
from collections import Iterable
try:
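`from collections import Iterable` still works at this point, but the ABCs moved to `collections.abc` in Python 3.3 and the old import fails outright from Python 3.10 on. A guarded import, sketched below and not part of this commit, covers both interpreters:

    try:
        from collections.abc import Iterable   # Python 3.3+
    except ImportError:
        from collections import Iterable       # Python 2 fallback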
@ -527,7 +527,7 @@ class ASCIItable():
# ------------------------------------------------------------------
def data_write(self,
delimiter = '\t'):
"""write current data array and report alive output back"""
"""Write current data array and report alive output back"""
if len(self.data) == 0: return True
if isinstance(self.data[0],list):
@ -539,7 +539,7 @@ class ASCIItable():
def data_writeArray(self,
fmt = None,
delimiter = '\t'):
"""write whole numpy array data"""
"""Write whole numpy array data"""
for row in self.data:
try:
output = [fmt % value for value in row] if fmt else list(map(repr,row))
@ -562,7 +562,7 @@ class ASCIItable():
# ------------------------------------------------------------------
def data_set(self,
what, where):
"""update data entry in column "where". grows data array if needed."""
"""Update data entry in column "where". grows data array if needed."""
idx = -1
try:
idx = self.label_index(where)
@ -589,7 +589,7 @@ class ASCIItable():
grid,
type = 'i',
strict = False):
"""read microstructure data (from .geom format)"""
"""Read microstructure data (from .geom format)"""
def datatype(item):
return int(item) if type.lower() == 'i' else float(item)

View File

@ -104,7 +104,7 @@ class Material():
__slots__ = ['data']
def __init__(self,verbose=True):
"""generates ordered list of parts"""
"""Generates ordered list of parts"""
self.parts = [
'homogenization',
'microstructure',
@ -122,7 +122,7 @@ class Material():
self.verbose = verbose
def __repr__(self):
"""returns current configuration to be used as material.config"""
"""Returns current configuration to be used as material.config"""
me = []
for part in self.parts:
if self.verbose: print('doing '+part)

View File

@ -24,7 +24,7 @@ except(NameError):
def lables_to_path(label, dsXMLPath=None):
"""read the xml definition file and return the path."""
"""Read the xml definition file and return the path."""
if dsXMLPath is None:
# use the default storage layout in DS_HDF5.xml
if "h5table.pyc" in __file__:
@ -48,7 +48,7 @@ def lables_to_path(label, dsXMLPath=None):
class H5Table(object):
"""light weight interface class for h5py
"""Light weight interface class for h5py
DESCRIPTION
-----------
@ -85,7 +85,7 @@ class H5Table(object):
h5f['/'].attrs['description'] = msg
def del_entry(self, feature_name):
"""delete entry in HDF5 table"""
"""Delete entry in HDF5 table"""
dataType, h5f_path = lables_to_path(feature_name,
dsXMLPath=self.dsXMLFile)
with h5py.File(self.h5f_path, 'a') as h5f:
@ -106,7 +106,7 @@ class H5Table(object):
h5f.flush()
def get_data(self, feature_name=None):
"""extract dataset from HDF5 table and return it in a numpy array"""
"""Extract dataset from HDF5 table and return it in a numpy array"""
dataType, h5f_path = lables_to_path(feature_name,
dsXMLPath=self.dsXMLFile)
with h5py.File(self.h5f_path, 'a') as h5f:
@ -116,7 +116,7 @@ class H5Table(object):
return rst_data
def add_data(self, feature_name, dataset, cmd_log=None):
"""adding new feature into existing HDF5 file"""
"""Adding new feature into existing HDF5 file"""
dataType, h5f_path = lables_to_path(feature_name,
dsXMLPath=self.dsXMLFile)
with h5py.File(self.h5f_path, 'a') as h5f:
@ -126,8 +126,7 @@ class H5Table(object):
# record its state as fresh in the cmd log.
try:
del h5f[h5f_path]
print "***deleting old {} from {}".format(feature_name,
self.h5f_path)
print("***deleting old {} from {}".format(feature_name,self.h5f_path))
except:
# if no cmd log, None will be used
cmd_log = str(cmd_log) + " [FRESH]"
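Rewriting the print statement as a `print(...)` call, as above, parses on both interpreters because Python 2 reads the parentheses as grouping. Prints that need keyword arguments such as `file=` or `end=` require opting into the Python 3 builtin instead; a minimal sketch, assuming a module free to add a `__future__` import at its top:

    from __future__ import print_function   # must precede all other statements

    import sys
    print("***deleting old entry", file=sys.stderr)   # keyword args now work on Python 2 too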
@ -138,7 +137,7 @@ class H5Table(object):
h5f.flush()
def get_cmdlog(self, feature_name):
"""get cmd history used to generate the feature"""
"""Get cmd history used to generate the feature"""
dataType, h5f_path = lables_to_path(feature_name,
dsXMLPath=self.dsXMLFile)
with h5py.File(self.h5f_path, 'a') as h5f:

View File

@ -1,6 +1,5 @@
# -*- coding: UTF-8 no BOM -*-
import os,sys,shutil
import logging,logging.config
import damask
@ -191,7 +190,7 @@ class Test():
def copy(self, mapA, mapB,
A = [], B = []):
"""
copy list of files from (mapped) source to target.
Copy list of files from (mapped) source to target.
mapA/B is one of self.fileInX.
"""
@ -382,7 +381,7 @@ class Test():
line0 += 1
for i in range(dataLength):
if not perLine: norm[i] = [np.max(norm[i]) for j in xrange(line0-len(skipLines))]
if not perLine: norm[i] = [np.max(norm[i]) for j in range(line0-len(skipLines))]
data[i] = np.reshape(data[i],[line0-len(skipLines),length[i]])
if any(norm[i]) == 0.0 or absTol[i]:
norm[i] = [1.0 for j in range(line0-len(skipLines))]
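Swapping `xrange` for `range`, as done throughout this commit, is safe wherever the result is only iterated; the cost is that Python 2 now builds a real list, which is negligible at these sizes. A codebase wanting to keep Python 2's lazy behavior could instead alias the name once per module; this sketch shows the alternative the commit deliberately avoids:

    try:                # Python 2: keep the lazy builtin
        xrange
    except NameError:   # Python 3: range is already lazy, reuse it under the old name
        xrange = range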
@ -425,7 +424,7 @@ class Test():
stdTol = 1.0e-6,
preFilter = 1.0e-9):
"""
calculate statistics of tables
Calculate statistics of tables
threshold can be used to ignore small values (a negative number disables this feature)
"""
@ -478,7 +477,7 @@ class Test():
rtol = 1e-5,
atol = 1e-8,
debug = False):
"""compare multiple tables with np.allclose"""
"""Compare multiple tables with np.allclose"""
if not (isinstance(files, Iterable) and not isinstance(files, str)): # check whether list of files is requested
files = [str(files)]

View File

@ -1,6 +1,5 @@
# -*- coding: UTF-8 no BOM -*-
# damask utility functions
import sys,time,random,threading,os,subprocess,shlex
import numpy as np
from optparse import Option
@ -36,7 +35,7 @@ class bcolors:
# -----------------------------
def srepr(arg,glue = '\n'):
"""joins arguments as individual lines"""
"""Joins arguments as individual lines"""
if (not hasattr(arg, "strip") and
hasattr(arg, "__getitem__") or
hasattr(arg, "__iter__")):
@ -45,21 +44,21 @@ def srepr(arg,glue = '\n'):
# -----------------------------
def croak(what, newline = True):
"""writes formated to stderr"""
"""Writes formated to stderr"""
sys.stderr.write(srepr(what,glue = '\n') + ('\n' if newline else ''))
sys.stderr.flush()
# -----------------------------
def report(who = None,
what = None):
"""reports script and file name"""
"""Reports script and file name"""
croak( (emph(who)+': ' if who else '') + (what if what else '') )
# -----------------------------
def report_geom(info,
what = ['grid','size','origin','homogenization','microstructures']):
"""reports (selected) geometry information"""
"""Reports (selected) geometry information"""
output = {
'grid' : 'grid a b c: {}'.format(' x '.join(map(str,info['grid' ]))),
'size' : 'size x y z: {}'.format(' x '.join(map(str,info['size' ]))),
@ -71,24 +70,24 @@ def report_geom(info,
# -----------------------------
def emph(what):
"""boldens string"""
"""Boldens string"""
return bcolors.BOLD+srepr(what)+bcolors.ENDC
# -----------------------------
def deemph(what):
"""dims string"""
"""Dims string"""
return bcolors.DIM+srepr(what)+bcolors.ENDC
# -----------------------------
def delete(what):
"""dims string"""
"""Dims string"""
return bcolors.DIM+srepr(what)+bcolors.ENDC
# -----------------------------
def execute(cmd,
streamIn = None,
wd = './'):
"""executes a command in given directory and returns stdout and stderr for optional stdin"""
"""Executes a command in given directory and returns stdout and stderr for optional stdin"""
initialPath = os.getcwd()
os.chdir(wd)
process = subprocess.Popen(shlex.split(cmd),
@ -127,7 +126,7 @@ def gridIndex(location,res):
# -----------------------------
class extendableOption(Option):
"""
used for definition of new option parser action 'extend', which enables to take multiple option arguments
Used for definition of new option parser action 'extend', which enables taking multiple option arguments
taken from online tutorial http://docs.python.org/library/optparse.html
"""
@ -146,7 +145,7 @@ class extendableOption(Option):
# -----------------------------
class backgroundMessage(threading.Thread):
"""reporting with animation to indicate progress"""
"""Reporting with animation to indicate progress"""
choices = {'bounce': ['_', 'o', 'O', '°', '‾', '‾', '°', 'O', 'o', '_'],
'spin': ['◜', '◝', '◞', '◟'],
@ -168,7 +167,7 @@ class backgroundMessage(threading.Thread):
}
def __init__(self,symbol = None,wait = 0.1):
"""sets animation symbol"""
"""Sets animation symbol"""
super(backgroundMessage, self).__init__()
self._stop = threading.Event()
self.message = ''
@ -179,7 +178,7 @@ class backgroundMessage(threading.Thread):
self.waittime = wait
def __quit__(self):
"""cleans output"""
"""Cleans output"""
length = len(self.symbols[self.counter] + self.gap + self.message)
sys.stderr.write(chr(8)*length + ' '*length + chr(8)*length)
sys.stderr.write('')
@ -282,7 +281,7 @@ def leastsqBound(func, x0, args=(), bounds=None, Dfun=None, full_output=0,
return grad
def _int2extFunc(bounds):
"""transform internal parameters into external parameters."""
"""Transform internal parameters into external parameters."""
local = [_int2extLocal(b) for b in bounds]
def _transform_i2e(p_int):
p_ext = np.empty_like(p_int)
@ -291,7 +290,7 @@ def leastsqBound(func, x0, args=(), bounds=None, Dfun=None, full_output=0,
return _transform_i2e
def _ext2intFunc(bounds):
"""transform external parameters into internal parameters."""
"""Transform external parameters into internal parameters."""
local = [_ext2intLocal(b) for b in bounds]
def _transform_e2i(p_ext):
p_int = np.empty_like(p_ext)
@ -300,7 +299,7 @@ def leastsqBound(func, x0, args=(), bounds=None, Dfun=None, full_output=0,
return _transform_e2i
def _int2extLocal(bound):
"""transform a single internal parameter to an external parameter."""
"""Transform a single internal parameter to an external parameter."""
lower, upper = bound
if lower is None and upper is None: # no constraints
return lambda x: x
@ -312,7 +311,7 @@ def leastsqBound(func, x0, args=(), bounds=None, Dfun=None, full_output=0,
return lambda x: lower + ((upper - lower)/2.0)*(np.sin(x) + 1.0)
def _ext2intLocal(bound):
"""transform a single external parameter to an internal parameter."""
"""Transform a single external parameter to an internal parameter."""
lower, upper = bound
if lower is None and upper is None: # no constraints
return lambda x: x
@ -469,4 +468,4 @@ def curve_fit_bound(f, xdata, ydata, p0=None, sigma=None, bounds=None, **kw):
else:
pcov = np.inf
return (popt, pcov, infodict, errmsg, ier) if return_full else (popt, pcov)
return (popt, pcov, infodict, errmsg, ier) if return_full else (popt, pcov)

View File

@ -69,7 +69,7 @@ for name in filenames:
if columnMissing: continue
# ------------------------------------------ assemble header ---------------------------------------
table.labels_append(['%i_coord'%(i+1) for i in xrange(3)]) # extend ASCII header with new labels
table.labels_append(['%i_coord'%(i+1) for i in range(3)]) # extend ASCII header with new labels
table.head_write()
# ------------------------------------------ process data ------------------------------------------
@ -94,4 +94,4 @@ for name in filenames:
table.input_close() # close input ASCII table (works for stdin)
table.output_close() # close output ASCII table (works for stdout)
if file['name'] != 'STDIN':
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new
os.rename(file['name']+'_tmp',file['name']) # overwrite old one with tmp new

View File

@ -53,7 +53,7 @@ if options.labels is None or options.formulas is None:
if len(options.labels) != len(options.formulas):
parser.error('number of labels ({}) and formulas ({}) do not match.'.format(len(options.labels),len(options.formulas)))
for i in xrange(len(options.formulas)):
for i in range(len(options.formulas)):
options.formulas[i] = options.formulas[i].replace(';',',')
# ------------------------------------- loop over input files --------------------------------------
@ -154,7 +154,7 @@ for name in filenames:
# ----------------------------------- line 1: assemble header --------------------------------------
for newby in newbies:
table.labels_append(['{}_{}'.format(i+1,newby) for i in xrange(resultDim[newby])]
table.labels_append(['{}_{}'.format(i+1,newby) for i in range(resultDim[newby])]
if resultDim[newby] > 1 else newby)
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))

View File

@ -68,7 +68,7 @@ for name in filenames:
# ------------------------------------------ assemble header --------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.labels_append(['{}_Cauchy'.format(i+1) for i in xrange(9)]) # extend ASCII header with new labels
table.labels_append(['{}_Cauchy'.format(i+1) for i in range(9)]) # extend ASCII header with new labels
table.head_write()
# ------------------------------------------ process data ------------------------------------------

View File

@ -17,7 +17,7 @@ def cell2node(cellData,grid):
nodeData = 0.0
datalen = np.array(cellData.shape[3:]).prod()
for i in xrange(datalen):
for i in range(datalen):
node = scipy.ndimage.convolve(cellData.reshape(tuple(grid[::-1])+(datalen,))[...,i],
np.ones((2,2,2))/8., # 2x2x2 neighborhood of cells
mode = 'wrap',
@ -33,7 +33,7 @@ def cell2node(cellData,grid):
#--------------------------------------------------------------------------------------------------
def deformationAvgFFT(F,grid,size,nodal=False,transformed=False):
"""calculate average cell center (or nodal) deformation for deformation gradient field specified in each grid cell"""
"""Calculate average cell center (or nodal) deformation for deformation gradient field specified in each grid cell"""
if nodal:
x, y, z = np.meshgrid(np.linspace(0,size[2],1+grid[2]),
np.linspace(0,size[1],1+grid[1]),
@ -55,7 +55,7 @@ def deformationAvgFFT(F,grid,size,nodal=False,transformed=False):
#--------------------------------------------------------------------------------------------------
def displacementFluctFFT(F,grid,size,nodal=False,transformed=False):
"""calculate cell center (or nodal) displacement for deformation gradient field specified in each grid cell"""
"""Calculate cell center (or nodal) displacement for deformation gradient field specified in each grid cell"""
integrator = 0.5j * size / math.pi
kk, kj, ki = np.meshgrid(np.where(np.arange(grid[2])>grid[2]//2,np.arange(grid[2])-grid[2],np.arange(grid[2])),
@ -131,7 +131,7 @@ def volTetrahedron(coords):
def volumeMismatch(size,F,nodes):
"""
calculates the volume mismatch
Calculates the volume mismatch
volume mismatch is defined as the difference between volume of reconstructed
(compatible) cube and determinant of defgrad at the FP
@ -142,9 +142,9 @@ def volumeMismatch(size,F,nodes):
#--------------------------------------------------------------------------------------------------
# calculate actual volume and volume resulting from deformation gradient
for k in xrange(grid[2]):
for j in xrange(grid[1]):
for i in xrange(grid[0]):
for k in range(grid[2]):
for j in range(grid[1]):
for i in range(grid[0]):
coords[0,0:3] = nodes[k, j, i ,0:3]
coords[1,0:3] = nodes[k ,j, i+1,0:3]
coords[2,0:3] = nodes[k ,j+1,i+1,0:3]
@ -190,9 +190,9 @@ def shapeMismatch(size,F,nodes,centres):
#--------------------------------------------------------------------------------------------------
# compare deformed original and deformed positions to actual positions
for k in xrange(grid[2]):
for j in xrange(grid[1]):
for i in xrange(grid[0]):
for k in range(grid[2]):
for j in range(grid[1]):
for i in range(grid[0]):
sMismatch[k,j,i] = \
+ np.linalg.norm(nodes[k, j, i ,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[0,0:3]))\
+ np.linalg.norm(nodes[k, j, i+1,0:3] - centres[k,j,i,0:3] - np.dot(F[k,j,i,:,:], coordsInitial[1,0:3]))\
@ -288,7 +288,7 @@ for name in filenames:
np.zeros((table.data.shape[0],
3-table.data[:,9:].shape[1]),dtype='f'))) # fill coords up to 3D with zeros
coords = [np.unique(table.data[:,9+i]) for i in xrange(3)]
coords = [np.unique(table.data[:,9+i]) for i in range(3)]
mincorner = np.array(map(min,coords))
maxcorner = np.array(map(max,coords))
grid = np.array(map(len,coords),'i')
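The neighboring `np.array(map(min,coords))` lines are untouched by this commit, yet on Python 3 `map` yields an iterator that `np.array` stores as a zero-dimensional object array instead of consuming. A hedged sketch of the Python 3-safe spelling, using made-up coordinate data:

    import numpy as np

    coords = [np.array([0.0, 1.0]), np.array([0.0, 2.0]), np.array([0.0, 4.0])]  # hypothetical
    mincorner = np.array(list(map(min, coords)))        # list() forces evaluation on Python 3
    maxcorner = np.array(list(map(max, coords)))
    grid      = np.array(list(map(len, coords)), 'i')   # dtype 'i' as in the original calls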
@ -324,9 +324,9 @@ for name in filenames:
volumeMismatch = volumeMismatch(size,table.data[:,:9].reshape(grid[2],grid[1],grid[0],3,3),nodes)
# ------------------------------------------ output data -------------------------------------------
for i in xrange(grid[2]):
for j in xrange(grid[1]):
for k in xrange(grid[0]):
for i in range(grid[2]):
for j in range(grid[1]):
for k in range(grid[0]):
table.data_read()
if options.shape: table.data_append(shapeMismatch[i,j,k])
if options.volume: table.data_append(volumeMismatch[i,j,k])

View File

@ -59,7 +59,7 @@ for name in filenames:
dims.append(dim)
columns.append(table.label_index(what))
table.labels_append('cum({})'.format(what) if dim == 1 else
['{}_cum({})'.format(i+1,what) for i in xrange(dim)] ) # extend ASCII header with new labels
['{}_cum({})'.format(i+1,what) for i in range(dim)] ) # extend ASCII header with new labels
if remarks != []: damask.util.croak(remarks)
if errors != []:

View File

@ -24,24 +24,24 @@ def curlFFT(geomdim,field):
# differentiation in Fourier space
k_s = np.zeros([3],'i')
TWOPIIMG = 2.0j*math.pi
for i in xrange(grid[2]):
for i in range(grid[2]):
k_s[0] = i
if grid[2]%2 == 0 and i == grid[2]//2: k_s[0] = 0 # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011)
elif i > grid[2]//2: k_s[0] -= grid[2]
for j in xrange(grid[1]):
for j in range(grid[1]):
k_s[1] = j
if grid[1]%2 == 0 and j == grid[1]//2: k_s[1] = 0 # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011)
elif j > grid[1]//2: k_s[1] -= grid[1]
for k in xrange(grid[0]//2+1):
for k in range(grid[0]//2+1):
k_s[2] = k
if grid[0]%2 == 0 and k == grid[0]//2: k_s[2] = 0 # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011)
xi = (k_s/geomdim)[2::-1].astype('c16') # reversing the field input order
if dataType == 'tensor':
for l in xrange(3):
for l in range(3):
curl_fourier[i,j,k,0,l] = ( field_fourier[i,j,k,l,2]*xi[1]\
-field_fourier[i,j,k,l,1]*xi[2]) *TWOPIIMG
curl_fourier[i,j,k,1,l] = (-field_fourier[i,j,k,l,2]*xi[0]\
@ -136,14 +136,14 @@ for name in filenames:
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
for type, data in items.iteritems():
for label in data['active']:
table.labels_append(['{}_curlFFT({})'.format(i+1,label) for i in xrange(data['dim'])]) # extend ASCII header with new labels
table.labels_append(['{}_curlFFT({})'.format(i+1,label) for i in range(data['dim'])]) # extend ASCII header with new labels
table.head_write()
# --------------- figure out size and grid ---------------------------------------------------------
table.data_readArray()
coords = [np.unique(table.data[:,colCoord+i]) for i in xrange(3)]
coords = [np.unique(table.data[:,colCoord+i]) for i in range(3)]
mincorner = np.array(map(min,coords))
maxcorner = np.array(map(max,coords))
grid = np.array(map(len,coords),'i')
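The `items.iteritems()` context line above is another leftover: Python 3 dropped `dict.iteritems()`, while `items()` exists on both sides (a list on Python 2, a view on Python 3) and is equivalent for read-only loops like this one. A minimal sketch with a stand-in dictionary:

    items = {'tensor': {'dim': 9, 'active': ['F']}}   # hypothetical stand-in
    for type, data in items.items():                  # iteritems() raises AttributeError on Python 3
        for label in data['active']:
            print(label)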

View File

@ -85,7 +85,7 @@ for name in filenames:
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
for type, data in items.iteritems():
for label in data['active']:
table.labels_append(['{}_dev({})'.format(i+1,label) for i in xrange(data['dim'])] + \
table.labels_append(['{}_dev({})'.format(i+1,label) for i in range(data['dim'])] + \
(['sph({})'.format(label)] if options.spherical else [])) # extend ASCII header with new labels
table.head_write()
@ -101,4 +101,4 @@ for name in filenames:
# ------------------------------------------ output finalization -----------------------------------
table.close() # close input ASCII table (works for stdin)
table.close() # close input ASCII table (works for stdin)

View File

@ -17,7 +17,7 @@ def cell2node(cellData,grid):
nodeData = 0.0
datalen = np.array(cellData.shape[3:]).prod()
for i in xrange(datalen):
for i in range(datalen):
node = scipy.ndimage.convolve(cellData.reshape(tuple(grid[::-1])+(datalen,))[...,i],
np.ones((2,2,2))/8., # 2x2x2 neighborhood of cells
mode = 'wrap',
@ -33,7 +33,7 @@ def cell2node(cellData,grid):
#--------------------------------------------------------------------------------------------------
def displacementAvgFFT(F,grid,size,nodal=False,transformed=False):
"""calculate average cell center (or nodal) displacement for deformation gradient field specified in each grid cell"""
"""Calculate average cell center (or nodal) displacement for deformation gradient field specified in each grid cell"""
if nodal:
x, y, z = np.meshgrid(np.linspace(0,size[2],1+grid[2]),
np.linspace(0,size[1],1+grid[1]),
@ -55,7 +55,7 @@ def displacementAvgFFT(F,grid,size,nodal=False,transformed=False):
#--------------------------------------------------------------------------------------------------
def displacementFluctFFT(F,grid,size,nodal=False,transformed=False):
"""calculate cell center (or nodal) displacement for deformation gradient field specified in each grid cell"""
"""Calculate cell center (or nodal) displacement for deformation gradient field specified in each grid cell"""
integrator = 0.5j * size / math.pi
kk, kj, ki = np.meshgrid(np.where(np.arange(grid[2])>grid[2]//2,np.arange(grid[2])-grid[2],np.arange(grid[2])),
@ -167,7 +167,7 @@ for name in filenames:
np.zeros((table.data.shape[0],
3-table.data[:,9:].shape[1]),dtype='f'))) # fill coords up to 3D with zeros
coords = [np.unique(table.data[:,9+i]) for i in xrange(3)]
coords = [np.unique(table.data[:,9+i]) for i in range(3)]
mincorner = np.array(map(min,coords))
maxcorner = np.array(map(max,coords))
grid = np.array(map(len,coords),'i')
@ -196,16 +196,16 @@ for name in filenames:
table.labels_clear()
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.labels_append((['{}_pos' .format(i+1) for i in xrange(3)] if options.nodal else []) +
['{}_avg({}).{}' .format(i+1,options.defgrad,options.pos) for i in xrange(3)] +
['{}_fluct({}).{}'.format(i+1,options.defgrad,options.pos) for i in xrange(3)] )
table.labels_append((['{}_pos' .format(i+1) for i in range(3)] if options.nodal else []) +
['{}_avg({}).{}' .format(i+1,options.defgrad,options.pos) for i in range(3)] +
['{}_fluct({}).{}'.format(i+1,options.defgrad,options.pos) for i in range(3)] )
table.head_write()
# ------------------------------------------ output data -------------------------------------------
Zrange = np.linspace(0,size[2],1+grid[2]) if options.nodal else xrange(grid[2])
Yrange = np.linspace(0,size[1],1+grid[1]) if options.nodal else xrange(grid[1])
Xrange = np.linspace(0,size[0],1+grid[0]) if options.nodal else xrange(grid[0])
Zrange = np.linspace(0,size[2],1+grid[2]) if options.nodal else range(grid[2])
Yrange = np.linspace(0,size[1],1+grid[1]) if options.nodal else range(grid[1])
Xrange = np.linspace(0,size[0],1+grid[0]) if options.nodal else range(grid[0])
for i,z in enumerate(Zrange):
for j,y in enumerate(Yrange):

View File

@ -21,23 +21,23 @@ def divFFT(geomdim,field):
# differentiation in Fourier space
k_s = np.zeros([3],'i')
TWOPIIMG = 2.0j*math.pi
for i in xrange(grid[2]):
for i in range(grid[2]):
k_s[0] = i
if grid[2]%2 == 0 and i == grid[2]//2: k_s[0] = 0 # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011)
elif i > grid[2]//2: k_s[0] -= grid[2]
for j in xrange(grid[1]):
for j in range(grid[1]):
k_s[1] = j
if grid[1]%2 == 0 and j == grid[1]//2: k_s[1] = 0 # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011)
elif j > grid[1]//2: k_s[1] -= grid[1]
for k in xrange(grid[0]//2+1):
for k in range(grid[0]//2+1):
k_s[2] = k
if grid[0]%2 == 0 and k == grid[0]//2: k_s[2] = 0 # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011)
xi = (k_s/geomdim)[2::-1].astype('c16') # reversing the field input order
if n == 9: # tensor, 3x3 -> 3
for l in xrange(3):
for l in range(3):
div_fourier[i,j,k,l] = sum(field_fourier[i,j,k,l,0:3]*xi) *TWOPIIMG
elif n == 3: # vector, 3 -> 1
div_fourier[i,j,k] = sum(field_fourier[i,j,k,0:3]*xi) *TWOPIIMG
@ -123,14 +123,14 @@ for name in filenames:
for type, data in items.iteritems():
for label in data['active']:
table.labels_append(['divFFT({})'.format(label) if type == 'vector' else
'{}_divFFT({})'.format(i+1,label) for i in xrange(data['dim']//3)]) # extend ASCII header with new labels
'{}_divFFT({})'.format(i+1,label) for i in range(data['dim']//3)]) # extend ASCII header with new labels
table.head_write()
# --------------- figure out size and grid ---------------------------------------------------------
table.data_readArray()
coords = [np.unique(table.data[:,colCoord+i]) for i in xrange(3)]
coords = [np.unique(table.data[:,colCoord+i]) for i in range(3)]
mincorner = np.array(map(min,coords))
maxcorner = np.array(map(max,coords))
grid = np.array(map(len,coords),'i')
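The `//` in bounds like `grid[0]//2+1` and `data['dim']//3` above is what keeps these loops portable: Python 3 turned bare `/` into true division, and `range()` rejects floats. A two-line illustration:

    print(7 / 2)    # Python 3: 3.5 (true division); Python 2 without a __future__ import: 3
    print(7 // 2)   # 3 on both interpreters: floor division, safe as a range() bound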

View File

@ -20,13 +20,13 @@ def periodic_3Dpad(array, rimdim=(1,1,1)):
rimdim[2]:rimdim[2]+size[2]] = array
p = np.zeros(3,'i')
for side in xrange(3):
for p[(side+2)%3] in xrange(padded.shape[(side+2)%3]):
for p[(side+1)%3] in xrange(padded.shape[(side+1)%3]):
for p[side%3] in xrange(rimdim[side%3]):
for side in range(3):
for p[(side+2)%3] in range(padded.shape[(side+2)%3]):
for p[(side+1)%3] in range(padded.shape[(side+1)%3]):
for p[side%3] in range(rimdim[side%3]):
spot = (p-rimdim)%size
padded[p[0],p[1],p[2]] = array[spot[0],spot[1],spot[2]]
for p[side%3] in xrange(rimdim[side%3]+size[side%3],size[side%3]+2*rimdim[side%3]):
for p[side%3] in range(rimdim[side%3]+size[side%3],size[side%3]+2*rimdim[side%3]):
spot = (p-rimdim)%size
padded[p[0],p[1],p[2]] = array[spot[0],spot[1],spot[2]]
return padded
@ -178,7 +178,7 @@ for name in filenames:
table.data_readArray()
coords = [np.unique(table.data[:,coordCol+i]) for i in xrange(coordDim)]
coords = [np.unique(table.data[:,coordCol+i]) for i in range(coordDim)]
mincorner = np.array(map(min,coords))
maxcorner = np.array(map(max,coords))
grid = np.array(map(len,coords)+[1]*(3-len(coords)),'i')
@ -215,7 +215,7 @@ for name in filenames:
# ...reflects number of unique neighbors
uniques = np.where(diffToNeighbor[1:-1,1:-1,1:-1,0] != 0, 1,0) # initialize unique value counter (exclude myself [= 0])
for i in xrange(1,len(neighborhood)): # check remaining points in neighborhood
for i in range(1,len(neighborhood)): # check remaining points in neighborhood
uniques += np.where(np.logical_and(
diffToNeighbor[1:-1,1:-1,1:-1,i] != 0, # not myself?
diffToNeighbor[1:-1,1:-1,1:-1,i] != diffToNeighbor[1:-1,1:-1,1:-1,i-1],
@ -229,7 +229,7 @@ for name in filenames:
distance[i,:,:,:] = ndimage.morphology.distance_transform_edt(distance[i,:,:,:])*[options.scale]*3
distance = distance.reshape([len(feature_list),grid.prod(),1],order='F')
for i in xrange(len(feature_list)):
for i in range(len(feature_list)):
stack.append(distance[i,:])
# ------------------------------------------ output result -----------------------------------------
@ -239,4 +239,4 @@ for name in filenames:
# ------------------------------------------ output finalization -----------------------------------
table.close() # close input ASCII table (works for stdin)
table.close() # close input ASCII table (works for stdin)

View File

@ -24,17 +24,17 @@ def gradFFT(geomdim,field):
# differentiation in Fourier space
k_s = np.zeros([3],'i')
TWOPIIMG = 2.0j*math.pi
for i in xrange(grid[2]):
for i in range(grid[2]):
k_s[0] = i
if grid[2]%2 == 0 and i == grid[2]//2: k_s[0] = 0 # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011)
elif i > grid[2]//2: k_s[0] -= grid[2]
for j in xrange(grid[1]):
for j in range(grid[1]):
k_s[1] = j
if grid[1]%2 == 0 and j == grid[1]//2: k_s[1] = 0 # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011)
elif j > grid[1]//2: k_s[1] -= grid[1]
for k in xrange(grid[0]//2+1):
for k in range(grid[0]//2+1):
k_s[2] = k
if grid[0]%2 == 0 and k == grid[0]//2: k_s[2] = 0 # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011)
@ -126,14 +126,14 @@ for name in filenames:
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
for type, data in items.iteritems():
for label in data['active']:
table.labels_append(['{}_gradFFT({})'.format(i+1,label) for i in xrange(3 * data['dim'])]) # extend ASCII header with new labels
table.labels_append(['{}_gradFFT({})'.format(i+1,label) for i in range(3 * data['dim'])]) # extend ASCII header with new labels
table.head_write()
# --------------- figure out size and grid ---------------------------------------------------------
table.data_readArray()
coords = [np.unique(table.data[:,colCoord+i]) for i in xrange(3)]
coords = [np.unique(table.data[:,colCoord+i]) for i in range(3)]
mincorner = np.array(map(min,coords))
maxcorner = np.array(map(max,coords))
grid = np.array(map(len,coords),'i')

View File

@ -108,7 +108,7 @@ for name in filenames:
# ------------------------------------------ assemble header ---------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.labels_append(['{}_IPF_{:g}{:g}{:g}_{sym}'.format(i+1,*options.pole,sym = options.symmetry.lower()) for i in xrange(3)])
table.labels_append(['{}_IPF_{:g}{:g}{:g}_{sym}'.format(i+1,*options.pole,sym = options.symmetry.lower()) for i in range(3)])
table.head_write()
# ------------------------------------------ process data ------------------------------------------

View File

@ -131,9 +131,9 @@ for name in filenames:
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
for output in options.output:
if output == 'quaternion': table.labels_append(['{}_{}_{}({})'.format(i+1,'quat',options.symmetry,label) for i in xrange(4)])
elif output == 'rodrigues': table.labels_append(['{}_{}_{}({})'.format(i+1,'rodr',options.symmetry,label) for i in xrange(3)])
elif output == 'eulers': table.labels_append(['{}_{}_{}({})'.format(i+1,'eulr',options.symmetry,label) for i in xrange(3)])
if output == 'quaternion': table.labels_append(['{}_{}_{}({})'.format(i+1,'quat',options.symmetry,label) for i in range(4)])
elif output == 'rodrigues': table.labels_append(['{}_{}_{}({})'.format(i+1,'rodr',options.symmetry,label) for i in range(3)])
elif output == 'eulers': table.labels_append(['{}_{}_{}({})'.format(i+1,'eulr',options.symmetry,label) for i in range(3)])
table.head_write()
# ------------------------------------------ process data ------------------------------------------

View File

@ -69,7 +69,7 @@ for name in filenames:
# ------------------------------------------ assemble header --------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.labels_append(['%i_S'%(i+1) for i in xrange(9)]) # extend ASCII header with new labels
table.labels_append(['%i_S'%(i+1) for i in range(9)]) # extend ASCII header with new labels
table.head_write()
# ------------------------------------------ process data ------------------------------------------

View File

@ -113,7 +113,7 @@ for name in filenames:
# ------------------------------------------ assemble header ---------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.labels_append(['{}_pole_{}{}{}'.format(i+1,*options.pole) for i in xrange(2)])
table.labels_append(['{}_pole_{}{}{}'.format(i+1,*options.pole) for i in range(2)])
table.head_write()
# ------------------------------------------ process data ------------------------------------------

View File

@ -199,7 +199,7 @@ if options.lattice in latticeChoices[:2]:
c_normal = slipSystems[options.lattice][:,3:]
elif options.lattice == latticeChoices[2]:
# convert 4 Miller index notation of hex to orthogonal 3 Miller index notation
for i in xrange(len(c_direction)):
for i in range(len(c_direction)):
c_direction[i] = np.array([slipSystems['hex'][i,0]*1.5,
(slipSystems['hex'][i,0] + 2.*slipSystems['hex'][i,1])*0.5*np.sqrt(3),
slipSystems['hex'][i,3]*options.CoverA,

View File

@ -57,8 +57,8 @@ for name in filenames:
if dim != data['dim']: remarks.append('column {} is not a {}...'.format(what,type))
else:
items[type]['column'].append(table.label_index(what))
table.labels_append(['{}_eigval({})'.format(i+1,what) for i in xrange(3)]) # extend ASCII header with new labels
table.labels_append(['{}_eigvec({})'.format(i+1,what) for i in xrange(9)]) # extend ASCII header with new labels
table.labels_append(['{}_eigval({})'.format(i+1,what) for i in range(3)]) # extend ASCII header with new labels
table.labels_append(['{}_eigvec({})'.format(i+1,what) for i in range(9)]) # extend ASCII header with new labels
if remarks != []: damask.util.croak(remarks)
if errors != []:

View File

@ -112,7 +112,7 @@ for name in filenames:
table.labels_append(['{}_{}({}){}'.format(i+1, # extend ASCII header with new labels
theStrain,
theStretch,
what if what != 'f' else '') for i in xrange(9)])
what if what != 'f' else '') for i in range(9)])
if remarks != []: damask.util.croak(remarks)
if errors != []:

View File

@ -79,7 +79,7 @@ incNum = int(asciiTable.data[asciiTable.label_index('inc'), 0])
fullTable = np.copy(asciiTable.data) # deep copy all data, just to be safe
labels = asciiTable.labels()
labels_idx = [asciiTable.label_index(label) for label in labels]
featuresDim = [labels_idx[i+1] - labels_idx[i] for i in xrange(len(labels)-1)]
featuresDim = [labels_idx[i+1] - labels_idx[i] for i in range(len(labels)-1)]
featuresDim.append(fullTable.shape[1] - labels_idx[-1])
# ----- figure out size and grid ----- #
@ -113,7 +113,7 @@ h5f.add_data("Vz", Vz, cmd_log=cmd_log)
# add the rest of data from table
labelsProcessed = ['inc']
for fi in xrange(len(labels)):
for fi in range(len(labels)):
featureName = labels[fi]
# remove trouble maker "(" and ")" from label/feature name
if "(" in featureName:
@ -136,7 +136,7 @@ for fi in xrange(len(labels)):
# mshGridDim[2],
# dataset.shape[1]))
# write out data
print "adding {}...".format(featureName)
print("adding {}...".format(featureName))
h5f.add_data(featureName, dataset, cmd_log=cmd_log)
# write down the processed label
labelsProcessed.append(featureName)

View File

@ -94,7 +94,7 @@ for name in filenames:
table.data_readArray()
if (any(options.grid) == 0 or any(options.size) == 0.0):
coords = [np.unique(table.data[:,colCoord+i]) for i in xrange(3)]
coords = [np.unique(table.data[:,colCoord+i]) for i in range(3)]
mincorner = np.array(map(min,coords))
maxcorner = np.array(map(max,coords))
grid = np.array(map(len,coords),'i')

View File

@ -127,12 +127,12 @@ for name in filenames:
weights=None if options.weight is None else table.data[:,2])
if options.normCol:
for x in xrange(options.bins[0]):
for x in range(options.bins[0]):
sum = np.sum(grid[x,:])
if sum > 0.0:
grid[x,:] /= sum
if options.normRow:
for y in xrange(options.bins[1]):
for y in range(options.bins[1]):
sum = np.sum(grid[:,y])
if sum > 0.0:
grid[:,y] /= sum
@ -150,8 +150,8 @@ for name in filenames:
delta[2] = minmax[2,1]-minmax[2,0]
for x in xrange(options.bins[0]):
for y in xrange(options.bins[1]):
for x in range(options.bins[0]):
for y in range(options.bins[1]):
result[x,y,:] = [minmax[0,0]+delta[0]/options.bins[0]*(x+0.5),
minmax[1,0]+delta[1]/options.bins[1]*(y+0.5),
min(1.0,max(0.0,(grid[x,y]-minmax[2,0])/delta[2]))]

View File

@ -79,7 +79,7 @@ for name in filenames:
table.data_readArray(options.pos)
table.data_rewind()
coords = [np.unique(table.data[:,i]) for i in xrange(3)]
coords = [np.unique(table.data[:,i]) for i in range(3)]
mincorner = np.array(map(min,coords))
maxcorner = np.array(map(max,coords))
grid = np.array(map(len,coords),'i')
@ -99,9 +99,9 @@ for name in filenames:
data = np.zeros(outSize.tolist()+[len(table.labels(raw = True))])
p = np.zeros(3,'i')
for p[2] in xrange(grid[2]):
for p[1] in xrange(grid[1]):
for p[0] in xrange(grid[0]):
for p[2] in range(grid[2]):
for p[1] in range(grid[1]):
for p[0] in range(grid[0]):
d = p*packing
table.data_read()
data[d[0]:d[0]+packing[0],
@ -110,9 +110,9 @@ for name in filenames:
: ] = np.tile(np.array(table.data_asFloat(),'d'),packing.tolist()+[1]) # tile to match blowUp voxel size
elementSize = size/grid/packing
elem = 1
for c in xrange(outSize[2]):
for b in xrange(outSize[1]):
for a in xrange(outSize[0]):
for c in range(outSize[2]):
for b in range(outSize[1]):
for a in range(outSize[0]):
data[a,b,c,colCoord:colCoord+3] = [a+0.5,b+0.5,c+0.5]*elementSize
if colElem != -1: data[a,b,c,colElem] = elem
table.data = data[a,b,c,:].tolist()

View File

@ -110,18 +110,18 @@ for name in filenames:
table.data_readArray()
rows,cols = table.data.shape
table.data = table.data[np.lexsort([table.data[:,grpColumn]])] # sort data by grpColumn
table.data = table.data[np.lexsort([table.data[:,grpColumn]])] # sort data by grpColumn
values,index = np.unique(table.data[:,grpColumn], return_index = True) # unique grpColumn values and their positions
index = np.append(index,rows) # add termination position
grpTable = np.empty((len(values), cols)) # initialize output
values,index = np.unique(table.data[:,grpColumn], return_index = True) # unique grpColumn values and their positions
index = np.append(index,rows) # add termination position
grpTable = np.empty((len(values), cols)) # initialize output
for i in xrange(len(values)): # iterate over groups (unique values in grpColumn)
for i in range(len(values)): # iterate over groups (unique values in grpColumn)
if options.periodic :
grpTable[i] = periodicAverage(table.data[index[i]:index[i+1]],options.boundary) # apply periodicAverage mapping function
grpTable[i] = periodicAverage(table.data[index[i]:index[i+1]],options.boundary) # apply periodicAverage mapping function
else :
grpTable[i] = np.apply_along_axis(mapFunction,0,table.data[index[i]:index[i+1]]) # apply mapping function
if not options.all: grpTable[i,grpColumn] = table.data[index[i],grpColumn] # restore grouping column value
grpTable[i] = np.apply_along_axis(mapFunction,0,table.data[index[i]:index[i+1]]) # apply mapping function
if not options.all: grpTable[i,grpColumn] = table.data[index[i],grpColumn] # restore grouping column value
table.data = grpTable

View File

@ -56,7 +56,7 @@ parser.set_defaults(condition=None)
(options, filenames) = parser.parse_args()
# ----- parse formulas ----- #
for i in xrange(len(options.formulas)):
for i in range(len(options.formulas)):
options.formulas[i] = options.formulas[i].replace(';', ',')
# ----- loop over input files ----- #
@ -64,7 +64,7 @@ for name in filenames:
try:
h5f = damask.H5Table(name, new_file=False)
except:
print "!!!Cannot process {}".format(name)
print("!!!Cannot process {}".format(name))
continue
damask.util.report(scriptName, name)

View File

@ -11,7 +11,7 @@ scriptID = ' '.join([scriptName, damask.version])
def getCauchy(f, p):
"""return Cauchy stress for given f and p"""
"""Return Cauchy stress for given f and p"""
# [Cauchy] = (1/det(F)) * [P].[F_transpose]
f = f.reshape((3, 3))
p = p.reshape((3, 3))
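The hunk cuts off before the body of `getCauchy`, but the comment pins the formula down: [Cauchy] = (1/det(F)) * [P].[F_transpose]. A minimal sketch of how the remainder plausibly reads, where flattening the result back to nine components is an assumption:

    import numpy as np

    def getCauchy(f, p):
        """Return Cauchy stress for given f and p."""
        f = f.reshape((3, 3))
        p = p.reshape((3, 3))
        # [Cauchy] = (1/det(F)) * [P].[F_transpose]
        return (np.dot(p, f.T) / np.linalg.det(f)).reshape(9)   # 9-vector return is assumed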

View File

@ -119,7 +119,7 @@ for name in filenames:
# calculate the IPF color
rgbArrays = np.zeros((orieData.shape[0], 3))
for ci in xrange(rgbArrays.shape[0]):
for ci in range(rgbArrays.shape[0]):
if inputtype == 'eulers':
o = damask.Orientation(Eulers=np.array(orieData[ci, :])*toRadians,
symmetry=options.symmetry).reduced()

View File

@ -14,7 +14,7 @@ scriptID = ' '.join([scriptName, damask.version])
# ----- Helper functions ----- #
def calcMises(what, tensor):
"""calculate von Mises equivalent"""
"""Calculate von Mises equivalent"""
dev = tensor - np.trace(tensor)/3.0*np.eye(3)
symdev = 0.5*(dev+dev.T)
return math.sqrt(np.sum(symdev*symdev.T) *
@ -61,7 +61,7 @@ for name in filenames:
# calculate von Mises equivalent row by row
vmStress = np.zeros(tnsr.shape[0])
for ri in xrange(tnsr.shape[0]):
for ri in range(tnsr.shape[0]):
stressTnsr = tnsr[ri, :].reshape(3, 3)
vmStress[ri] = calcMises('stress', stressTnsr)
@ -77,7 +77,7 @@ for name in filenames:
if options.strain is not None:
tnsr = h5f.get_data(options.strain)
vmStrain = np.zeros(tnsr.shape[0])
for ri in xrange(tnsr.shape[0]):
for ri in range(tnsr.shape[0]):
strainTnsr = tnsr[ri, :].reshape(3, 3)
vmStrain[ri] = calcMises('strain', strainTnsr)
label = "Mises{}".format(options.strain)

View File

@ -25,13 +25,13 @@ def operator(stretch, strain, eigenvalues):
def calcEPS(defgrads, stretchType, strainType):
"""calculate specific type of strain tensor"""
"""Calculate specific type of strain tensor"""
eps = np.zeros(defgrads.shape) # initialize container
# TODO:
# this loop can use some performance boost
# (multi-threading?)
for ri in xrange(defgrads.shape[0]):
for ri in range(defgrads.shape[0]):
f = defgrads[ri, :].reshape(3, 3)
U, S, Vh = np.linalg.svd(f)
R = np.dot(U, Vh) # rotation of polar decomposition

View File

@ -29,7 +29,7 @@ scriptID = ' '.join([scriptName,damask.version])
# ----- HELPER FUNCTIONS -----#
def addTopLvlCmt(xmlstr, topLevelCmt):
"""add top level comment to string from ET"""
"""Add top level comment to string from ET"""
# a quick hack to add the top level comment to XML file
# --> somehow Elementtree does not provide this functionality
# --> by default
@ -42,7 +42,7 @@ def addTopLvlCmt(xmlstr, topLevelCmt):
# MAIN
# --------------------------------------------------------------------
msg = 'generate Xdmf wrapper for HDF5 file.'
msg = 'Generate Xdmf wrapper for HDF5 file.'
parser = OptionParser(option_class=damask.extendableOption,
usage='%prog options [file[s]]',
description = msg,
@ -108,7 +108,7 @@ labelsProcessed = ['Vx', 'Vy', 'Vz']
# walk through each attributes
for label in labels:
if label in labelsProcessed: continue
print "adding {}...".format(label)
print("adding {}...".format(label))
attr = ET.SubElement(grid, 'Attribute',
Name=label,
Type="None",

View File

@ -21,24 +21,24 @@ scriptID = ' '.join([scriptName, damask.version])
# ----- HELPER FUNCTION ----- #
def getMeshFromXYZ(xyzArray, mode):
"""calc Vx,Vy,Vz vectors for vtk rectangular mesh"""
"""Calc Vx,Vy,Vz vectors for vtk rectangular mesh"""
# NOTE:
# --> np.unique will automatically sort the list
# --> although not exactly O(1), the mesh dimension should be
# small anyway, so this is still lightweight.
dim = xyzArray.shape[1] # 2D:2, 3D:3
coords = [np.unique(xyzArray[:, i]) for i in xrange(dim)]
coords = [np.unique(xyzArray[:, i]) for i in range(dim)]
if mode == 'cell':
# since x, y, z might not have the same number of elements,
# we have to deal with them individually
for ri in xrange(dim):
for ri in range(dim):
vctr_pt = coords[ri]
vctr_cell = np.empty(len(vctr_pt)+1)
# calculate first and last end point
vctr_cell[0] = vctr_pt[0] - 0.5*abs(vctr_pt[1] - vctr_pt[0])
vctr_cell[-1] = vctr_pt[-1] + 0.5*abs(vctr_pt[-2] - vctr_pt[-1])
for cj in xrange(1, len(vctr_cell)-1):
for cj in range(1, len(vctr_cell)-1):
vctr_cell[cj] = 0.5*(vctr_pt[cj-1] + vctr_pt[cj])
# update the coords
coords[ri] = vctr_cell

View File

@ -168,8 +168,8 @@ for name in filenames:
im = Image.new('RGBA',imagesize)
draw = ImageDraw.Draw(im)
for y in xrange(options.dimension[1]):
for x in xrange(options.dimension[0]):
for y in range(options.dimension[1]):
for x in range(options.dimension[0]):
draw.polygon([nodes[0,x ,y ,options.z],
nodes[1,x ,y ,options.z],
nodes[0,x+1,y ,options.z],

View File

@ -27,9 +27,9 @@ def outStdout(cmd,locals):
exec(cmd[3:])
elif cmd[0:3] == '(?)':
cmd = eval(cmd[3:])
print cmd
print(cmd)
else:
print cmd
print(cmd)
return
@ -121,17 +121,17 @@ if options.inverse:
theMap = theMap.invert()
if options.palettef:
print theMap.export(format='raw',steps=options.colorcount)
print(theMap.export(format='raw',steps=options.colorcount))
elif options.palette:
for theColor in theMap.export(format='list',steps=options.colorcount):
print '\t'.join(map(lambda x: str(int(255*x)),theColor))
print('\t'.join(map(lambda x: str(int(255*x)),theColor)))
else: # connect to Mentat and change colorMap
sys.path.append(damask.solver.Marc().libraryPath())
try:
import py_mentat
print 'waiting to connect...'
print('waiting to connect...')
py_mentat.py_connect('',options.port)
print 'connected...'
print('connected...')
mentat = True
except:
sys.stderr.write('warning: no valid Mentat release found\n')

View File

@ -150,7 +150,7 @@ class MPIEspectral_result: # mimic py_post result object
self.expectedFileSize = self.dataOffset+self.N_increments*(self.tagLen+self.N_elements*self.N_element_scalars*8)
if options.legacy: self.expectedFileSize+=self.expectedFileSize//self.fourByteLimit*8 # add extra 8 bytes for additional headers at 4 GB limits
if self.expectedFileSize != self.filesize:
print '\n**\n* Unexpected file size. Incomplete simulation or file corrupted!\n**'
print('\n**\n* Unexpected file size. Incomplete simulation or file corrupted!\n**')
def __str__(self):
"""Summary of results file"""
@ -288,8 +288,8 @@ class MPIEspectral_result: # mimic py_post result object
self.file.seek(incStart+where)
value = struct.unpack('d',self.file.read(8))[0]
except:
print 'seeking',incStart+where
print 'e',e,'idx',idx
print('seeking {}'.format(incStart+where))
print('e {} idx {}'.format(e,idx))
sys.exit(1)
else:
@ -304,7 +304,7 @@ class MPIEspectral_result: # mimic py_post result object
try:
if where%self.fourByteLimit + 8 >= self.fourByteLimit: # danger of reading into fortran record footer at 4 byte limit
data=''
for i in xrange(8):
for i in range(8):
self.file.seek(incStart+where+(where//self.fourByteLimit)*8+4)
data += self.file.read(1)
where += 1
@ -313,8 +313,8 @@ class MPIEspectral_result: # mimic py_post result object
self.file.seek(incStart+where+(where//self.fourByteLimit)*8+4)
value = struct.unpack('d',self.file.read(8))[0]
except:
print 'seeking',incStart+where+(where//self.fourByteLimit)*8+4
print 'e',e,'idx',idx
print('seeking {}'.format(incStart+where+(where//self.fourByteLimit)*8+4))
print('e {} idx {}'.format(e,idx))
sys.exit(1)
return [elemental_scalar(node,value) for node in self.element(e).items]
@ -327,7 +327,7 @@ class MPIEspectral_result: # mimic py_post result object
# -----------------------------
def ipCoords(elemType, nodalCoordinates):
"""returns IP coordinates for a given element"""
"""Returns IP coordinates for a given element"""
nodeWeightsPerNode = {
7: [ [27.0, 9.0, 3.0, 9.0, 9.0, 3.0, 1.0, 3.0],
[ 9.0, 27.0, 9.0, 3.0, 3.0, 9.0, 3.0, 1.0],
@ -376,7 +376,7 @@ def ipCoords(elemType, nodalCoordinates):
# -----------------------------
def ipIDs(elemType):
"""returns IP numbers for given element type"""
"""Returns IP numbers for given element type"""
ipPerNode = {
7: [ 1, 2, 4, 3, 5, 6, 8, 7 ],
57: [ 1, 2, 4, 3, 5, 6, 8, 7 ],
@ -392,7 +392,7 @@ def ipIDs(elemType):
# -----------------------------
def substituteLocation(string, mesh, coords):
"""do variable interpolation in group and filter strings"""
"""Do variable interpolation in group and filter strings"""
substitute = string
substitute = substitute.replace('elem', str(mesh[0]))
substitute = substitute.replace('node', str(mesh[1]))
@ -407,7 +407,7 @@ def substituteLocation(string, mesh, coords):
# -----------------------------
def heading(glue,parts):
"""joins pieces from parts by glue. second to last entry in pieces tells multiplicity"""
"""Joins pieces from parts by glue. second to last entry in pieces tells multiplicity"""
header = []
for pieces in parts:
if pieces[-2] == 0:
@ -420,7 +420,7 @@ def heading(glue,parts):
# -----------------------------
def mapIncremental(label, mapping, N, base, new):
"""
applies the function defined by "mapping"
Applies the function defined by "mapping"
(can be either 'min','max','avg', 'sum', or user specified)
to a list of data
@ -450,7 +450,7 @@ def mapIncremental(label, mapping, N, base, new):
# -----------------------------
def OpenPostfile(name,type,nodal = False):
"""open postfile with extrapolation mode 'translate'"""
"""Open postfile with extrapolation mode 'translate'"""
p = {\
'spectral': MPIEspectral_result,\
'marc': post_open,\
@ -463,7 +463,7 @@ def OpenPostfile(name,type,nodal = False):
# -----------------------------
def ParseOutputFormat(filename,what,me):
"""parse .output* files in order to get a list of outputs"""
"""Parse .output* files in order to get a list of outputs"""
content = []
format = {'outputs':{},'specials':{'brothers':[]}}
for prefix in ['']+map(str,range(1,17)):
@ -508,7 +508,7 @@ def ParseOutputFormat(filename,what,me):
# -----------------------------
def ParsePostfile(p,filename, outputFormat):
"""
parse postfile in order to get position and labels of outputs
Parse postfile in order to get position and labels of outputs
needs "outputFormat" for mapping of output names to postfile output indices
"""
@ -592,7 +592,7 @@ def ParsePostfile(p,filename, outputFormat):
try:
stat['LabelOfElementalScalar'][startIndex + offset] = label
except IndexError:
print 'trying to assign %s at position %i+%i'%(label,startIndex,offset)
print('trying to assign {} at position {}+{}'.format(label,startIndex,offset))
sys.exit(1)
offset += 1
@ -821,8 +821,8 @@ bg.set_message('parsing .output files...')
for what in me:
outputFormat[what] = ParseOutputFormat(filename, what, me[what])
if '_id' not in outputFormat[what]['specials']:
print "\nsection '%s' not found in <%s>"%(me[what], what)
print '\n'.join(map(lambda x:' [%s]'%x, outputFormat[what]['specials']['brothers']))
print("\nsection '{}' not found in <{}>".format(me[what], what))
print('\n'.join(map(lambda x:' [%s]'%x, outputFormat[what]['specials']['brothers'])))
bg.set_message('opening result file...')
p = OpenPostfile(filename+extension,options.filetype,options.nodal)
@ -851,17 +851,17 @@ for opt in ['nodalScalar','elemScalar','elemTensor','homogenizationResult','crys
if options.info:
if options.filetype == 'marc':
print '\n\nMentat release %s'%damask.solver.Marc().version('../../')
print('\n\nMentat release {}'.format(damask.solver.Marc().version('../../')))
if options.filetype == 'spectral':
print '\n\n',p
print('\n\n{}'.format(p))
SummarizePostfile(stat)
print '\nUser Defined Outputs'
print('\nUser Defined Outputs')
for what in me:
print '\n ',what,':'
print('\n {}:'.format(what))
for output in outputFormat[what]['outputs']:
print ' ',output
print(' {}'.format(output))
sys.exit(0)
@ -869,7 +869,7 @@ if options.info:
# --- build connectivity maps
elementsOfNode = {}
for e in xrange(stat['NumberOfElements']):
for e in range(stat['NumberOfElements']):
if e%1000 == 0:
bg.set_message('connect elem %i...'%e)
for n in map(p.node_sequence,p.element(e).items):
@ -892,7 +892,7 @@ groupCount = 0
memberCount = 0
if options.nodalScalar:
for n in xrange(stat['NumberOfNodes']):
for n in range(stat['NumberOfNodes']):
if n%1000 == 0:
bg.set_message('scan node %i...'%n)
myNodeID = p.node_id(n)
@ -927,7 +927,7 @@ if options.nodalScalar:
memberCount += 1
else:
for e in xrange(stat['NumberOfElements']):
for e in range(stat['NumberOfElements']):
if e%1000 == 0:
bg.set_message('scan elem %i...'%e)
myElemID = p.element_id(e)
@ -947,27 +947,27 @@ else:
# --- filter valid locations
# generates an expression that is only true for the locations specified by options.filter
filter = substituteLocation(options.filter, [myElemID,myNodeID,myIpID,myGrainID], myIpCoordinates[n])
if filter != '' and not eval(filter): # for all filter expressions that are not true:...
continue # ... ignore this data point and continue with next
if filter != '' and not eval(filter): # for all filter expressions that are not true:...
continue # ... ignore this data point and continue with next
# --- group data locations
# generates a unique key for a group of separated data based on the separation criterium for the location
grp = substituteLocation('#'.join(options.sep), [myElemID,myNodeID,myIpID,myGrainID], myIpCoordinates[n])
if grp not in index: # create a new group if not yet present
if grp not in index: # create a new group if not yet present
index[grp] = groupCount
groups.append([[0,0,0,0,0.0,0.0,0.0]]) # initialize with avg location
groups.append([[0,0,0,0,0.0,0.0,0.0]]) # initialize with avg location
groupCount += 1
groups[index[grp]][0][:4] = mapIncremental('','unique',
len(groups[index[grp]])-1,
groups[index[grp]][0][:4],
[myElemID,myNodeID,myIpID,myGrainID]) # keep only if unique average location
[myElemID,myNodeID,myIpID,myGrainID]) # keep only if unique average location
groups[index[grp]][0][4:] = mapIncremental('','avg',
len(groups[index[grp]])-1,
groups[index[grp]][0][4:],
myIpCoordinates[n]) # incrementally update average location
groups[index[grp]].append([myElemID,myNodeID,myIpID,myGrainID,n]) # append a new list defining each group member
myIpCoordinates[n]) # incrementally update average location
groups[index[grp]].append([myElemID,myNodeID,myIpID,myGrainID,n]) # append a new list defining each group member
memberCount += 1
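The 'avg' mode of mapIncremental above folds each new member into a running mean, so group locations never need a second pass over the data. A minimal sketch of such an update rule, inferred from the comments above (the helper name is illustrative, not the repository's implementation):

    def running_mean(count, mean, value):
        """Mean over count+1 samples after folding in one more value."""
        return mean + (value - mean) / float(count + 1)

    m = 0.0
    for i, v in enumerate([2.0, 4.0, 6.0]):
        m = running_mean(i, m, v)
    print(m)     # 4.0, identical to sum/len but computed incrementally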
@@ -996,14 +996,14 @@ if 'none' not in map(str.lower, options.sort):
sortKeys = eval('lambda x:(%s)'%(','.join(theKeys)))
bg.set_message('sorting groups...')
groups.sort(key = sortKeys) # in-place sorting to save mem
groups.sort(key = sortKeys) # in-place sorting to save mem
# --------------------------- create output dir --------------------------------
dirname = os.path.abspath(os.path.join(os.path.dirname(filename),options.dir))
if not os.path.isdir(dirname):
os.mkdir(dirname,0755)
os.mkdir(dirname,0o755)
fileOpen = False
assembleHeader = True
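The mkdir change above is forced by syntax: bare octal literals like 0755 are a SyntaxError on Python 3; the 0o prefix is accepted from Python 2.6 on. For instance:

    import stat

    mode = 0o755                 # the former 0755
    assert mode == 493           # same value in decimal
    assert mode == stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH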
@@ -1049,7 +1049,7 @@ for incCount,position in enumerate(locations): # walk through locations
if options.separateFiles:
if fileOpen:
file.close()
file.close() # noqa
fileOpen = False
outFilename = eval('"'+eval("'%%s_inc%%0%ii%%s.txt'%(math.log10(max(increments+[1]))+1)")\
+'"%(dirname + os.sep + options.prefix + os.path.split(filename)[1],increments[incCount],options.suffix)')
@@ -1070,15 +1070,15 @@ for incCount,position in enumerate(locations): # walk through locations
member = 0
for group in groups:
N = 0 # group member counter
for (e,n,i,g,n_local) in group[1:]: # loop over group members
N = 0 # group member counter
for (e,n,i,g,n_local) in group[1:]: # loop over group members
member += 1
if member%1000 == 0:
time_delta = ((len(locations)*memberCount)/float(member+incCount*memberCount)-1.0)*(time.time()-time_start)
bg.set_message('(%02i:%02i:%02i) processing point %i of %i from increment %i (position %i)...'
%(time_delta//3600,time_delta%3600//60,time_delta%60,member,memberCount,increments[incCount],position))
newby = [] # current member's data
newby = [] # current member's data
if options.nodalScalar:
for label in options.nodalScalar:
@@ -1126,7 +1126,7 @@ for incCount,position in enumerate(locations): # walk through locations
['Crystallite']*len(options.crystalliteResult) +
['Constitutive']*len(options.constitutiveResult)
):
outputIndex = list(zip(*outputFormat[resultType]['outputs'])[0]).index(label) # find the position of this output in the outputFormat
outputIndex = list(zip(*outputFormat[resultType]['outputs']))[0].index(label) # find the position of this output in the outputFormat
length = int(outputFormat[resultType]['outputs'][outputIndex][1])
thisHead = heading('_',[[component,''.join( label.split() )] for component in range(int(length>1),length+int(length>1))])
if assembleHeader: header += thisHead
@@ -1138,13 +1138,13 @@ for incCount,position in enumerate(locations): # walk through locations
'content':[ p.element_scalar(p.element_sequence(e),stat['IndexOfLabel'][head])[n_local].value
for head in thisHead ]})
except KeyError:
print '\nDAMASK outputs seem missing from "post" section of the *.dat file!'
print('\nDAMASK outputs seem missing from "post" section of the *.dat file!')
sys.exit()
assembleHeader = False
if N == 0:
mappedResult = [float(x) for x in xrange(len(header))] # initialize with debug data (should get deleted by *N at N=0)
mappedResult = [float(x) for x in range(len(header))] # init with debug data (should get deleted by *N at N=0)
pos = 0
for chunk in newby:

View File

@@ -67,7 +67,7 @@ for name in filenames:
if index == -1: remarks.append('label "{}" not present...'.format(options.label[i]))
else:
m = pattern[dimensions[i]>1].match(table.tags[index]) # isolate label name
for j in xrange(dimensions[i]):
for j in range(dimensions[i]):
table.tags[index+j] = table.tags[index+j].replace(m.group(2),options.substitute[i]) # replace name with substitute
if remarks != []: damask.util.croak(remarks)

View File

@@ -12,7 +12,7 @@ scriptID = ' '.join([scriptName,damask.version])
# -----------------------------
def getHeader(filename,sizeFastIndex,sizeSlowIndex,stepsize):
"""returns header for ang file step size in micrometer"""
"""Returns header for ang file step size in micrometer"""
return '\n'.join([ \
'# TEM_PIXperUM 1.000000', \
'# x-star 1.000000', \
@@ -48,7 +48,7 @@ def getHeader(filename,sizeFastIndex,sizeSlowIndex,stepsize):
# -----------------------------
def positiveRadians(angle):
"""returns positive angle in radians from angle in degrees"""
"""Returns positive angle in radians from angle in degrees"""
angle = math.radians(float(angle))
while angle < 0.0:
angle += 2.0 * math.pi
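The loop above walks a negative angle up into [0, 2*pi). Floating-point modulo gives the same normalization in one step; a possible simplification as a sketch, not a change made in this commit:

    import math

    def positive_radians(angle_deg):
        """Degrees to radians, wrapped into [0, 2*pi)."""
        return math.radians(float(angle_deg)) % (2.0 * math.pi)

    print(positive_radians(-90.0))   # 4.71238898..., i.e. 3*pi/2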
@@ -59,7 +59,7 @@ def positiveRadians(angle):
# -----------------------------
def getDataLine(angles,x,y,validData=True):
"""
returns string of one line in ang file
Returns string of one line in ang file
convention in ang file: y coordinate comes first and is fastest index
positions in micrometer
@@ -295,13 +295,13 @@ for filename in filenames:
if options.verbose: sys.stdout.write("\nGENERATING POINTS FOR POINT GRID")
points = vtk.vtkPoints()
for k in xrange(Npoints[2]):
for j in xrange(Npoints[0]):
for i in xrange(Npoints[1]): # y is fastest index
for k in range(Npoints[2]):
for j in range(Npoints[0]):
for i in range(Npoints[1]): # y is fastest index
rotatedpoint = np.array([rotatedbox[0][0] + (float(j) + 0.5) * options.resolution,
rotatedbox[1][0] + (float(i) + 0.5) * options.resolution,
rotatedbox[2][0] + (float(k) + 0.5) * options.distance ]) # point in rotated system
point = np.dot(R.T,rotatedpoint) # point in mesh system
rotatedbox[2][0] + (float(k) + 0.5) * options.distance ]) # point in rotated system
point = np.dot(R.T,rotatedpoint) # point in mesh system
points.InsertNextPoint(list(point))
if options.verbose:
sys.stdout.write("\rGENERATING POINTS FOR POINT GRID %d%%" %(100*(Npoints[1]*(k*Npoints[0]+j)+i+1)/totalNpoints))
@@ -315,7 +315,7 @@ for filename in filenames:
if options.verbose: sys.stdout.write("\nGENERATING VERTICES FOR POINT GRID")
vertices = vtk.vtkCellArray()
for i in xrange(totalNpoints):
for i in range(totalNpoints):
vertex = vtk.vtkVertex()
vertex.GetPointIds().SetId(0,i) # each vertex consists of exactly one (index 0) point with ID "i"
vertices.InsertNextCell(vertex)
@@ -378,7 +378,7 @@ for filename in filenames:
with open(angfilename,'w') as angfile:
if options.verbose: sys.stdout.write(" %s\n"%angfilename)
angfile.write(getHeader(filename,Npoints[1],Npoints[0],options.resolution*options.scale))
for i in xrange(sliceN*NpointsPerSlice,(sliceN+1)*NpointsPerSlice): # Iterate over points on slice
for i in range(sliceN*NpointsPerSlice,(sliceN+1)*NpointsPerSlice): # Iterate over points on slice
# Get euler angles of closest IDs

View File

@@ -76,12 +76,12 @@ for name in filenames:
np.zeros((table.data.shape[0],
3-table.data.shape[1]),dtype='f'))) # fill coords up to 3D with zeros
coords = [np.unique(table.data[:,i]) for i in xrange(3)]
coords = [np.unique(table.data[:,i]) for i in range(3)]
if options.mode == 'cell':
coords = [0.5 * np.array([3.0 * coords[i][0] - coords[i][0 + int(len(coords[i]) > 1)]] + \
[coords[i][j-1] + coords[i][j] for j in xrange(1,len(coords[i]))] + \
[3.0 * coords[i][-1] - coords[i][-1 - int(len(coords[i]) > 1)]]) for i in xrange(3)]
[coords[i][j-1] + coords[i][j] for j in range(1,len(coords[i]))] + \
[3.0 * coords[i][-1] - coords[i][-1 - int(len(coords[i]) > 1)]]) for i in range(3)]
grid = np.array(map(len,coords),'i')
N = grid.prod() if options.mode == 'point' else (grid-1).prod()

View File

@@ -3,7 +3,7 @@
import sys,os,re
from optparse import OptionParser
import damask
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
@@ -14,13 +14,13 @@ def ParseOutputFormat(filename,what,me):
outputmetafile = filename+'.output'+what
try:
file = open(outputmetafile)
myFile = open(outputmetafile)
except:
print('Could not open file %s'%outputmetafile)
raise
else:
content = file.readlines()
file.close()
content = myFile.readlines()
myFile.close()
tag = ''
tagID = 0
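Renaming file to myFile stops the script from shadowing the file builtin (removed in Python 3 anyway). A with-block would avoid both the shadowing and the manual close(); a sketch, assuming a plain text file:

    def read_lines(path):
        with open(path) as handle:       # 'handle' shadows no builtin
            return handle.readlines()    # closed on exit, even after an exception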
@@ -60,7 +60,7 @@ that are written during the first run of the model.
Specify which user block format you want to apply by stating the homogenization, crystallite, and phase identifiers.
Or have an existing set of user variables copied over from another *.inp file.
""", version= scriptID)
""", version = scriptID)
parser.add_option('-m', dest='number', type='int', metavar = 'int',
help='maximum requested User Defined Variable [%default]')
@@ -92,15 +92,15 @@ me = { 'Homogenization': options.homog,
}
for file in files:
print '\033[1m'+scriptName+'\033[0m: '+file+'\n'
for myFile in files:
print('\033[1m'+scriptName+'\033[0m: '+myFile+'\n')
if options.useFile:
formatFile = os.path.splitext(options.useFile)[0]
else:
formatFile = os.path.splitext(file)[0]
file = os.path.splitext(file)[0]+'.inp'
if not os.path.lexists(file):
print file,'not found'
formatFile = os.path.splitext(myFile)[0]
myFile = os.path.splitext(myFile)[0]+'.inp'
if not os.path.lexists(myFile):
print('{} not found'.format(myFile))
continue
print('Scanning format files of: %s'%formatFile)
@@ -111,8 +111,8 @@ for file in files:
for what in me:
outputFormat[what] = ParseOutputFormat(formatFile,what,me[what])
if '_id' not in outputFormat[what]['specials']:
print "'%s' not found in <%s>"%(me[what],what)
print '\n'.join(map(lambda x:' '+x,outputFormat[what]['specials']['brothers']))
print("'{}' not found in <{}>".format(me[what],what))
print('\n'.join(map(lambda x:' '+x,outputFormat[what]['specials']['brothers'])))
sys.exit(1)
UserVars = ['HomogenizationCount']
@@ -140,11 +140,11 @@ for file in files:
UserVars += ['%i_%s'%(grain+1,var[0]) for i in range(var[1])]
# Now change *.inp file(s)
print('Adding labels to: %s'%file)
inFile = open(file)
print('Adding labels to: %s'%myFile)
inFile = open(myFile)
input = inFile.readlines()
inFile.close()
output = open(file,'w')
output = open(myFile,'w')
thisSection = ''
if options.damaskOption:
output.write('$damask {0}\n'.format(options.damaskOption))
@@ -165,3 +165,4 @@ for file in files:
if (thisSection.upper() != '*DEPVAR' or not re.match('\s*\d',line)):
output.write(line)
output.close()

View File

@@ -93,12 +93,12 @@ for name in filenames:
microstructure_cropped = np.zeros(newInfo['grid'],datatype)
microstructure_cropped.fill(options.fill if options.real or options.fill > 0 else microstructure.max()+1)
xindex = list(set(xrange(options.offset[0],options.offset[0]+newInfo['grid'][0])) & \
set(xrange(info['grid'][0])))
yindex = list(set(xrange(options.offset[1],options.offset[1]+newInfo['grid'][1])) & \
set(xrange(info['grid'][1])))
zindex = list(set(xrange(options.offset[2],options.offset[2]+newInfo['grid'][2])) & \
set(xrange(info['grid'][2])))
xindex = list(set(range(options.offset[0],options.offset[0]+newInfo['grid'][0])) & \
set(range(info['grid'][0])))
yindex = list(set(range(options.offset[1],options.offset[1]+newInfo['grid'][1])) & \
set(range(info['grid'][1])))
zindex = list(set(range(options.offset[2],options.offset[2]+newInfo['grid'][2])) & \
set(range(info['grid'][2])))
translate_x = [i - options.offset[0] for i in xindex]
translate_y = [i - options.offset[1] for i in yindex]
translate_z = [i - options.offset[2] for i in zindex]

View File

@@ -125,10 +125,10 @@ for name in filenames:
Y = options.periods*2.0*math.pi*(np.arange(options.grid[1])+0.5)/options.grid[1]
Z = options.periods*2.0*math.pi*(np.arange(options.grid[2])+0.5)/options.grid[2]
for z in xrange(options.grid[2]):
for y in xrange(options.grid[1]):
for z in range(options.grid[2]):
for y in range(options.grid[1]):
table.data_clear()
for x in xrange(options.grid[0]):
for x in range(options.grid[0]):
table.data_append(options.microstructure[options.threshold < surface[options.type](X[x],Y[y],Z[z])])
table.data_write()

View File

@@ -105,8 +105,8 @@ microstructure = np.where(radius < float(options.canal),1,0) + np.where(radius >
alphaOfGrain = np.zeros(info['grid'][0]*info['grid'][1],'d')
betaOfGrain = np.zeros(info['grid'][0]*info['grid'][1],'d')
for y in xrange(info['grid'][1]):
for x in xrange(info['grid'][0]):
for y in range(info['grid'][1]):
for x in range(info['grid'][0]):
if microstructure[y,x] == 0:
microstructure[y,x] = info['microstructures']
alphaOfGrain[info['microstructures']] = alpha[y,x]
@@ -129,7 +129,7 @@ header.append('(constituent)\tphase 1\ttexture 1\tfraction 1.0')
header.append('[interstitial]')
header.append('crystallite %i'%options.crystallite)
header.append('(constituent)\tphase 2\ttexture 2\tfraction 1.0')
for i in xrange(3,info['microstructures']):
for i in range(3,info['microstructures']):
header.append('[Grain%s]'%(str(i).zfill(formatwidth)))
header.append('crystallite %i'%options.crystallite)
header.append('(constituent)\tphase 3\ttexture %s\tfraction 1.0'%(str(i).rjust(formatwidth)))
@@ -137,7 +137,7 @@ for i in xrange(3,info['microstructures']):
header.append('<texture>')
header.append('[canal]')
header.append('[interstitial]')
for i in xrange(3,info['microstructures']):
for i in range(3,info['microstructures']):
header.append('[Grain%s]'%(str(i).zfill(formatwidth)))
header.append('(gauss)\tphi1 %g\tPhi %g\tphi2 0\tscatter 0.0\tfraction 1.0'\
%(alphaOfGrain[i],betaOfGrain[i]))

View File

@@ -163,7 +163,7 @@ for name in filenames:
# --------------- figure out size and grid ---------------------------------------------------------
coords = [np.unique(table.data[:,i]) for i in xrange(3)]
coords = [np.unique(table.data[:,i]) for i in range(3)]
mincorner = np.array(map(min,coords))
maxcorner = np.array(map(max,coords))
grid = np.array(map(len,coords),'i')
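A Python 3 caveat these hunks leave open: map() returns an iterator on Python 3, so the neighboring np.array(map(min,coords)) and np.array(map(len,coords),'i') calls raise a TypeError there and would need list() (or a comprehension) in a follow-up. Illustration:

    import numpy as np

    coords = [np.array([0.0, 1.0]), np.array([0.0, 0.5, 1.0]), np.array([0.0])]

    grid = np.array(list(map(len, coords)), 'i')     # works on Python 2 and 3
    print(grid)                                      # [2 3 1]
    grid = np.array([len(c) for c in coords], 'i')   # equivalent comprehension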
@@ -217,9 +217,9 @@ for name in filenames:
tick = time.clock()
if options.verbose: bg.set_message('assigning grain IDs...')
for z in xrange(grid[2]):
for y in xrange(grid[1]):
for x in xrange(grid[0]):
for z in range(grid[2]):
for y in range(grid[1]):
for x in range(grid[0]):
if (myPos+1)%(N/500.) < 1:
time_delta = (time.clock()-tick) * (N - myPos) / myPos
if options.verbose: bg.set_message('(%02i:%02i:%02i) processing point %i of %i (grain count %i)...'

View File

@@ -13,7 +13,7 @@ scriptID = ' '.join([scriptName,damask.version])
def meshgrid2(*arrs):
"""code inspired by http://stackoverflow.com/questions/1827489/numpy-meshgrid-in-3d"""
"""Code inspired by http://stackoverflow.com/questions/1827489/numpy-meshgrid-in-3d"""
arrs = tuple(reversed(arrs))
arrs = tuple(arrs)
lens = np.array(map(len, arrs))
@@ -240,7 +240,7 @@ for name in filenames:
if np.any(info['size'] <= 0.0) \
and np.all(info['grid'] < 1): errors.append('invalid size x y z.')
else:
for i in xrange(3):
for i in range(3):
if info['size'][i] <= 0.0: # any invalid size?
info['size'][i] = float(info['grid'][i])/max(info['grid']) # normalize to grid
remarks.append('rescaling size {} to {}...'.format({0:'x',1:'y',2:'z'}[i],info['size'][i]))

View File

@@ -95,7 +95,7 @@ for name in filenames:
struc = ndimage.generate_binary_structure(3,1) # 3D von Neumann neighborhood
for smoothIter in xrange(options.N):
for smoothIter in range(options.N):
periodic_microstructure = np.tile(microstructure,(3,3,3))[grid[0]/2:-grid[0]/2,
grid[1]/2:-grid[1]/2,
grid[2]/2:-grid[2]/2] # periodically extend the microstructure

View File

@@ -76,7 +76,7 @@ for name in filenames:
items = table.data
if len(items) > 2:
if items[1].lower() == 'of': items = [int(items[2])]*int(items[0])
elif items[1].lower() == 'to': items = xrange(int(items[0]),1+int(items[2]))
elif items[1].lower() == 'to': items = range(int(items[0]),1+int(items[2]))
else: items = map(int,items)
else: items = map(int,items)

View File

@@ -92,10 +92,10 @@ for name in filenames:
newInfo['size'] = np.where(newInfo['size'] <= 0.0, info['size'],newInfo['size'])
multiplicity = []
for j in xrange(3):
for j in range(3):
multiplicity.append([])
last = 0
for i in xrange(info['grid'][j]):
for i in range(info['grid'][j]):
this = int((i+1)*float(newInfo['grid'][j])/info['grid'][j])
multiplicity[j].append(this-last)
last = this

View File

@@ -63,7 +63,7 @@ for name in filenames:
table.info_clear()
table.info_append(extra_header + [scriptID + '\t' + ' '.join(sys.argv[1:])])
table.labels_clear()
table.labels_append(['{}_{}'.format(1+i,'pos') for i in xrange(3)]+['microstructure'])
table.labels_append(['{}_{}'.format(1+i,'pos') for i in range(3)]+['microstructure'])
table.head_write()
table.output_flush()

View File

@@ -46,7 +46,7 @@ parser.set_defaults(origin = (0.0,0.0,0.0),
datatype = 'f' if options.real else 'i'
sub = {}
for i in xrange(len(options.substitute)/2): # split substitution list into "from" -> "to"
for i in range(len(options.substitute)//2): # split substitution list into "from" -> "to"
sub[int(options.substitute[i*2])] = int(options.substitute[i*2+1])
# --- loop over input files ----------------------------------------------------------------------
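One more Python 3 trap in the loop header above: / between two ints is true division there, and range() rejects the resulting float, hence the floor division //. The difference in isolation:

    pairs = ['1', '2', '3', '4']      # flat "from, to" substitution list

    print(len(pairs) / 2)    # Python 3: 2.0 (float) -- invalid as a range() bound
    print(len(pairs) // 2)   # 2 on both versions

    sub = {}
    for i in range(len(pairs) // 2):
        sub[int(pairs[i*2])] = int(pairs[i*2+1])
    print(sub)               # {1: 2, 3: 4}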
@@ -82,7 +82,7 @@ for name in filenames:
# --- read data ----------------------------------------------------------------------------------
microstructure = table.microstructure_read(info['grid'],datatype) # read microstructure
microstructure = table.microstructure_read(info['grid'],datatype) # read microstructure
# --- do work ------------------------------------------------------------------------------------

View File

@@ -19,7 +19,7 @@ def integerFactorization(i):
return j
def binAsBins(bin,intervals):
"""explode compound bin into 3D bins list"""
"""Explode compound bin into 3D bins list"""
bins = [0]*3
bins[0] = (bin//(intervals[1] * intervals[2])) % intervals[0]
bins[1] = (bin//intervals[2]) % intervals[1]
@@ -27,17 +27,17 @@ def binAsBins(bin,intervals):
return bins
def binsAsBin(bins,intervals):
"""implode 3D bins into compound bin"""
"""Implode 3D bins into compound bin"""
return (bins[0]*intervals[1] + bins[1])*intervals[2] + bins[2]
def EulersAsBins(Eulers,intervals,deltas,center):
"""return list of Eulers translated into 3D bins list"""
"""Return list of Eulers translated into 3D bins list"""
return [int((euler+(0.5-center)*delta)//delta)%interval \
for euler,delta,interval in zip(Eulers,deltas,intervals) \
]
def binAsEulers(bin,intervals,deltas,center):
"""compound bin number translated into list of Eulers"""
"""Compound bin number translated into list of Eulers"""
Eulers = [0.0]*3
Eulers[2] = (bin%intervals[2] + center)*deltas[2]
Eulers[1] = (bin//intervals[2]%intervals[1] + center)*deltas[1]
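binAsBins/binsAsBin (and binAsEulers above) read a flat bin index as a mixed-radix number whose digits are the three interval counts, so encode and decode are exact inverses. A standalone round-trip check of that scheme (function names illustrative):

    def bin_to_bins(b, intervals):
        """Flat index -> three digit indices (mixed radix)."""
        return [(b // (intervals[1] * intervals[2])) % intervals[0],
                (b // intervals[2]) % intervals[1],
                 b % intervals[2]]

    def bins_to_bin(bins, intervals):
        """Three digit indices -> flat index."""
        return (bins[0] * intervals[1] + bins[1]) * intervals[2] + bins[2]

    intervals = [4, 5, 6]
    assert all(bins_to_bin(bin_to_bins(b, intervals), intervals) == b
               for b in range(4 * 5 * 6))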
@@ -45,7 +45,7 @@ def binAsEulers(bin,intervals,deltas,center):
return Eulers
def directInvRepetitions(probability,scale):
"""calculate number of samples drawn by direct inversion"""
"""Calculate number of samples drawn by direct inversion"""
nDirectInv = 0
for bin in range(len(probability)): # loop over bins
nDirectInv += int(round(probability[bin]*scale)) # calc repetition
@@ -270,7 +270,7 @@ for name in filenames:
ODF['limit'] = np.radians(limits[1,:]) # right hand limits in radians
ODF['center'] = 0.0 if all(limits[0,:]<1e-8) else 0.5 # vertex or cell centered
ODF['interval'] = np.array(map(len,[np.unique(table.data[:,i]) for i in xrange(3)]),'i') # steps are number of distict values
ODF['interval'] = np.array(map(len,[np.unique(table.data[:,i]) for i in range(3)]),'i') # steps are number of distict values
ODF['nBins'] = ODF['interval'].prod()
ODF['delta'] = np.radians(np.array(limits[1,0:3]-limits[0,0:3])/(ODF['interval']-1)) # step size
@@ -349,7 +349,7 @@ for name in filenames:
'#-------------------#',
]
for i,ID in enumerate(xrange(nSamples)):
for i,ID in enumerate(range(nSamples)):
materialConfig += ['[Grain%s]'%(str(ID+1).zfill(formatwidth)),
'crystallite %i'%options.crystallite,
'(constituent) phase %i texture %s fraction 1.0'%(options.phase,str(ID+1).rjust(formatwidth)),
@@ -361,7 +361,7 @@ for name in filenames:
'#-------------------#',
]
for ID in xrange(nSamples):
for ID in range(nSamples):
eulers = Orientations[ID]
materialConfig += ['[Grain%s]'%(str(ID+1).zfill(formatwidth)),

View File

@@ -1,13 +1,6 @@
#!/usr/bin/env python2.7
# -*- coding: UTF-8 no BOM -*-
"""
Writes meaningful labels to the marc input file (*.dat)
output is based on the files
<modelname_jobname>.output<Homogenization/Crystallite/Constitutive>
that are written during the first run of the model.
"""
import sys,os,re
from optparse import OptionParser
import damask
@@ -21,13 +14,13 @@ def ParseOutputFormat(filename,what,me):
outputmetafile = filename+'.output'+what
try:
file = open(outputmetafile)
myFile = open(outputmetafile)
except:
print('Could not open file %s'%outputmetafile)
raise
else:
content = file.readlines()
file.close()
content = myFile.readlines()
myFile.close()
tag = ''
tagID = 0
@@ -109,15 +102,15 @@ me = { 'Homogenization': options.homog,
}
for file in files:
print '\033[1m'+scriptName+'\033[0m: '+file+'\n'
for myFile in files:
print('\033[1m'+scriptName+'\033[0m: '+myFile+'\n')
if options.useFile != '':
formatFile = os.path.splitext(options.useFile)[0]
else:
formatFile = os.path.splitext(file)[0]
file = os.path.splitext(file)[0]+'.dat'
if not os.path.lexists(file):
print file,'not found'
formatFile = os.path.splitext(myFile)[0]
myFile = os.path.splitext(myFile)[0]+'.dat'
if not os.path.lexists(myFile):
print('{} not found'.format(myFile))
continue
print('Scanning format files of: %s'%formatFile)
@@ -128,8 +121,8 @@ for file in files:
for what in me:
outputFormat[what] = ParseOutputFormat(formatFile,what,me[what])
if '_id' not in outputFormat[what]['specials']:
print "'%s' not found in <%s>"%(me[what],what)
print '\n'.join(map(lambda x:' '+x,outputFormat[what]['specials']['brothers']))
print("'{}' not found in <{}>"%(me[what],what))
print('\n'.join(map(lambda x:' '+x,outputFormat[what]['specials']['brothers'])))
sys.exit(1)
UserVars = ['HomogenizationCount']
@@ -157,11 +150,11 @@ for file in files:
UserVars += ['%i_%s'%(grain+1,var[0]) for i in range(var[1])]
# Now change *.dat file(s)
print('Adding labels to: %s'%file)
inFile = open(file)
print('Adding labels to: %s'%myFile)
inFile = open(myFile)
input = inFile.readlines()
inFile.close()
output = open(file,'w')
output = open(myFile,'w')
thisSection = ''
if options.damaskOption:
output.write('$damask {0}\n'.format(options.damaskOption))

View File

@@ -58,14 +58,14 @@ def servoLink():
}
Nnodes = py_mentat.py_get_int("nnodes()")
NodeCoords = np.zeros((Nnodes,3),dtype='d')
for node in xrange(Nnodes):
for node in range(Nnodes):
NodeCoords[node,0] = py_mentat.py_get_float("node_x(%i)"%(node+1))
NodeCoords[node,1] = py_mentat.py_get_float("node_y(%i)"%(node+1))
NodeCoords[node,2] = py_mentat.py_get_float("node_z(%i)"%(node+1))
box['min'] = NodeCoords.min(axis=0) # find the bounding box
box['min'] = NodeCoords.min(axis=0) # find the bounding box
box['max'] = NodeCoords.max(axis=0)
box['delta'] = box['max']-box['min']
for coord in xrange(3): # calc the dimension of the bounding box
for coord in range(3): # calc the dimension of the bounding box
if box['delta'][coord] != 0.0:
for extremum in ['min','max']:
rounded = round(box[extremum][coord]*1e+15/box['delta'][coord]) * \
@@ -76,12 +76,12 @@ def servoLink():
#-------------------------------------------------------------------------------------------------
# loop over all nodes
for node in xrange(Nnodes):
for node in range(Nnodes):
key = {}
maxFlag = [False, False, False]
Nmax = 0
Nmin = 0
for coord in xrange(3): # for each direction
for coord in range(3): # for each direction
if box['delta'][coord] != 0.0:
rounded = round(NodeCoords[node,coord]*1e+15/box['delta'][coord]) * \
1e-15*box['delta'][coord] # rounding to 1e-15 of dimension
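The expression above snaps each coordinate to 1e-15 of the bounding-box extent, so nodes that sit on the same face but differ in the last float bits produce identical dictionary keys later on. The idea in isolation (helper name illustrative):

    def snap(value, extent):
        """Round value to 1e-15 of extent so near-equal floats compare equal."""
        return round(value * 1e+15 / extent) * 1e-15 * extent

    a, b = 0.1 + 0.2, 0.3                 # differ by one ulp
    print(a == b)                         # False
    print(snap(a, 1.0) == snap(b, 1.0))   # True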
@@ -102,18 +102,18 @@ def servoLink():
if key['z'] not in baseNode[key['x']][key['y']].keys():
baseNode[key['x']][key['y']][key['z']] = 0
baseNode[key['x']][key['y']][key['z']] = node+1 # remember the base node id
baseNode[key['x']][key['y']][key['z']] = node+1 # remember the base node id
if Nmax > 0 and Nmax >= Nmin: # node is on at least as many front than back faces
if any([maxFlag[i] and active[i] for i in xrange(3)]):
linkNodes.append({'id': node+1,'coord': NodeCoords[node], 'faceMember': [maxFlag[i] and active[i] for i in xrange(3)]})
if Nmax > 0 and Nmax >= Nmin: # node is on at least as many front than back faces
if any([maxFlag[i] and active[i] for i in range(3)]):
linkNodes.append({'id': node+1,'coord': NodeCoords[node], 'faceMember': [maxFlag[i] and active[i] for i in range(3)]})
baseCorner = baseNode["%.8e"%box['min'][0]]["%.8e"%box['min'][1]]["%.8e"%box['min'][2]] # detect ultimate base node
for node in linkNodes: # loop over all linked nodes
linkCoord = [node['coord']] # start list of control node coords with my coords
for dir in xrange(3): # check for each direction
for dir in range(3): # check for each direction
if node['faceMember'][dir]: # me on this front face
linkCoord[0][dir] = box['min'][dir] # project me onto rear face along dir
linkCoord.append(np.array(box['min'])) # append base corner

View File

@@ -42,9 +42,9 @@ def outStdout(cmd,locals):
exec(cmd[3:])
elif cmd[0:3] == '(?)':
cmd = eval(cmd[3:])
print cmd
print(cmd)
else:
print cmd
print(cmd)
return
@@ -84,7 +84,7 @@ def rcbOrientationParser(content,idcolumn):
return grains
def rcbParser(content,M,size,tolerance,idcolumn,segmentcolumn):
"""parser for TSL-OIM reconstructed boundary files"""
"""Parser for TSL-OIM reconstructed boundary files"""
# find bounding box
boxX = [1.*sys.maxint,-1.*sys.maxint]
boxY = [1.*sys.maxint,-1.*sys.maxint]
@@ -333,21 +333,21 @@ def rcbParser(content,M,size,tolerance,idcolumn,segmentcolumn):
for neighbors in grainNeighbors:
rcData['neighbors'].append(neighbors)
for legs in grains['legs']: # loop over grains
rcData['grain'].append(legs) # store list of boundary segments
for legs in grains['legs']: # loop over grains
rcData['grain'].append(legs) # store list of boundary segments
myNeighbors = {}
for leg in legs: # test each boundary segment
if leg < len(grainNeighbors): # a valid segment index?
for side in range(2): # look at both sides of the segment
if grainNeighbors[leg][side] in myNeighbors: # count occurrence of grain IDs
for leg in legs: # test each boundary segment
if leg < len(grainNeighbors): # a valid segment index?
for side in range(2): # look at both sides of the segment
if grainNeighbors[leg][side] in myNeighbors: # count occurrence of grain IDs
myNeighbors[grainNeighbors[leg][side]] += 1
else:
myNeighbors[grainNeighbors[leg][side]] = 1
if myNeighbors: # do I have any neighbors (i.e., non-bounding box segment)
candidateGrains = sorted(myNeighbors.iteritems(), key=lambda (k,v): (v,k), reverse=True) # sort grain counting
# most frequent one not yet seen?
if myNeighbors: # do I have any neighbors (i.e., non-bounding box segment)
candidateGrains = sorted(myNeighbors.items(), key=lambda kv: (kv[1],kv[0]), reverse=True) # sort grain counting
# most frequent one not yet seen?
rcData['grainMapping'].append(candidateGrains[0 if candidateGrains[0][0] not in rcData['grainMapping'] else 1][0]) # must be me then
# special case of bi-crystal situation...
# special case of bi-crystal situation...
damask.util.croak(' found {} grains'.format(len(rcData['grain'])))
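Two Python 3 issues meet in the sorted() call above: dict.iteritems() is gone (items() replaces it, as this hunk does), and PEP 3113 removed tuple parameters, making lambda (k,v): (v,k) a SyntaxError, so the key is rewritten to index a single argument. The pattern:

    counts = {'a': 3, 'b': 3, 'c': 1}

    # Python 2 only:  sorted(counts.iteritems(), key=lambda (k, v): (v, k), reverse=True)
    ranked = sorted(counts.items(), key=lambda kv: (kv[1], kv[0]), reverse=True)
    print(ranked)    # [('b', 3), ('a', 3), ('c', 1)]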
@@ -729,7 +729,7 @@ def image(name,imgsize,marginX,marginY,rcData):
# -------------------------
def inside(x,y,points):
"""tests whether point(x,y) is within polygon described by points"""
"""Tests whether point(x,y) is within polygon described by points"""
inside = False
npoints=len(points)
(x1,y1) = points[npoints-1] # start with last point of points
@@ -751,7 +751,7 @@ def inside(x,y,points):
# -------------------------
def fftbuild(rcData,height,xframe,yframe,resolution,extrusion):
"""build array of grain numbers"""
"""Build array of grain numbers"""
maxX = -1.*sys.maxint
maxY = -1.*sys.maxint
for line in rcData['point']: # find data range

View File

@@ -4,7 +4,7 @@
import threading,time,os,sys,random
import numpy as np
from optparse import OptionParser
from operator import mul
from cStringIO import StringIO
import damask
@@ -65,7 +64,7 @@ class myThread (threading.Thread):
NmoveGrains = random.randrange(1,maxSeeds)
selectedMs = []
direction = []
for i in xrange(NmoveGrains):
for i in range(NmoveGrains):
selectedMs.append(random.randrange(1,nMicrostructures))
direction.append(np.array(((random.random()-0.5)*delta[0],
@@ -110,7 +109,7 @@ class myThread (threading.Thread):
currentData=np.bincount(perturbedGeomTable.data.astype(int).ravel())[1:]/points
currentError=[]
currentHist=[]
for i in xrange(nMicrostructures): # calculate the deviation in all bins per histogram
for i in range(nMicrostructures): # calculate the deviation in all bins per histogram
currentHist.append(np.histogram(currentData,bins=target[i]['bins'])[0])
currentError.append(np.sqrt(np.square(np.array(target[i]['histogram']-currentHist[i])).sum()))
@@ -122,12 +121,12 @@ class myThread (threading.Thread):
bestMatch = match
#--- count bin classes with no mismatch ----------------------------------------------------------------------
myMatch=0
for i in xrange(nMicrostructures):
for i in range(nMicrostructures):
if currentError[i] > 0.0: break
myMatch = i+1
if myNmicrostructures == nMicrostructures:
for i in xrange(min(nMicrostructures,myMatch+options.bins)):
for i in range(min(nMicrostructures,myMatch+options.bins)):
if currentError[i] > target[i]['error']: # worse fitting, next try
randReset = True
break
@@ -146,7 +145,7 @@ class myThread (threading.Thread):
for line in perturbedSeedsVFile:
currentSeedsFile.write(line)
bestSeedsVFile.write(line)
for j in xrange(nMicrostructures): # save new errors for all bins
for j in range(nMicrostructures): # save new errors for all bins
target[j]['error'] = currentError[j]
if myMatch > match: # one or more new bins have no deviation
damask.util.croak( 'Stage {:d} cleared'.format(myMatch))
@@ -160,7 +159,7 @@ class myThread (threading.Thread):
bestSeedsVFile = StringIO()
for line in perturbedSeedsVFile:
bestSeedsVFile.write(line)
for j in xrange(nMicrostructures):
for j in range(nMicrostructures):
target[j]['error'] = currentError[j]
randReset = True
else: #--- not all grains are tessellated
@@ -219,8 +218,7 @@ if options.randomSeed is None:
damask.util.croak(options.randomSeed)
delta = (options.scale/options.grid[0],options.scale/options.grid[1],options.scale/options.grid[2])
baseFile=os.path.splitext(os.path.basename(options.seedFile))[0]
points = float(reduce(mul,options.grid))
points = np.array(options.grid).prod().astype('float')
# ----------- calculate target distribution and bin edges
targetGeomFile = os.path.splitext(os.path.basename(options.target))[0]+'.geom'
@@ -231,7 +229,7 @@ nMicrostructures = info['microstructures']
targetVolFrac = np.bincount(targetGeomTable.microstructure_read(info['grid']))[1:nMicrostructures+1]/\
float(info['grid'].prod())
target=[]
for i in xrange(1,nMicrostructures+1):
for i in range(1,nMicrostructures+1):
targetHist,targetBins = np.histogram(targetVolFrac,bins=i) #bin boundaries
target.append({'histogram':targetHist,'bins':targetBins})
@@ -260,7 +258,7 @@ info,devNull = initialGeomTable.head_getGeom()
if info['microstructures'] != nMicrostructures: damask.util.croak('error. Microstructure count mismatch')
initialData = np.bincount(initialGeomTable.microstructure_read(info['grid']))/points
for i in xrange(nMicrostructures):
for i in range(nMicrostructures):
initialHist = np.histogram(initialData,bins=target[i]['bins'])[0]
target[i]['error']=np.sqrt(np.square(np.array(target[i]['histogram']-initialHist)).sum())
@@ -269,7 +267,7 @@ if target[0]['error'] > 0.0:
target[0]['error'] *=((target[0]['bins'][0]-np.min(initialData))**2.0+
(target[0]['bins'][1]-np.max(initialData))**2.0)**0.5
match=0
for i in xrange(nMicrostructures):
for i in range(nMicrostructures):
if target[i]['error'] > 0.0: break
match = i+1

View File

@@ -105,12 +105,12 @@ for name in filenames:
grid = np.zeros(3,'i')
n = 0
for i in xrange(Nx):
for j in xrange(Ny):
for i in range(Nx):
for j in range(Ny):
grid[0] = round((i+0.5)*box[0]*info['grid'][0]/Nx-0.5)+offset[0]
grid[1] = round((j+0.5)*box[1]*info['grid'][1]/Ny-0.5)+offset[1]
damask.util.croak('x,y coord on surface: {},{}...'.format(*grid[:2]))
for k in xrange(Nz):
for k in range(Nz):
grid[2] = k + offset[2]
grid %= info['grid']
seeds[n,0:3] = (0.5+grid)/info['grid'] # normalize coordinates to box

View File

@@ -14,12 +14,12 @@ scriptID = ' '.join([scriptName,damask.version])
# ------------------------------------------ aux functions ---------------------------------
def kdtree_search(cloud, queryPoints):
"""find distances to nearest neighbor among cloud (N,d) for each of the queryPoints (n,d)"""
"""Find distances to nearest neighbor among cloud (N,d) for each of the queryPoints (n,d)"""
n = queryPoints.shape[0]
distances = np.zeros(n,dtype=float)
tree = spatial.cKDTree(cloud)
for i in xrange(n):
for i in range(n):
distances[i], index = tree.query(queryPoints[i])
return distances
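cKDTree.query also accepts the whole (n,d) array of query points, which pushes the loop above down into C; an equivalent vectorized call would be:

    import numpy as np
    from scipy import spatial

    cloud = np.random.rand(100, 3)                 # N points in 3-D
    queryPoints = np.random.rand(10, 3)            # n query points

    distances, indices = spatial.cKDTree(cloud).query(queryPoints)
    print(distances.shape)                         # (10,)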
@@ -227,8 +227,8 @@ for name in filenames:
"randomSeed\t{}".format(options.randomSeed),
])
table.labels_clear()
table.labels_append( ['{dim}_{label}'.format(dim = 1+k,label = 'pos') for k in xrange(3)] +
['{dim}_{label}'.format(dim = 1+k,label = 'euler') for k in xrange(3)] +
table.labels_append( ['{dim}_{label}'.format(dim = 1+k,label = 'pos') for k in range(3)] +
['{dim}_{label}'.format(dim = 1+k,label = 'euler') for k in range(3)] +
['microstructure'] +
(['weight'] if options.weights else []))
table.head_write()