Merge branch 'development' of magit1.mpie.de:damask/DAMASK into development

Martin Diehl, 2016-12-20 17:57:27 +01:00
commit 04af989dbf
22 changed files with 472 additions and 205 deletions


@ -1 +1 @@
- v2.0.1-306-g2d0193e
+ v2.0.1-339-gd67be0e


@ -712,6 +712,7 @@ C_routines.o: C_routines.c
tidy:
@rm -rf *.o
@rm -rf *.mod
+ @rm -rf *.optrpt
@rm -rf *.inst.f90 # for instrumentation
@rm -rf *.pomp.f90 # for instrumentation
@rm -rf *.pp.f90 # for instrumentation
@ -724,6 +725,7 @@ cleanDAMASK:
@rm -rf *.marc
@rm -rf *.o
@rm -rf *.mod
+ @rm -rf *.optrpt
@rm -rf *.inst.f90 # for instrumentation
@rm -rf *.pomp.f90 # for instrumentation
@rm -rf *.pp.f90 # for instrumentation


@ -1051,6 +1051,8 @@ end subroutine material_parsePhase
!> @brief parses the texture part in the material configuration file
!--------------------------------------------------------------------------------------------------
subroutine material_parseTexture(fileUnit,myPart)
+ use prec, only: &
+   dNeq
use IO, only: &
IO_read, &
IO_globalTagInPart, &
@ -1069,6 +1071,7 @@ subroutine material_parseTexture(fileUnit,myPart)
inRad, &
math_sampleRandomOri, &
math_I3, &
+ math_det33, &
math_inv33
implicit none
@ -1154,6 +1157,9 @@ subroutine material_parseTexture(fileUnit,myPart)
end select
enddo
+ if(dNeq(math_det33(texture_transformation(1:3,1:3,section)),1.0_pReal)) &
+   call IO_error(157_pInt,section)
case ('hybridia') textureType
texture_ODFfile(section) = IO_stringValue(line,chunkPos,2_pInt)
@ -1586,13 +1592,8 @@ subroutine material_populateGrains
deallocate(phaseOfGrain)
deallocate(textureOfGrain)
deallocate(orientationOfGrain)
+ deallocate(texture_transformation)
deallocate(Nelems)
- !> @todo - causing segmentation fault: needs looking into
- !do homog = 1,material_Nhomogenization
- !  do micro = 1,material_Nmicrostructure
- !    if (Nelems(homog,micro) > 0_pInt) deallocate(elemsOfHomogMicro(homog,micro)%p)
- !  enddo
- !enddo
+ deallocate(elemsOfHomogMicro)
end subroutine material_populateGrains
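The added check guards against `texture_transformation` matrices that change volume: a proper rotation must have determinant one, which `dNeq(math_det33(...),1.0_pReal)` enforces before use. A rough standalone numpy sketch of that invariant (illustration only, not DAMASK code):

```python
import numpy as np

def is_proper_rotation(R, tol=1.0e-9):
    """Orthogonal (R R^T = I) and volume preserving (det R = +1)."""
    return np.allclose(np.dot(R, R.T), np.eye(3), atol=tol) \
           and abs(np.linalg.det(R) - 1.0) < tol

Rz = np.array([[0., -1., 0.],          # 90 degrees about z: a proper rotation
               [1.,  0., 0.],
               [0.,  0., 1.]])
M  = np.diag([1., 1., -1.])            # a mirror, det = -1: the kind of input IO_error(157) now rejects

assert is_proper_rotation(Rz)
assert not is_proper_rotation(M)
```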


@ -0,0 +1,17 @@
# DAMASK patching
This folder contains patches that modify the functionality of the current version of DAMASK ahead of their inclusion in an official release.
## Usage
```bash
cd DAMASK_ROOT
patch -p1 < installation/patch/nameOfPatch
```
## Available patches
* **fwbw_derivative** switches the default spatial derivative from continuous to forward/backward difference.
This generally reduces spurious oscillations in the result, since the spatial accuracy of the derivative then matches the underlying solution grid.
* **petsc3.7** adapts to API changes introduced between PETSc 3.6.x and 3.7.x for setting PETSc options.
Use this patch if your system runs PETSc 3.7.x.


@ -0,0 +1,13 @@
diff --git a/code/numerics.f90 b/code/numerics.f90
index 24bd190..c968c70 100644
--- a/code/numerics.f90
+++ b/code/numerics.f90
@@ -110,7 +110,7 @@ module numerics
fftw_plan_mode = 'FFTW_PATIENT' !< reads the planing-rigor flag, see manual on www.fftw.org, Default FFTW_PATIENT: use patient planner flag
character(len=64), protected, public :: &
spectral_solver = 'basicpetsc' , & !< spectral solution method
- spectral_derivative = 'continuous' !< spectral spatial derivative method
+ spectral_derivative = 'fwbw_difference' !< spectral spatial derivative method
character(len=1024), protected, public :: &
petsc_defaultOptions = '-mech_snes_type ngmres &
&-damage_snes_type ngmres &
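As a rough 1-D numpy sketch of what this patch changes (illustration only; the real operators live in `code/spectral_utilities.f90`): the continuous spectral derivative multiplies each Fourier coefficient by ik, while forward/backward differencing uses discrete difference factors whose accuracy matches the solution grid, which is why it damps the spurious oscillations mentioned above.

```python
import numpy as np

N, dx = 16, 1.0
k = 2.0*np.pi*np.fft.rfftfreq(N, d=dx)           # angular frequencies of the real FFT

D_continuous = 1j*k                              # continuous derivative: i k
D_forward    = (np.exp( 1j*k*dx) - 1.0)/dx       # forward difference in Fourier space
D_backward   = (1.0 - np.exp(-1j*k*dx))/dx       # backward difference in Fourier space

f  = np.sin(2.0*np.pi*np.arange(N)/N)            # periodic sample field
df = np.fft.irfft(D_forward*np.fft.rfft(f), n=N)
assert np.allclose(df, (np.roll(f,-1) - f)/dx)   # reproduces (f[i+1]-f[i])/dx exactly
```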


@ -0,0 +1,22 @@
diff --git a/code/spectral_utilities.f90 b/code/spectral_utilities.f90
index 34eb0ea..a33c7a9 100644
--- a/code/spectral_utilities.f90
+++ b/code/spectral_utilities.f90
@@ -227,12 +227,13 @@ subroutine utilities_init()
trim(PETScDebug), &
' add more using the PETSc_Options keyword in numerics.config '; flush(6)
- call PetscOptionsClear(ierr); CHKERRQ(ierr)
- if(debugPETSc) call PetscOptionsInsertString(trim(PETSCDEBUG),ierr)
+ call PetscOptionsClear(PETSC_NULL_OBJECT,ierr)
CHKERRQ(ierr)
- call PetscOptionsInsertString(trim(petsc_defaultOptions),ierr)
+ if(debugPETSc) call PetscOptionsInsertString(PETSC_NULL_OBJECT,trim(PETSCDEBUG),ierr)
CHKERRQ(ierr)
- call PetscOptionsInsertString(trim(petsc_options),ierr)
+ call PetscOptionsInsertString(PETSC_NULL_OBJECT,trim(petsc_defaultOptions),ierr)
+ CHKERRQ(ierr)
+ call PetscOptionsInsertString(PETSC_NULL_OBJECT,trim(petsc_options),ierr)
CHKERRQ(ierr)
grid1Red = grid(1)/2_pInt + 1_pInt


@ -427,8 +427,8 @@ class ASCIItable():
start = self.label_index(labels)
dim = self.label_dimension(labels)
- return np.hstack([range(c[0],c[0]+c[1]) for c in zip(start,dim)]) \
+ return np.hstack([range(s,s+d) for s,d in zip(start,dim)]).astype(int) \
if isinstance(labels, Iterable) and not isinstance(labels, str) \
else range(start,start+dim)
# ------------------------------------------------------------------
@ -513,14 +513,15 @@ class ASCIItable():
columns = []
for i,(c,d) in enumerate(zip(indices[present],dimensions[present])): # for all valid labels ...
# ... transparently add all components unless column referenced by number or with explicit dimension
- columns += list(range(c,c + \
- (d if str(c) != str(labels[present[i]]) else \
+ columns += list(range(c,c +
+ (d if str(c) != str(labels[present[i]]) else
1)))
use = np.array(columns) if len(columns) > 0 else None
self.tags = list(np.array(self.tags)[use]) # update labels with valid subset
self.data = np.loadtxt(self.__IO__['in'],usecols=use,ndmin=2)
+ # self.data = np.genfromtxt(self.__IO__['in'],dtype=None,names=self.tags,usecols=use)
return labels_missing
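A standalone sketch of the column-index logic in the `np.hstack` hunk above, with assumed `start`/`dim` values as `label_index`/`label_dimension` would supply them; the new `.astype(int)` guarantees an integer index array:

```python
import numpy as np

start = [0, 3, 9]              # first column of each requested label (assumed values)
dim   = [3, 1, 4]              # number of components per label

cols = np.hstack([range(s, s+d) for s, d in zip(start, dim)]).astype(int)
print(cols)                    # [ 0  1  2  3  9 10 11 12]
```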


@ -137,18 +137,18 @@ class Quaternion:
def __imul__(self, other):
"""In-place multiplication"""
try: # Quaternion
- Aw = self.w
Ax = self.x
Ay = self.y
Az = self.z
+ Aw = self.w
- Bw = other.w
Bx = other.x
By = other.y
Bz = other.z
+ Bw = other.w
- self.x = Ax * Bw + Ay * Bz - Az * By + Aw * Bx
- self.y = -Ax * Bz + Ay * Bw + Az * Bx + Aw * By
- self.z = Ax * By - Ay * Bx + Az * Bw + Aw * Bz
- self.w = -Ax * Bx - Ay * By - Az * Bz + Aw * Bw
+ self.w = - Ax * Bx - Ay * By - Az * Bz + Aw * Bw
+ self.x = + Ax * Bw + Ay * Bz - Az * By + Aw * Bx
+ self.y = - Ax * Bz + Ay * Bw + Az * Bx + Aw * By
+ self.z = + Ax * By - Ay * Bx + Az * Bw + Aw * Bz
except: pass
return self
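The reordered assignments compute the same Hamilton product as before; since all components are captured up front, only readability changes. A quick numpy cross-check of the component formulas against the vector form (w_A w_B − a·b, w_A b + w_B a + a×b):

```python
import numpy as np

def hamilton(A, B):                                 # quaternions as (w, x, y, z)
    wA, a = A[0], np.array(A[1:])
    wB, b = B[0], np.array(B[1:])
    return np.r_[wA*wB - a.dot(b), wA*b + wB*a + np.cross(a, b)]

A, B = (0.5, 0.5, 0.5, 0.5), (np.cos(0.3), np.sin(0.3), 0.0, 0.0)

w = - A[1]*B[1] - A[2]*B[2] - A[3]*B[3] + A[0]*B[0]   # component formulas from __imul__
x = + A[1]*B[0] + A[2]*B[3] - A[3]*B[2] + A[0]*B[1]
y = - A[1]*B[3] + A[2]*B[0] + A[3]*B[1] + A[0]*B[2]
z = + A[1]*B[2] - A[2]*B[1] + A[3]*B[0] + A[0]*B[3]

assert np.allclose(hamilton(A, B), [w, x, y, z])
```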
@ -744,27 +744,27 @@ class Symmetry:
if self.lattice == 'cubic':
basis = {'improper':np.array([ [-1. , 0. , 1. ],
[ np.sqrt(2.) , -np.sqrt(2.) , 0. ],
[ 0. , np.sqrt(3.) , 0. ] ]),
'proper':np.array([ [ 0. , -1. , 1. ],
[-np.sqrt(2.) , np.sqrt(2.) , 0. ],
- [ np.sqrt(3. ) , 0. , 0. ] ]),
+ [ np.sqrt(3.) , 0. , 0. ] ]),
}
elif self.lattice == 'hexagonal':
basis = {'improper':np.array([ [ 0. , 0. , 1. ],
[ 1. , -np.sqrt(3.) , 0. ],
[ 0. , 2. , 0. ] ]),
'proper':np.array([ [ 0. , 0. , 1. ],
[-1. , np.sqrt(3.) , 0. ],
- [ np.sqrt(3) , -1. , 0. ] ]),
+ [ np.sqrt(3.) , -1. , 0. ] ]),
}
elif self.lattice == 'tetragonal':
basis = {'improper':np.array([ [ 0. , 0. , 1. ],
[ 1. , -1. , 0. ],
[ 0. , np.sqrt(2.) , 0. ] ]),
'proper':np.array([ [ 0. , 0. , 1. ],
[-1. , 1. , 0. ],
[ np.sqrt(2.) , 0. , 0. ] ]),
}
elif self.lattice == 'orthorhombic':
basis = {'improper':np.array([ [ 0., 0., 1.],
@ -774,26 +774,23 @@ class Symmetry:
[-1., 0., 0.],
[ 0., 1., 0.] ]),
}
- else:
-   basis = {'improper': np.zeros((3,3),dtype=float),
-            'proper': np.zeros((3,3),dtype=float),
-           }
+ else: # direct exit for unspecified symmetry
+   if color:
+     return (True,np.zeros(3,'d'))
+   else:
+     return True
- if np.all(basis == 0.0):
-   theComponents = -np.ones(3,'d')
- else:
v = np.array(vector,dtype = float)
if proper: # check both improper ...
theComponents = np.dot(basis['improper'],v)
inSST = np.all(theComponents >= 0.0)
if not inSST: # ... and proper SST
theComponents = np.dot(basis['proper'],v)
inSST = np.all(theComponents >= 0.0)
else:
v[2] = abs(v[2]) # z component projects identical
theComponents = np.dot(basis['improper'],v) # for positive and negative values
inSST = np.all(theComponents >= 0.0)
if color: # have to return color array
if inSST:
@ -806,7 +803,7 @@ class Symmetry:
else:
return inSST
- # code derived from http://pyeuclid.googlecode.com/svn/trunk/euclid.py
+ # code derived from https://github.com/ezag/pyeuclid
# suggested reading: http://web.mit.edu/2.998/www/QuaternionReport1.pdf
@ -996,7 +993,7 @@ class Orientation:
def related(self,
relationModel,
direction,
- targetSymmetry = None):
+ targetSymmetry = 'cubic'):
"""
Orientation relationship
@ -1244,4 +1241,4 @@ class Orientation:
rot=np.dot(otherMatrix,myMatrix.T)
- return Orientation(matrix=np.dot(rot,self.asMatrix())) # no symmetry information ??
+ return Orientation(matrix=np.dot(rot,self.asMatrix()),symmetry=targetSymmetry)


@ -58,11 +58,11 @@ class Test():
self.parser.add_option("--ok", "--accept",
action = "store_true",
dest = "accept",
help = "calculate results but always consider test as successfull")
help = "calculate results but always consider test as successful")
self.parser.add_option("-l", "--list",
action = "store_true",
dest = "show",
help = "show all test variants and do no calculation")
help = "show all test variants without actual calculation")
self.parser.add_option("-s", "--select",
dest = "select",
help = "run test of given name only")
@ -83,8 +83,9 @@ class Test():
for variant,name in enumerate(self.variants):
if self.options.show:
- logging.critical('{}: {}'.format(variant,name))
- elif self.options.select is not None and name != self.options.select:
+ logging.critical('{}: {}'.format(variant+1,name))
+ elif self.options.select is not None \
+ and not (name == self.options.select or str(variant+1) == self.options.select):
pass
else:
try:
@ -93,14 +94,15 @@ class Test():
self.run(variant)
self.postprocess(variant)
- self.compare(variant)
+ if self.options.update:
+   if self.update(variant) != 0: logging.critical('update for "{}" failed.'.format(name))
+ elif not (self.options.accept or self.compare(variant)): # no update, do comparison
+   return variant+1 # return culprit
- except Exception as e :
- logging.critical('exception during variant execution: {}'.format(e))
+ except Exception as e:
+ logging.critical('exception during variant execution: "{}"'.format(e.message))
return variant+1 # return culprit
return 0
@ -465,7 +467,7 @@ class Test():
mean = np.amax(np.abs(np.mean(normedDelta,0)))
std = np.amax(np.std(normedDelta,0))
logging.info('mean: {:f}'.format(mean))
- logging.info('std: {:f}'.format(std))
+ logging.info('std:  {:f}'.format(std))
return (mean<meanTol) & (std < stdTol)
@ -514,7 +516,7 @@ class Test():
table.close() # ... close
for j,label in enumerate(labels): # iterate over object labels
- maximum[j] = np.maximum(\
+ maximum[j] = np.maximum(
maximum[j],
np.amax(np.linalg.norm(table.data[:,table.label_indexrange(label)],
axis=1))
@ -525,12 +527,21 @@ class Test():
for i in range(len(data)):
data[i] /= maximum # normalize each table
logging.info('shape of data {}: {}'.format(i,data[i].shape))
if debug:
- logging.debug(str(maximum))
- allclose = np.absolute(data[0]-data[1]) <= (atol + rtol*np.absolute(data[1]))
- for ok,valA,valB in zip(allclose,data[0],data[1]):
-   logging.debug('{}:\n{}\n{}'.format(ok,valA,valB))
+ violators = np.absolute(data[0]-data[1]) > atol + rtol*np.absolute(data[1])
+ logging.info('shape of violators: {}'.format(violators.shape))
+ for j,culprits in enumerate(violators):
+   goodguys = np.logical_not(culprits)
+   if culprits.any():
+     logging.info('{} has {}'.format(j,np.sum(culprits)))
+     logging.info('deviation: {}'.format(np.absolute(data[0][j]-data[1][j])[culprits]))
+     logging.info('data : {}'.format(np.absolute(data[1][j])[culprits]))
+     logging.info('deviation: {}'.format(np.absolute(data[0][j]-data[1][j])[goodguys]))
+     logging.info('data : {}'.format(np.absolute(data[1][j])[goodguys]))
+ # for ok,valA,valB in zip(allclose,data[0],data[1]):
+ #   logging.debug('{}:\n{}\n{}'.format(ok,valA,valB))
allclose = True # start optimistic
for i in range(1,len(data)):
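The `violators` mask is the elementwise complement of numpy's closeness criterion |A−B| ≤ atol + rtol·|B|, so it flags exactly what `np.isclose` would reject. A small self-contained check with made-up numbers:

```python
import numpy as np

rtol, atol = 1.0e-5, 1.0e-8
a = np.array([1.0, 2.0, 3.000001])
b = np.array([1.0, 2.1, 3.0])

violators = np.absolute(a-b) > atol + rtol*np.absolute(b)
assert np.array_equal(violators, ~np.isclose(a, b, rtol=rtol, atol=atol))
print(a[violators], b[violators])      # the offending pair: [ 2.] vs [ 2.1]
```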
@ -565,7 +576,7 @@ class Test():
ret = culprit
if culprit == 0:
- msg = 'The test passed' if len(self.variants) == 1 \
+ msg = 'The test passed.' if (self.options.select is not None or len(self.variants) == 1) \
else 'All {} tests passed.'.format(len(self.variants))
elif culprit == -1:
msg = 'Warning: Could not start test...'

Binary image file changed (34 KiB before and after).

misc/DAMASK_QR-CodeBW.png: new binary file (5.5 KiB, not shown).

misc/DAMASK_QR-CodeBW.svg: new file, 31 lines (9.0 KiB; diff suppressed).


@ -14,26 +14,27 @@ scriptID = ' '.join([scriptName,damask.version])
# --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
- Add data in column(s) of second ASCIItable selected from row that is given by the value in a mapping column.
+ Add data in column(s) of mapped ASCIItable selected from the row indexed by the value in a mapping column.
+ Row numbers start at 1.
""", version = scriptID)
- parser.add_option('-c','--map',
-                   dest = 'map',
+ parser.add_option('--index',
+                   dest = 'index',
type = 'string', metavar = 'string',
- help = 'heading of column containing row mapping')
+ help = 'column label containing row index')
parser.add_option('-o','--offset',
dest = 'offset',
type = 'int', metavar = 'int',
- help = 'offset between mapping column value and actual row in mapped table [%default]')
+ help = 'constant offset for index column value [%default]')
parser.add_option('-l','--label',
dest = 'label',
action = 'extend', metavar = '<string LIST>',
- help='column label(s) to be mapped')
+ help = 'column label(s) to be appended')
parser.add_option('-a','--asciitable',
dest = 'asciitable',
type = 'string', metavar = 'string',
- help = 'mapped ASCIItable')
+ help = 'indexed ASCIItable')
parser.set_defaults(offset = 0,
)
@ -42,24 +43,25 @@ parser.set_defaults(offset = 0,
if options.label is None:
parser.error('no data columns specified.')
- if options.map is None:
-   parser.error('no mapping column given.')
+ if options.index is None:
+   parser.error('no index column given.')
- # ------------------------------------------ process mapping ASCIItable ---------------------------
+ # ------------------------------------------ process indexed ASCIItable ---------------------------
if options.asciitable is not None and os.path.isfile(options.asciitable):
- mappedTable = damask.ASCIItable(name = options.asciitable,
-                                 buffered = False,
-                                 readonly = True)
- mappedTable.head_read() # read ASCII header info of mapped table
- missing_labels = mappedTable.data_readArray(options.label)
+ indexedTable = damask.ASCIItable(name = options.asciitable,
+                                  buffered = False,
+                                  readonly = True)
+ indexedTable.head_read() # read ASCII header info of indexed table
+ missing_labels = indexedTable.data_readArray(options.label)
+ indexedTable.close() # close input ASCII table
if len(missing_labels) > 0:
damask.util.croak('column{} {} not found...'.format('s' if len(missing_labels) > 1 else '',', '.join(missing_labels)))
else:
- parser.error('no mapped ASCIItable given.')
+ parser.error('no indexed ASCIItable given.')
# --- loop over input files -------------------------------------------------------------------------
@ -79,8 +81,8 @@ for name in filenames:
errors = []
- mappedColumn = table.label_index(options.map)
- if mappedColumn < 0: errors.append('mapping column {} not found.'.format(options.map))
+ indexColumn = table.label_index(options.index)
+ if indexColumn < 0: errors.append('index column {} not found.'.format(options.index))
if errors != []:
damask.util.croak(errors)
@ -90,7 +92,7 @@ for name in filenames:
# ------------------------------------------ assemble header --------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
- table.labels_append(mappedTable.labels(raw = True)) # extend ASCII header with new labels
+ table.labels_append(indexedTable.labels(raw = True)) # extend ASCII header with new labels
table.head_write()
# ------------------------------------------ process data ------------------------------------------
@ -98,13 +100,11 @@ for name in filenames:
outputAlive = True
while outputAlive and table.data_read(): # read next data line of ASCII table
try:
- table.data_append(mappedTable.data[int(round(float(table.data[mappedColumn])))+options.offset-1]) # add all mapped data types
+ table.data_append(indexedTable.data[int(round(float(table.data[indexColumn])))+options.offset-1]) # add all mapped data types
except IndexError:
- table.data_append(np.nan*np.ones_like(mappedTable.data[0]))
+ table.data_append(np.nan*np.ones_like(indexedTable.data[0]))
outputAlive = table.data_write() # output processed line
# ------------------------------------------ output finalization -----------------------------------
table.close() # close ASCII tables
- mappedTable.close() # close mapped input ASCII table
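The lookup itself is plain integer indexing: the value found in the index column, plus the offset, minus one (row numbers start at 1) selects a row of the indexed table, and rows whose index points outside it are padded with NaN. A sketch with assumed data:

```python
import numpy as np

indexed = np.array([[10., 11.],            # data rows of the indexed table (assumed)
                    [20., 21.],
                    [30., 31.]])
offset  = 0

for value in [1, 3, 7]:                    # values found in the index column (1-based)
    try:
        extra = indexed[int(round(float(value))) + offset - 1]
    except IndexError:
        extra = np.nan*np.ones_like(indexed[0])   # index points nowhere: pad with NaNs
    print(value, extra)
```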

processing/post/addLinked.py: new executable file (110 lines).

@ -0,0 +1,110 @@
#!/usr/bin/env python2.7
# -*- coding: UTF-8 no BOM -*-
import os,sys
import numpy as np
from optparse import OptionParser
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
Add data of selected column(s) from (first) row of second ASCIItable that shares the linking column value.
""", version = scriptID)
parser.add_option('--link',
dest = 'link', nargs = 2,
type = 'string', metavar = 'string string',
help = 'column labels containing linked values')
parser.add_option('-l','--label',
dest = 'label',
action = 'extend', metavar = '<string LIST>',
help = 'column label(s) to be appended')
parser.add_option('-a','--asciitable',
dest = 'asciitable',
type = 'string', metavar = 'string',
help = 'linked ASCIItable')
parser.set_defaults()
(options,filenames) = parser.parse_args()
if options.label is None:
parser.error('no data columns specified.')
if options.link is None:
parser.error('no linking columns given.')
# -------------------------------------- process linked ASCIItable --------------------------------
if options.asciitable is not None and os.path.isfile(options.asciitable):
linkedTable = damask.ASCIItable(name = options.asciitable,
buffered = False,
readonly = True)
linkedTable.head_read() # read ASCII header info of linked table
if linkedTable.label_dimension(options.link[1]) != 1:
parser.error('linking column {} needs to be scalar valued.'.format(options.link[1]))
missing_labels = linkedTable.data_readArray([options.link[1]]+options.label)
linkedTable.close() # close linked ASCII table
if len(missing_labels) > 0:
damask.util.croak('column{} {} not found...'.format('s' if len(missing_labels) > 1 else '',', '.join(missing_labels)))
index = linkedTable.data[:,0]
data = linkedTable.data[:,1:]
else:
parser.error('no linked ASCIItable given.')
# --- loop over input files -----------------------------------------------------------------------
if filenames == []: filenames = [None]
for name in filenames:
try: table = damask.ASCIItable(name = name,
buffered = False)
except: continue
damask.util.report(scriptName,name)
# ------------------------------------------ read header ------------------------------------------
table.head_read()
# ------------------------------------------ sanity checks ----------------------------------------
errors = []
linkColumn = table.label_index(options.link[0])
if linkColumn < 0: errors.append('linking column {} not found.'.format(options.link[0]))
if errors != []:
damask.util.croak(errors)
table.close(dismiss = True)
continue
# ------------------------------------------ assemble header --------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.labels_append(linkedTable.labels(raw = True)[1:]) # extend with new labels (except for linked column)
table.head_write()
# ------------------------------------------ process data ------------------------------------------
outputAlive = True
while outputAlive and table.data_read(): # read next data line of ASCII table
try:
table.data_append(data[np.argwhere(index == float(table.data[linkColumn]))[0]]) # add data from first matching line
except IndexError:
table.data_append(np.nan*np.ones_like(data[0])) # or add NaNs
outputAlive = table.data_write() # output processed line
# ------------------------------------------ output finalization -----------------------------------
table.close() # close ASCII tables
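A sketch of the linking lookup with assumed data: `np.argwhere` collects the rows whose linking value matches, the first match supplies the appended columns, and an empty match list raises the `IndexError` that triggers NaN padding:

```python
import numpy as np

index = np.array([ 3.,  7., 12.])          # linking column of the linked table (assumed)
data  = np.array([[0.1, 0.2],
                  [0.3, 0.4],
                  [0.5, 0.6]])

for link_value in [7., 9.]:
    try:
        row = data[np.argwhere(index == link_value)[0]]   # first row with matching link value
    except IndexError:
        row = np.nan*np.ones_like(data[0])                # no match: pad with NaNs
    print(link_value, row)
```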


@ -61,9 +61,8 @@ parser.set_defaults(condition = '',
if filenames == []: filenames = [None]
for name in filenames:
- try:
-   table = damask.ASCIItable(name = name,
-                             buffered = False)
+ try: table = damask.ASCIItable(name = name,
+                                buffered = False)
except: continue
damask.util.report(scriptName,name)


@ -7,14 +7,11 @@ import numpy as np
from optparse import OptionParser, OptionGroup
import damask
#"https://en.wikipedia.org/wiki/Center_of_mass#Systems_with_periodic_boundary_conditions"
def periodicAverage(Points, Box):
theta = (Points/Box[1]) * (2.0*np.pi)
xi = np.cos(theta)
zeta = np.sin(theta)
theta_avg = np.arctan2(-1.0*zeta.mean(), -1.0*xi.mean()) + np.pi
Pmean = Box[1] * theta_avg/(2.0*np.pi)
return Pmean
def periodicAverage(coords, limits):
"""Centroid in periodic domain, see https://en.wikipedia.org/wiki/Center_of_mass#Systems_with_periodic_boundary_conditions"""
theta = 2.0*np.pi * (coords - limits[0])/(limits[1] - limits[0])
theta_avg = np.pi + np.arctan2(-np.sin(theta).mean(axis=0), -np.cos(theta).mean(axis=0))
return limits[0] + theta_avg * (limits[1] - limits[0])/2.0/np.pi
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
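A self-contained demonstration of the rewritten `periodicAverage`: coordinates are mapped onto a circle, averaged there, and mapped back, so values near opposite domain boundaries average to the wrap-around centroid rather than the domain middle:

```python
import numpy as np

def periodicAverage(coords, limits):
    theta = 2.0*np.pi*(np.asarray(coords) - limits[0])/(limits[1] - limits[0])
    theta_avg = np.pi + np.arctan2(-np.sin(theta).mean(axis=0), -np.cos(theta).mean(axis=0))
    return limits[0] + theta_avg*(limits[1] - limits[0])/2.0/np.pi

print(periodicAverage([0.9, 0.2], (0.0, 1.0)))   # 0.05: 0.9 acts as -0.1 across the boundary
print(np.mean([0.9, 0.2]))                       # 0.55: the naive average ignores the wrap
```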
@ -26,6 +23,7 @@ scriptID = ' '.join([scriptName,damask.version])
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
Apply a user-specified function to condense all rows for which column 'label' has identical values into a single row.
Output table will contain as many rows as there are different (unique) values in the grouping column.
+ Periodic domain averaging of coordinate values is supported.
Examples:
For grain averaged values, replace all rows of particular 'texture' with a single row containing their average.
@ -43,23 +41,22 @@ parser.add_option('-a','--all',
dest = 'all',
action = 'store_true',
help = 'apply mapping function also to grouping column')
group = OptionGroup(parser, "periodic averaging", "")
- group.add_option('-p','--periodic',
-                  dest = 'periodic',
-                  action = 'store_true',
-                  help = 'calculate average in periodic space defined by periodic length [%default]')
- group.add_option('--boundary',
-                  dest = 'boundary', metavar = 'MIN MAX',
-                  type = 'float', nargs = 2,
-                  help = 'define periodic box end points %default')
+ group.add_option ('-p','--periodic',
+                   dest = 'periodic',
+                   action = 'extend', metavar = '<string LIST>',
+                   help = 'coordinate label(s) to average across periodic domain')
+ group.add_option ('--limits',
+                   dest = 'boundary',
+                   type = 'float', metavar = 'float float', nargs = 2,
+                   help = 'min and max of periodic domain %default')
parser.add_option_group(group)
parser.set_defaults(function = 'np.average',
all = False,
- periodic = False,
boundary = [0.0, 1.0])
(options,filenames) = parser.parse_args()
@ -108,6 +105,7 @@ for name in filenames:
# ------------------------------------------ process data --------------------------------
table.data_readArray()
+ indexrange = table.label_indexrange(options.periodic) if options.periodic is not None else []
rows,cols = table.data.shape
table.data = table.data[np.lexsort([table.data[:,grpColumn]])] # sort data by grpColumn
@ -117,10 +115,10 @@ for name in filenames:
grpTable = np.empty((len(values), cols)) # initialize output
for i in range(len(values)): # iterate over groups (unique values in grpColumn)
- if options.periodic :
-   grpTable[i] = periodicAverage(table.data[index[i]:index[i+1]],options.boundary) # apply periodicAverage mapping function
- else :
-   grpTable[i] = np.apply_along_axis(mapFunction,0,table.data[index[i]:index[i+1]]) # apply mapping function
+ grpTable[i] = np.apply_along_axis(mapFunction,0,table.data[index[i]:index[i+1]]) # apply (general) mapping function
+ grpTable[i,indexrange] = \
+   periodicAverage(table.data[index[i]:index[i+1],indexrange],options.boundary) # apply periodicAverage mapping function
if not options.all: grpTable[i,grpColumn] = table.data[index[i],grpColumn] # restore grouping column value
table.data = grpTable


@ -34,10 +34,9 @@ parser.set_defaults(pos = 'pos',
if filenames == []: filenames = [None]
for name in filenames:
- try:
-   table = damask.ASCIItable(name = name,
-                             buffered = False,
-                             readonly = True)
+ try: table = damask.ASCIItable(name = name,
+                                buffered = False,
+                                readonly = True)
except: continue
damask.util.report(scriptName,name)


@ -1,13 +1,20 @@
#!/usr/bin/env bash
if [[ "$1" == "-f" || "$1" == "--float" ]]; then
shift
arg='--float'
else
arg=''
fi
for geom in "$@"
do
- geom_toTable \
+ geom_toTable $arg \
< $geom \
| \
vtk_rectilinearGrid > ${geom%.*}.vtk
- geom_toTable \
+ geom_toTable $arg \
< $geom \
| \
vtk_addRectilinearGridData \


@ -178,6 +178,10 @@ parser.add_option_group(group)
group = OptionGroup(parser, "Configuration","")
+ group.add_option('--no-config',
+                  dest = 'config',
+                  action = 'store_false',
+                  help = 'omit material configuration header')
group.add_option('--homogenization',
dest = 'homogenization',
type = 'int', metavar = 'int',
@ -203,6 +207,7 @@ parser.set_defaults(pos = 'pos',
cpus = 2,
laguerre = False,
nonperiodic = False,
+ config = True,
)
(options,filenames) = parser.parse_args()
@ -303,20 +308,21 @@ for name in filenames:
config_header = []
formatwidth = 1+int(math.log10(NgrainIDs))
- config_header += ['<microstructure>']
- for i,ID in enumerate(grainIDs):
-   config_header += ['[Grain{}]'.format(str(ID).zfill(formatwidth)),
-                     'crystallite {}'.format(options.crystallite),
-                     '(constituent)\tphase {}\ttexture {}\tfraction 1.0'.format(options.phase,str(ID).rjust(formatwidth)),
-                    ]
- if hasEulers:
-   config_header += ['<texture>']
-   for ID in grainIDs:
-     eulerID = np.nonzero(grains == ID)[0][0] # find first occurrence of this grain id
-     config_header += ['[Grain{}]'.format(str(ID).zfill(formatwidth)),
-                       '(gauss)\tphi1 {:g}\tPhi {:g}\tphi2 {:g}\tscatter 0.0\tfraction 1.0'.format(*eulers[eulerID])
-                      ]
- if options.axes is not None: config_header.append('axes\t{} {} {}'.format(*options.axes))
+ if options.config:
+   config_header += ['<microstructure>']
+   for i,ID in enumerate(grainIDs):
+     config_header += ['[Grain{}]'.format(str(ID).zfill(formatwidth)),
+                       'crystallite {}'.format(options.crystallite),
+                       '(constituent)\tphase {}\ttexture {}\tfraction 1.0'.format(options.phase,str(ID).rjust(formatwidth)),
+                      ]
+   if hasEulers:
+     config_header += ['<texture>']
+     for ID in grainIDs:
+       eulerID = np.nonzero(grains == ID)[0][0] # find first occurrence of this grain id
+       config_header += ['[Grain{}]'.format(str(ID).zfill(formatwidth)),
+                         '(gauss)\tphi1 {:g}\tPhi {:g}\tphi2 {:g}\tscatter 0.0\tfraction 1.0'.format(*eulers[eulerID])
+                        ]
+   if options.axes is not None: config_header.append('axes\t{} {} {}'.format(*options.axes))
table.labels_clear()
table.info_clear()


@ -22,32 +22,46 @@ The final geometry is assembled by selecting at each voxel that grain index for
""", version = scriptID)
- parser.add_option('-d', '--distance', dest='d', type='int', metavar='int',
-                   help='diffusion distance in voxels [%default]')
- parser.add_option('-N', '--smooth', dest='N', type='int', metavar='int',
-                   help='N for curvature flow [%default]')
- parser.add_option('-r', '--renumber', dest='renumber', action='store_true',
-                   help='renumber microstructure indices from 1...N [%default]')
- parser.add_option('-i', '--immutable', action='extend', dest='immutable', metavar = '<int LIST>',
-                   help='list of immutable microstructures')
+ parser.add_option('-d', '--distance',
+                   dest = 'd',
+                   type = 'float', metavar = 'float',
+                   help = 'diffusion distance in voxels [%default]')
+ parser.add_option('-N', '--iterations',
+                   dest = 'N',
+                   type = 'int', metavar = 'int',
+                   help = 'curvature flow iterations [%default]')
+ parser.add_option('-i', '--immutable',
+                   action = 'extend', dest = 'immutable', metavar = '<int LIST>',
+                   help = 'list of immutable microstructure indices')
+ parser.add_option('-r', '--renumber',
+                   dest = 'renumber', action='store_true',
+                   help = 'output consecutive microstructure indices')
+ parser.add_option('--ndimage',
+                   dest = 'ndimage', action='store_true',
+                   help = 'use ndimage.gaussian_filter in lieu of explicit FFT')
- parser.set_defaults(d = 1)
- parser.set_defaults(N = 1)
- parser.set_defaults(renumber = False)
- parser.set_defaults(immutable = [])
+ parser.set_defaults(d = 1,
+                     N = 1,
+                     immutable = [],
+                     renumber = False,
+                     ndimage = False,
+                    )
(options, filenames) = parser.parse_args()
options.immutable = map(int,options.immutable)
+ getInterfaceEnergy = lambda A,B: np.float32((A*B != 0)*(A != B)*1.0) # 1.0 if A & B are distinct & nonzero, 0.0 otherwise
+ struc = ndimage.generate_binary_structure(3,1) # 3D von Neumann neighborhood
# --- loop over input files -----------------------------------------------------------------------
if filenames == []: filenames = [None]
for name in filenames:
- try:
-   table = damask.ASCIItable(name = name,
-                             buffered = False, labeled = False)
+ try: table = damask.ASCIItable(name = name,
+                                buffered = False,
+                                labeled = False)
except: continue
damask.util.report(scriptName,name)
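The relocated `getInterfaceEnergy` marks voxel pairs that belong to different, nonzero grains (index 0 meaning 'no grain'), and `struc` is the 6-connected von Neumann neighborhood later used for dilation. A tiny demonstration with made-up grain indices:

```python
import numpy as np

getInterfaceEnergy = lambda A,B: np.float32((A*B != 0)*(A != B)*1.0)

A = np.array([1, 1, 2, 0])
B = np.array([1, 2, 2, 2])
print(getInterfaceEnergy(A, B))    # [ 0.  1.  0.  0.]: only the distinct nonzero pair 1|2 counts
```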
@ -56,12 +70,12 @@ for name in filenames:
table.head_read()
info,extra_header = table.head_getGeom()
- damask.util.croak(['grid     a b c: %s'%(' x '.join(map(str,info['grid']))),
-                    'size     x y z: %s'%(' x '.join(map(str,info['size']))),
-                    'origin   x y z: %s'%(' : '.join(map(str,info['origin']))),
-                    'homogenization: %i'%info['homogenization'],
-                    'microstructures: %i'%info['microstructures'],
-                   ])
+ damask.util.croak(['grid     a b c: {}'.format(' x '.join(map(str,info['grid']))),
+                    'size     x y z: {}'.format(' x '.join(map(str,info['size']))),
+                    'origin   x y z: {}'.format(' : '.join(map(str,info['origin']))),
+                    'homogenization: {}'.format(info['homogenization']),
+                    'microstructures: {}'.format(info['microstructures']),
+                   ])
errors = []
if np.any(info['grid'] < 1): errors.append('invalid grid a b c.')
@ -71,39 +85,43 @@ for name in filenames:
table.close(dismiss = True)
continue
# --- read data -----------------------------------------------------------------------------------
- microstructure = np.tile(np.array(table.microstructure_read(info['grid']),'i').reshape(info['grid'],order='F'),
-                          np.where(info['grid'] == 1, 2,1)) # make one copy along dimensions with grid == 1
+ microstructure = np.tile(table.microstructure_read(info['grid']).reshape(info['grid'],order='F'),
+                          np.where(info['grid'] == 1, 2,1)) # make one copy along dimensions with grid == 1
grid = np.array(microstructure.shape)
# --- initialize support data ---------------------------------------------------------------------
+ # store a copy of the initial microstructure to find locations of immutable indices
microstructure_original = np.copy(microstructure)
- X,Y,Z = np.mgrid[0:grid[0],0:grid[1],0:grid[2]]
- # Calculates gaussian weights for simulating 3d diffusion
- gauss = np.exp(-(X*X + Y*Y + Z*Z)/(2.0*options.d*options.d))/math.pow(2.0*np.pi*options.d*options.d,1.5)
- gauss[:,:,grid[2]/2::] = gauss[:,:,int(round(grid[2]/2.))-1::-1] # trying to cope with uneven (odd) grid size
- gauss[:,grid[1]/2::,:] = gauss[:,int(round(grid[1]/2.))-1::-1,:]
- gauss[grid[0]/2::,:,:] = gauss[int(round(grid[0]/2.))-1::-1,:,:]
- gauss = np.fft.rfftn(gauss)
- getInterfaceEnergy = lambda A,B: (A*B != 0)*(A != B)*1.0 # 1.0 if A & B are distinct & nonzero, 0.0 otherwise
- struc = ndimage.generate_binary_structure(3,1) # 3D von Neumann neighborhood
+ if not options.ndimage:
+   X,Y,Z = np.mgrid[0:grid[0],0:grid[1],0:grid[2]]
+   # Calculates gaussian weights for simulating 3d diffusion
+   gauss = np.exp(-(X*X + Y*Y + Z*Z)/(2.0*options.d*options.d),dtype=np.float32) \
+           /np.power(2.0*np.pi*options.d*options.d,(3.0 - np.count_nonzero(info['grid'] == 1))/2.,dtype=np.float32)
+   gauss[:,:,:grid[2]/2:-1] = gauss[:,:,1:(grid[2]+1)/2] # trying to cope with uneven (odd) grid size
+   gauss[:,:grid[1]/2:-1,:] = gauss[:,1:(grid[1]+1)/2,:]
+   gauss[:grid[0]/2:-1,:,:] = gauss[1:(grid[0]+1)/2,:,:]
+   gauss = np.fft.rfftn(gauss).astype(np.complex64)
for smoothIter in range(options.N):
- periodic_microstructure = np.tile(microstructure,(3,3,3))[grid[0]/2:-grid[0]/2,
-                                                           grid[1]/2:-grid[1]/2,
-                                                           grid[2]/2:-grid[2]/2] # periodically extend the microstructure
- interfaceEnergy = np.zeros(microstructure.shape)
+ # replace immutable microstructures with closest mutable ones
+ index = ndimage.morphology.distance_transform_edt(np.in1d(microstructure,options.immutable).reshape(grid),
+                                                   return_distances = False,
+                                                   return_indices = True)
+ microstructure = microstructure[index[0],
+                                 index[1],
+                                 index[2]]
+ interfaceEnergy = np.zeros(microstructure.shape,dtype=np.float32)
for i in (-1,0,1):
for j in (-1,0,1):
for k in (-1,0,1):
# assign interfacial energy to all voxels that have a differing neighbor (in Moore neighborhood)
interfaceEnergy = np.maximum(interfaceEnergy,
getInterfaceEnergy(microstructure,np.roll(np.roll(np.roll(
microstructure,i,axis=0), j,axis=1), k,axis=2)))
@ -112,44 +130,67 @@ for name in filenames:
periodic_interfaceEnergy = np.tile(interfaceEnergy,(3,3,3))[grid[0]/2:-grid[0]/2,
grid[1]/2:-grid[1]/2,
grid[2]/2:-grid[2]/2]
- # transform bulk volume (i.e. where interfacial energy is zero)
+ # transform bulk volume (i.e. where interfacial energy remained zero), store index of closest boundary voxel
index = ndimage.morphology.distance_transform_edt(periodic_interfaceEnergy == 0.,
return_distances = False,
return_indices = True)
# want array index of nearest voxel on periodically extended boundary
periodic_bulkEnergy = periodic_interfaceEnergy[index[0],
index[1],
index[2]].reshape(2*grid) # fill bulk with energy of nearest interface
- diffusedEnergy = np.fft.irfftn(np.fft.rfftn(
-                  np.where(
-                    ndimage.morphology.binary_dilation(interfaceEnergy > 0.,
-                                                       structure = struc,
-                                                       iterations = options.d/2 + 1), # fat boundary | PE: why 2d-1? I would argue for d/2 + 1
-                    periodic_bulkEnergy[grid[0]/2:-grid[0]/2, # retain filled energy on fat boundary...
-                                        grid[1]/2:-grid[1]/2,
-                                        grid[2]/2:-grid[2]/2], # ...and zero everywhere else
-                    0.))*gauss)
- periodic_diffusedEnergy = np.tile(diffusedEnergy,(3,3,3))[grid[0]/2:-grid[0]/2,
-                                                           grid[1]/2:-grid[1]/2,
-                                                           grid[2]/2:-grid[2]/2] # periodically extend the smoothed bulk energy
+ if options.ndimage:
+   periodic_diffusedEnergy = ndimage.gaussian_filter(
+                             np.where(ndimage.morphology.binary_dilation(periodic_interfaceEnergy > 0.,
+                                                                         structure = struc,
+                                                                         iterations = int(round(options.d*2.))-1, # fat boundary
+                                                                        ),
+                                      periodic_bulkEnergy, # ...and zero everywhere else
+                                      0.),
+                             sigma = options.d)
+ else:
+   diffusedEnergy = np.fft.irfftn(np.fft.rfftn(
+                    np.where(
+                      ndimage.morphology.binary_dilation(interfaceEnergy > 0.,
+                                                         structure = struc,
+                                                         iterations = int(round(options.d*2.))-1), # fat boundary
+                      periodic_bulkEnergy[grid[0]/2:-grid[0]/2, # retain filled energy on fat boundary...
+                                          grid[1]/2:-grid[1]/2,
+                                          grid[2]/2:-grid[2]/2], # ...and zero everywhere else
+                      0.)).astype(np.complex64) *
+                    gauss).astype(np.float32)
+   periodic_diffusedEnergy = np.tile(diffusedEnergy,(3,3,3))[grid[0]/2:-grid[0]/2,
+                                                             grid[1]/2:-grid[1]/2,
+                                                             grid[2]/2:-grid[2]/2] # periodically extend the smoothed bulk energy
- # transform voxels close to interface region | question PE: what motivates 1/2 (could be any small number, or)?
- index = ndimage.morphology.distance_transform_edt(periodic_diffusedEnergy >= 0.5,
+ # transform voxels close to interface region
+ index = ndimage.morphology.distance_transform_edt(periodic_diffusedEnergy >= 0.95*np.amax(periodic_diffusedEnergy),
return_distances = False,
return_indices = True) # want index of closest bulk grain
+ periodic_microstructure = np.tile(microstructure,(3,3,3))[grid[0]/2:-grid[0]/2,
+                                                           grid[1]/2:-grid[1]/2,
+                                                           grid[2]/2:-grid[2]/2] # periodically extend the microstructure
microstructure = periodic_microstructure[index[0],
index[1],
index[2]].reshape(2*grid)[grid[0]/2:-grid[0]/2,
grid[1]/2:-grid[1]/2,
grid[2]/2:-grid[2]/2] # extend grains into interface region
- immutable = np.zeros(microstructure.shape, dtype=bool)
- # find locations where immutable microstructures have been or are now
+ immutable = np.zeros(microstructure.shape, dtype=np.bool)
+ # find locations where immutable microstructures have been (or are now)
for micro in options.immutable:
- immutable += np.logical_or(microstructure == micro, microstructure_original == micro)
+ immutable += microstructure_original == micro
# undo any changes involving immutable microstructures
microstructure = np.where(immutable, microstructure_original,microstructure)
# --- renumber to sequence 1...Ngrains if requested -----------------------------------------------
# http://stackoverflow.com/questions/10741346/np-frequency-counts-for-unique-values-in-an-array
if options.renumber:
@ -162,13 +203,14 @@ for name in filenames:
newInfo = {'microstructures': 0,}
newInfo['microstructures'] = microstructure.max()
# --- report --------------------------------------------------------------------------------------
remarks = []
- if (newInfo['microstructures'] != info['microstructures']): remarks.append('--> microstructures: %i'%newInfo['microstructures'])
+ if newInfo['microstructures'] != info['microstructures']:
+   remarks.append('--> microstructures: {}'.format(newInfo['microstructures']))
if remarks != []: damask.util.croak(remarks)
# --- write header --------------------------------------------------------------------------------
table.labels_clear()
table.info_clear()
@ -185,8 +227,11 @@ for name in filenames:
# --- write microstructure information ------------------------------------------------------------
formatwidth = int(math.floor(math.log10(microstructure.max())+1))
- table.data = microstructure.reshape((info['grid'][0],info['grid'][1]*info['grid'][2]),order='F').transpose()
- table.data_writeArray('%%%ii'%(formatwidth),delimiter = ' ')
+ table.data = microstructure[::1 if info['grid'][0]>1 else 2,
+                             ::1 if info['grid'][1]>1 else 2,
+                             ::1 if info['grid'][2]>1 else 2,].\
+              reshape((info['grid'][0],info['grid'][1]*info['grid'][2]),order='F').transpose()
+ table.data_writeArray('%{}i'.format(formatwidth),delimiter = ' ')
# --- output finalization --------------------------------------------------------------------------
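A rough 1-D sketch of why the new `--ndimage` path and the explicit FFT path should agree for periodic data (assuming the filter width equals the diffusion distance `d`; small deviations remain because `gaussian_filter` truncates its kernel):

```python
import numpy as np
from scipy import ndimage

N, d = 32, 2.0
f = np.random.rand(N).astype(np.float32)

r = np.minimum(np.arange(N, dtype=float), N - np.arange(N, dtype=float))  # periodic distance to origin
g = np.exp(-r*r/(2.*d*d)); g /= g.sum()                                   # wrapped, normalized Gaussian kernel
smooth_fft = np.fft.irfft(np.fft.rfft(f)*np.fft.rfft(g), n=N)             # circular convolution via FFT

smooth_ndi = ndimage.gaussian_filter(f, sigma=d, mode='wrap')
print(np.max(np.abs(smooth_fft - smooth_ndi)))                            # small residual from kernel truncation
```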


@ -19,8 +19,17 @@ Translate geom description into ASCIItable containing position and microstructur
""", version = scriptID)
+ parser.add_option('--float',
+                   dest = 'real',
+                   action = 'store_true',
+                   help = 'use float input')
+ parser.set_defaults(real = False,
+                    )
(options, filenames) = parser.parse_args()
+ datatype = 'f' if options.real else 'i'
# --- loop over input files -------------------------------------------------------------------------
if filenames == []: filenames = [None]
@ -56,7 +65,7 @@ for name in filenames:
# --- read data ------------------------------------------------------------------------------------
- microstructure = table.microstructure_read(info['grid'])
+ microstructure = table.microstructure_read(info['grid'],datatype)
# ------------------------------------------ assemble header ---------------------------------------


@ -109,7 +109,6 @@ for name in filenames:
for j in range(Ny):
grid[0] = round((i+0.5)*box[0]*info['grid'][0]/Nx-0.5)+offset[0]
grid[1] = round((j+0.5)*box[1]*info['grid'][1]/Ny-0.5)+offset[1]
- damask.util.croak('x,y coord on surface: {},{}...'.format(*grid[:2]))
for k in range(Nz):
grid[2] = k + offset[2]
grid %= info['grid']