Merge branch 'development' into NoCoreModule

Martin Diehl 2016-04-15 00:05:38 +02:00
commit 573d10f1e1
24 changed files with 735 additions and 372 deletions

.gitattributes (vendored, new file)

@@ -0,0 +1,8 @@
# from https://help.github.com/articles/dealing-with-line-endings/
#
# always use LF, even if the files are edited on windows, they need to be compiled/used on unix
* text eol=lf
# Denote all files that are truly binary and should not be modified.
*.png binary
*.jpg binary


@@ -2,7 +2,7 @@
 # usage: source DAMASK_env.sh
 if [ "$OSTYPE" == "linux-gnu" ] || [ "$OSTYPE" == 'linux' ]; then
-  DAMASK_ROOT=$(readlink -f "`dirname $BASH_SOURCE`")
+  DAMASK_ROOT=$(python -c "import os,sys; print(os.path.realpath(os.path.expanduser(sys.argv[1])))" "`dirname $BASH_SOURCE`")
 else
   [[ "${BASH_SOURCE::1}" == "/" ]] && BASE="" || BASE="`pwd`/"
   STAT=$(stat "`dirname $BASE$BASH_SOURCE`")
@@ -18,11 +18,11 @@ fi
 SOLVER=`which DAMASK_spectral 2>/dev/null`
 if [ "x$SOLVER" == "x" ]; then
-  export SOLVER='Not found!'
+  SOLVER='Not found!'
 fi
 PROCESSING=`which postResults 2>/dev/null`
 if [ "x$PROCESSING" == "x" ]; then
-  export PROCESSING='Not found!'
+  PROCESSING='Not found!'
 fi
 # according to http://software.intel.com/en-us/forums/topic/501500
@@ -55,7 +55,8 @@ if [ ! -z "$PS1" ]; then
   echo "Multithreading DAMASK_NUM_THREADS=$DAMASK_NUM_THREADS"
   if [ "x$PETSC_DIR" != "x" ]; then
     echo "PETSc location $PETSC_DIR"
-    [[ `readlink -f $PETSC_DIR` == $PETSC_DIR ]] || echo " ~~> "`readlink -f $PETSC_DIR`
+    [[ `python -c "import os,sys; print(os.path.realpath(os.path.expanduser(sys.argv[1])))" "$PETSC_DIR"` == $PETSC_DIR ]] \
+    || echo " ~~> "`python -c "import os,sys; print(os.path.realpath(os.path.expanduser(sys.argv[1])))" "$PETSC_DIR"`
   fi
   [[ "x$PETSC_ARCH" != "x" ]] && echo "PETSc architecture $PETSC_ARCH"
   echo "MSC.Marc/Mentat $MSC_ROOT"


@@ -1 +1 @@
-v2.0.0-43-ge39441f
+v2.0.0-97-g8b27de7


@@ -13,7 +13,8 @@ program DAMASK_spectral
   pInt, &
   pLongInt, &
   pReal, &
-  tol_math_check
+  tol_math_check, &
+  dNeq
 use DAMASK_interface, only: &
   DAMASK_interface_init, &
   loadCaseFile, &
@@ -147,7 +148,9 @@ program DAMASK_spectral
   MPI_file_seek, &
   MPI_file_get_position, &
   MPI_file_write, &
-  MPI_allreduce
+  MPI_abort, &
+  MPI_allreduce, &
+  PETScFinalize

!--------------------------------------------------------------------------------------------------
! init DAMASK (all modules)
@@ -339,7 +342,7 @@ program DAMASK_spectral
              reshape(spread(tol_math_check,1,9),[ 3,3]))&
              .or. abs(math_det33(loadCases(currentLoadCase)%rotation)) > &
              1.0_pReal + tol_math_check) errorID = 846_pInt                ! given rotation matrix contains strain
-    if (any(loadCases(currentLoadCase)%rotation /= math_I3)) &
+    if (any(dNeq(loadCases(currentLoadCase)%rotation, math_I3))) &
      write(6,'(2x,a,/,3(3(3x,f12.7,1x)/))',advance='no') 'rotation of loadframe:',&
              math_transpose33(loadCases(currentLoadCase)%rotation)
    if (loadCases(currentLoadCase)%time < 0.0_pReal) errorID = 834_pInt     ! negative time increment
@@ -423,17 +426,21 @@ program DAMASK_spectral
!--------------------------------------------------------------------------------------------------
! prepare MPI parallel out (including opening of file)
 allocate(outputSize(worldsize), source = 0_MPI_OFFSET_KIND)
-outputSize(worldrank+1) = int(size(materialpoint_results)*pReal,MPI_OFFSET_KIND)
+outputSize(worldrank+1) = size(materialpoint_results,kind=MPI_OFFSET_KIND)*int(pReal,MPI_OFFSET_KIND)
-call MPI_allreduce(MPI_IN_PLACE,outputSize,worldsize,MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr)  ! get total output size over each process
+call MPI_allreduce(MPI_IN_PLACE,outputSize,worldsize,MPI_LONG,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get total output size over each process
+if(ierr /=0_pInt) call IO_error(894_pInt, ext_msg='MPI_allreduce')
 call MPI_file_open(PETSC_COMM_WORLD, &
                    trim(getSolverWorkingDirectoryName())//trim(getSolverJobName())//'.spectralOut', &
                    MPI_MODE_WRONLY + MPI_MODE_APPEND, &
                    MPI_INFO_NULL, &
                    resUnit, &
                    ierr)
+if(ierr /=0_pInt) call IO_error(894_pInt, ext_msg='MPI_file_open')
 call MPI_file_get_position(resUnit,fileOffset,ierr)                        ! get offset from header
+if(ierr /=0_pInt) call IO_error(894_pInt, ext_msg='MPI_file_get_position')
 fileOffset = fileOffset + sum(outputSize(1:worldrank))                     ! offset of my process in file (header + processes before me)
 call MPI_file_seek (resUnit,fileOffset,MPI_SEEK_SET,ierr)
+if(ierr /=0_pInt) call IO_error(894_pInt, ext_msg='MPI_file_seek')

 if (.not. appendToOutFile) then                                            ! if not restarting, write 0th increment
   do i=1, size(materialpoint_results,3)/(maxByteOut/(materialpoint_sizeResults*pReal))+1  ! slice the output of my process in chunks not exceeding the limit for one output
@@ -443,6 +450,7 @@ program DAMASK_spectral
                         [(outputIndex(2)-outputIndex(1)+1)*materialpoint_sizeResults]), &
                         (outputIndex(2)-outputIndex(1)+1)*materialpoint_sizeResults,&
                         MPI_DOUBLE, MPI_STATUS_IGNORE, ierr)
+    if(ierr /=0_pInt) call IO_error(894_pInt, ext_msg='MPI_file_write')
   enddo
   fileOffset = fileOffset + sum(outputSize)                                ! forward to current file position
   if (worldrank == 0) &
@@ -643,13 +651,15 @@ program DAMASK_spectral
           write(6,'(1/,a)') ' ... writing results to file ......................................'
           call materialpoint_postResults()
           call MPI_file_seek (resUnit,fileOffset,MPI_SEEK_SET,ierr)
+          if(ierr /=0_pInt) call IO_error(894_pInt, ext_msg='MPI_file_seek')
           do i=1, size(materialpoint_results,3)/(maxByteOut/(materialpoint_sizeResults*pReal))+1  ! slice the output of my process in chunks not exceeding the limit for one output
             outputIndex=int([(i-1_pInt)*((maxByteOut/pReal)/materialpoint_sizeResults)+1_pInt, &
                             min(i*((maxByteOut/pReal)/materialpoint_sizeResults),size(materialpoint_results,3))],pLongInt)
             call MPI_file_write(resUnit,reshape(materialpoint_results(:,:,outputIndex(1):outputIndex(2)),&
                                 [(outputIndex(2)-outputIndex(1)+1)*materialpoint_sizeResults]), &
                                 (outputIndex(2)-outputIndex(1)+1)*materialpoint_sizeResults,&
                                 MPI_DOUBLE, MPI_STATUS_IGNORE, ierr)
+            if(ierr /=0_pInt) call IO_error(894_pInt, ext_msg='MPI_file_write')
           enddo
           fileOffset = fileOffset + sum(outputSize)                        ! forward to current file position
         endif
@@ -698,7 +708,7 @@ program DAMASK_spectral
 enddo
 call utilities_destroy()

-call PetscFinalize(ierr); CHKERRQ(ierr)
+call PETScFinalize(ierr); CHKERRQ(ierr)
 if (notConvergedCounter > 0_pInt) call quit(3_pInt)                        ! error if some are not converged
 call quit(0_pInt)                                                          ! no complains ;)
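Note: the added "if(ierr /= 0_pInt) call IO_error(894_pInt, ...)" guards implement a fail-fast pattern: every MPI return code is checked immediately, so a parallel I/O failure surfaces as error 894 with the offending call named instead of silently corrupting the .spectralOut file. A rough Python sketch of the same pattern (the status value and call name are illustrative, not the DAMASK API):

  def checked(call_name, status):
      # abort with a descriptive message as soon as any MPI-style call fails
      if status != 0:
          raise RuntimeError('MPI error in {} (status {})'.format(call_name, status))

  status = 0                         # would come from e.g. an MPI file-open call in real code
  checked('MPI_file_open', status)   # passes silently; a nonzero status raises immediately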


@@ -1669,7 +1669,9 @@ subroutine IO_error(error_ID,el,ip,g,ext_msg)
   msg = 'unknown filter type selected'
 case (893_pInt)
   msg = 'PETSc: SNES_DIVERGED_FNORM_NAN'
+ case (894_pInt)
+   msg = 'MPI error'

!-------------------------------------------------------------------------------------------------
! error messages related to parsing of Abaqus input file
 case (900_pInt)


@@ -1280,7 +1280,7 @@ subroutine material_populateGrains
 integer(pInt) :: t,e,i,g,j,m,c,r,homog,micro,sgn,hme, myDebug, &
                  phaseID,textureID,dGrains,myNgrains,myNorientations,myNconstituents, &
                  grain,constituentGrain,ipGrain,symExtension, ip
-real(pReal) :: extreme,rnd
+real(pReal) :: deviation,extreme,rnd
 integer(pInt), dimension (:,:), allocatable :: Nelems          ! counts number of elements in homog, micro array
 type(p_intvec), dimension (:,:), allocatable :: elemsOfHomogMicro  ! lists element number in homog, micro array
@@ -1407,8 +1407,11 @@ subroutine material_populateGrains
         extreme = 0.0_pReal
         t = 0_pInt
         do i = 1_pInt,myNconstituents                          ! find largest deviator
-          if (real(sgn,pReal)*log(NgrainsOfConstituent(i)/myNgrains/microstructure_fraction(i,micro)) > extreme) then
-            extreme = real(sgn,pReal)*log(NgrainsOfConstituent(i)/myNgrains/microstructure_fraction(i,micro))
+          deviation = real(sgn,pReal)*log( microstructure_fraction(i,micro) / &
+                                          !--------------------------------- &
+                                           (real(NgrainsOfConstituent(i),pReal)/real(myNgrains,pReal) ) )
+          if (deviation > extreme) then
+            extreme = deviation
             t = i
           endif
         enddo
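Note: the rewritten measure compares the requested volume fraction against the fraction actually realized by the integer grain counts, and sgn flips the sign so that over- and under-representation can be hunted in turn. A small numeric sketch of the selection (all numbers made up):

  import math

  fractions = [0.5, 0.3, 0.2]     # requested microstructure_fraction per constituent
  Ngrains   = [ 52,  29,  19]     # grains actually assigned per constituent
  myNgrains = sum(Ngrains)
  sgn = +1                        # +1 hunts under-represented, -1 over-represented constituents

  deviation = [sgn*math.log(f/(n/float(myNgrains))) for f, n in zip(fractions, Ngrains)]
  t = max(range(len(deviation)), key=deviation.__getitem__)   # largest deviator
  print(t, deviation[t])          # -> constituent 2 is most under-represented here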


@@ -7,14 +7,11 @@
!! untextured polycrystal
!--------------------------------------------------------------------------------------------------
module plastic_isotropic
-#ifdef HDF
- use hdf5, only: &
-   HID_T
-#endif

 use prec, only: &
   pReal,&
-  pInt
+  pInt, &
+  DAMASK_NaN

 implicit none
 private
@@ -40,20 +37,20 @@ module plastic_isotropic
   integer(kind(undefined_ID)), allocatable, dimension(:) :: &
     outputID
   real(pReal) :: &
-    fTaylor, &
-    tau0, &
-    gdot0, &
-    n, &
-    h0, &
-    h0_slopeLnRate, &
-    tausat, &
-    a, &
-    aTolFlowstress, &
-    aTolShear , &
-    tausat_SinhFitA=0.0_pReal, &
-    tausat_SinhFitB=0.0_pReal, &
-    tausat_SinhFitC=0.0_pReal, &
-    tausat_SinhFitD=0.0_pReal
+    fTaylor         = DAMASK_NaN, &
+    tau0            = DAMASK_NaN, &
+    gdot0           = DAMASK_NaN, &
+    n               = DAMASK_NaN, &
+    h0              = DAMASK_NaN, &
+    h0_slopeLnRate  = 0.0_pReal, &
+    tausat          = DAMASK_NaN, &
+    a               = DAMASK_NaN, &
+    aTolFlowstress  = 1.0_pReal, &
+    aTolShear       = 1.0e-6_pReal, &
+    tausat_SinhFitA = 0.0_pReal, &
+    tausat_SinhFitB = 0.0_pReal, &
+    tausat_SinhFitC = 0.0_pReal, &
+    tausat_SinhFitD = 0.0_pReal
   logical :: &
     dilatation = .false.
 end type
@@ -474,7 +471,8 @@ subroutine plastic_isotropic_LiAndItsTangent(Li,dLi_dTstar_3333,Tstar_v,ipc,ip,el)
 implicit none
 real(pReal), dimension(3,3), intent(out) :: &
   Li                                        !< plastic velocity gradient
+real(pReal), dimension(3,3,3,3), intent(out) :: &
+  dLi_dTstar_3333                           !< derivative of Li with respect to Tstar as 4th order tensor
 real(pReal), dimension(6), intent(in) :: &
   Tstar_v                                   !< 2nd Piola Kirchhoff stress tensor in Mandel notation
 integer(pInt), intent(in) :: &
@@ -484,9 +482,7 @@ subroutine plastic_isotropic_LiAndItsTangent(Li,dLi_dTstar_3333,Tstar_v,ipc,ip,el)
 real(pReal), dimension(3,3) :: &
   Tstar_sph_33                              !< spherical part of the 2nd Piola Kirchhoff stress tensor as 2nd order tensor
-real(pReal), dimension(3,3,3,3), intent(out) :: &
-  dLi_dTstar_3333                           !< derivative of Li with respect to Tstar as 4th order tensor
-real(pReal) :: &
+real(pReal) :: &
   gamma_dot, &                              !< strainrate
   norm_Tstar_sph, &                         !< euclidean norm of Tstar_sph
   squarenorm_Tstar_sph                      !< square of the euclidean norm of Tstar_sph
@@ -523,6 +519,9 @@ subroutine plastic_isotropic_LiAndItsTangent(Li,dLi_dTstar_3333,Tstar_v,ipc,ip,el)
     dLi_dTstar_3333 = gamma_dot / param(instance)%fTaylor * &
                       dLi_dTstar_3333 / norm_Tstar_sph
   endif
+ else
+   Li = 0.0_pReal
+   dLi_dTstar_3333 = 0.0_pReal
 endif

end subroutine plastic_isotropic_LiAndItsTangent
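Note: defaulting the required parameters to DAMASK_NaN is a deliberate tripwire; a parameter the material config forgot to set poisons every result with NaN instead of silently running with 0.0, while genuinely optional entries keep harmless defaults. The same idiom in Python (hypothetical parameter set):

  import math

  params = {'tau0':  float('nan'),   # required: NaN until the config sets it
            'gdot0': float('nan'),   # required
            'aTolShear': 1.0e-6}     # optional: safe default

  # after parsing the config file, unset required parameters are easy to spot
  unset = [k for k, v in params.items() if isinstance(v, float) and math.isnan(v)]
  if unset:
      raise ValueError('required parameters not set: {}'.format(', '.join(sorted(unset))))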


@@ -207,9 +207,6 @@ subroutine plastic_j2_init(fileUnit)
       phase = phase + 1_pInt                            ! advance section counter
       if (phase_plasticity(phase) == PLASTICITY_J2_ID) then
         instance = phase_plasticityInstance(phase)
-#ifdef HDF
-        outID(instance)=HDF5_addGroup(str1,tempResults)
-#endif
       endif
       cycle                                             ! skip to next line
     endif
@@ -226,21 +223,11 @@ subroutine plastic_j2_init(fileUnit)
           plastic_j2_outputID(plastic_j2_Noutput(instance),instance) = flowstress_ID
           plastic_j2_output(plastic_j2_Noutput(instance),instance) = &
                                             IO_lc(IO_stringValue(line,chunkPos,2_pInt))
-#ifdef HDF
-          call HDF5_addScalarDataset(outID(instance),myConstituents,'flowstress','MPa')
-          allocate(plastic_j2_Output2(instance)%flowstress(myConstituents))
-          plastic_j2_Output2(instance)%flowstressActive = .true.
-#endif
         case ('strainrate')
           plastic_j2_Noutput(instance) = plastic_j2_Noutput(instance) + 1_pInt
           plastic_j2_outputID(plastic_j2_Noutput(instance),instance) = strainrate_ID
           plastic_j2_output(plastic_j2_Noutput(instance),instance) = &
                                             IO_lc(IO_stringValue(line,chunkPos,2_pInt))
-#ifdef HDF
-          call HDF5_addScalarDataset(outID(instance),myConstituents,'strainrate','1/s')
-          allocate(plastic_j2_Output2(instance)%strainrate(myConstituents))
-          plastic_j2_Output2(instance)%strainrateActive = .true.
-#endif
         case default

       end select


@@ -113,7 +113,9 @@ module prec
 public :: &
   prec_init, &
-  prec_isNaN
+  prec_isNaN, &
+  dEq, &
+  dNeq

contains
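Note: dEq/dNeq (used above in DAMASK_spectral to test the load-case rotation against the identity) compare reals within a tolerance instead of bitwise with ==(/=), which is fragile after any rounding. A sketch of the idea in Python, assuming a relative tolerance as in typical implementations:

  def dNeq(a, b, tol=1.0e-12):
      # True where two reals differ by more than a tolerance scaled to their magnitude
      return abs(a - b) > tol * max(abs(a), abs(b), 1.0)

  print(0.1 + 0.2 != 0.3)       # True: bitwise comparison trips over rounding
  print(dNeq(0.1 + 0.2, 0.3))   # False: tolerant comparison does not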


@@ -1,37 +0,0 @@ (file deleted)
### $Id$ ###
[Tungsten]
elasticity hooke
plasticity dislokmc
### Material parameters ###
lattice_structure bcc
C11 523.0e9 # From Marinica et al. Journal of Physics: Condensed Matter(2013)
C12 202.0e9
C44 161.0e9
grainsize 2.0e-5 # Average grain size [m] 2.0e-5
SolidSolutionStrength 0.0 # Strength due to elements in solid solution
### Dislocation glide parameters ###
#per family
Nslip 12 0
slipburgers 2.72e-10 # Burgers vector of slip system [m]
rhoedge0 1.0e12 # Initial edge dislocation density [m/m**3]
rhoedgedip0 1.0 # Initial edged dipole dislocation density [m/m**3]
Qedge 2.725e-19 # Activation energy for dislocation glide [J]
v0 3560.3 # Initial glide velocity [m/s](kmC)
p_slip 0.16 # p-exponent in glide velocity
q_slip 1.00 # q-exponent in glide velocity
u_slip 2.47 # u-exponent of stress pre-factor (kmC)
s_slip 0.97 # self hardening in glide velocity (kmC)
tau_peierls 2.03e9 # peierls stress [Pa]
#hardening
dipoleformationfactor 0 # to have hardening due to dipole formation off
CLambdaSlip 10.0 # Adj. parameter controlling dislocation mean free path
D0 4.0e-5 # Vacancy diffusion prefactor [m**2/s]
Qsd 4.5e-19 # Activation energy for climb [J]
Catomicvolume 1.0 # Adj. parameter controlling the atomic volume [in b]
Cedgedipmindistance 1.0 # Adj. parameter controlling the minimum dipole distance [in b]
interaction_slipslip 0.2 0.11 0.19 0.15 0.11 0.17


@@ -25,7 +25,7 @@ class ASCIItable():
                readonly = False,                                  # no reading from file
               ):
     self.__IO__ = {'output': [],
                    'buffered': buffered,
                    'labeled': labeled,                            # header contains labels
                    'labels': [],                                  # labels according to file info
                    'readBuffer': [],                              # buffer to hold non-advancing reads
@@ -35,18 +35,18 @@ class ASCIItable():
     self.__IO__['inPlace'] = not outname and name and not readonly
     if self.__IO__['inPlace']: outname = name + self.tmpext       # transparently create tmp file
     try:
       self.__IO__['in'] = (open(name,'r') if os.access(name, os.R_OK) else None) if name else sys.stdin
     except TypeError:
       self.__IO__['in'] = name

     try:
-      self.__IO__['out'] = (open(outname,'w') if (not os.path.isfile(outname) \
-                                                  or os.access( outname, os.W_OK) \
-                                                 ) \
-                                              and (not self.__IO__['inPlace'] \
-                                                   or not os.path.isfile(name) \
-                                                   or os.access( name, os.W_OK) \
-                                                  ) else None) if outname else sys.stdout
+      self.__IO__['out'] = (open(outname,'w') if (not os.path.isfile(outname) or
+                                                      os.access( outname, os.W_OK)
+                                                 ) and
+                                                 (not self.__IO__['inPlace'] or
+                                                  not os.path.isfile(name)    or
+                                                      os.access( name, os.W_OK)
+                                                 ) else None) if outname else sys.stdout
     except TypeError:
       self.__IO__['out'] = outname
@@ -272,7 +272,7 @@ class ASCIItable():
       for label in labels:
         if label is not None:
           try:
-            idx.append(int(label))                                # column given as integer number?
+            idx.append(int(label)-1)                              # column given as integer number?
           except ValueError:
             try:
               idx.append(self.labels.index(label))                # locate string in label list
@@ -283,7 +283,7 @@ class ASCIItable():
               idx.append(-1)                                      # not found...
     else:
       try:
-        idx = int(labels)
+        idx = int(labels)-1                                       # offset for python array indexing
       except ValueError:
         try:
           idx = self.labels.index(labels)
@@ -293,7 +293,7 @@ class ASCIItable():
         except ValueError:
           idx = None if labels is None else -1

-    return np.array(idx) if isinstance(idx,list) else idx
+    return np.array(idx) if isinstance(idx,Iterable) else idx

# ------------------------------------------------------------------
  def label_dimension(self,
@@ -312,7 +312,7 @@ class ASCIItable():
         if label is not None:
           myDim = -1
           try:                                                    # column given as number?
-            idx = int(label)
+            idx = int(label)-1
             myDim = 1                                             # if found has at least dimension 1
             if self.labels[idx].startswith('1_'):                 # column has multidim indicator?
               while idx+myDim < len(self.labels) and self.labels[idx+myDim].startswith("%i_"%(myDim+1)):
@@ -331,7 +331,7 @@ class ASCIItable():
       dim = -1                                                    # assume invalid label
       idx = -1
       try:                                                        # column given as number?
-        idx = int(labels)
+        idx = int(labels)-1
         dim = 1                                                   # if found has at least dimension 1
         if self.labels[idx].startswith('1_'):                     # column has multidim indicator?
           while idx+dim < len(self.labels) and self.labels[idx+dim].startswith("%i_"%(dim+1)):
@@ -345,7 +345,7 @@ class ASCIItable():
           while idx+dim < len(self.labels) and self.labels[idx+dim].startswith("%i_"%(dim+1)):
             dim += 1                                              # keep adding while going through object

-    return np.array(dim) if isinstance(dim,list) else dim
+    return np.array(dim) if isinstance(dim,Iterable) else dim

# ------------------------------------------------------------------
  def label_indexrange(self,
@@ -363,7 +363,7 @@ class ASCIItable():
    return map(lambda a,b: xrange(a,a+b), zip(start,dim)) if isinstance(labels, Iterable) and not isinstance(labels, str) \
      else xrange(start,start+dim)

# ------------------------------------------------------------------
  def info_append(self,
                  what):
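Note: the recurring int(label)-1 change fixes an off-by-one; column numbers given by the user are 1-based, while Python lists index from 0. In short (hypothetical header):

  labels = ['pos', 'texture', 'eulerangles']   # hypothetical table header

  user_column = '2'                            # user asks for the 2nd column
  print(labels[int(user_column)-1])            # -> 'texture' (without the -1: 'eulerangles')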

Binary file not shown (image; 80 KiB before, 34 KiB after).


@@ -0,0 +1,222 @@ (new file)
#!/usr/bin/env python
# -*- coding: UTF-8 no BOM -*-
import os,sys,math
import numpy as np
import scipy.ndimage
from optparse import OptionParser
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
#--------------------------------------------------------------------------------------------------
def cell2node(cellData,grid):
nodeData = 0.0
datalen = np.array(cellData.shape[3:]).prod()
for i in xrange(datalen):
node = scipy.ndimage.convolve(cellData.reshape(tuple(grid)+(datalen,))[...,i],
np.ones((2,2,2))/8., # 2x2x2 neighborhood of cells
mode = 'wrap',
origin = -1, # offset to have cell origin as center
) # now averaged at cell origins
node = np.append(node,node[np.newaxis,0,:,:,...],axis=0) # wrap along z
node = np.append(node,node[:,0,np.newaxis,:,...],axis=1) # wrap along y
node = np.append(node,node[:,:,0,np.newaxis,...],axis=2) # wrap along x
nodeData = node[...,np.newaxis] if i==0 else np.concatenate((nodeData,node[...,np.newaxis]),axis=-1)
return nodeData
#--------------------------------------------------------------------------------------------------
def displacementAvgFFT(F,grid,size,nodal=False,transformed=False):
"""calculate average cell center (or nodal) displacement for deformation gradient field specified in each grid cell"""
if nodal:
x, y, z = np.meshgrid(np.linspace(0,size[0],1+grid[0]),
np.linspace(0,size[1],1+grid[1]),
np.linspace(0,size[2],1+grid[2]),
indexing = 'ij')
else:
x, y, z = np.meshgrid(np.linspace(0,size[0],grid[0],endpoint=False),
np.linspace(0,size[1],grid[1],endpoint=False),
np.linspace(0,size[2],grid[2],endpoint=False),
indexing = 'ij')
origCoords = np.concatenate((z[:,:,:,None],y[:,:,:,None],x[:,:,:,None]),axis = 3)
F_fourier = F if transformed else np.fft.rfftn(F,axes=(0,1,2)) # transform or use provided data
Favg = np.real(F_fourier[0,0,0,:,:])/grid.prod() # take zero freq for average
avgDisplacement = np.einsum('ml,ijkl->ijkm',Favg-np.eye(3),origCoords) # dX = Favg.X
return avgDisplacement
#--------------------------------------------------------------------------------------------------
def displacementFluctFFT(F,grid,size,nodal=False,transformed=False):
"""calculate cell center (or nodal) displacement for deformation gradient field specified in each grid cell"""
integrator = 0.5j * size / math.pi
kk, kj, ki = np.meshgrid(np.where(np.arange(grid[2])>grid[2]//2,np.arange(grid[2])-grid[2],np.arange(grid[2])),
np.where(np.arange(grid[1])>grid[1]//2,np.arange(grid[1])-grid[1],np.arange(grid[1])),
np.arange(grid[0]//2+1),
indexing = 'ij')
k_s = np.concatenate((ki[:,:,:,None],kj[:,:,:,None],kk[:,:,:,None]),axis = 3)
k_sSquared = np.einsum('...l,...l',k_s,k_s)
k_sSquared[0,0,0] = 1.0 # ignore global average frequency
#--------------------------------------------------------------------------------------------------
# integration in Fourier space
displacement_fourier = -np.einsum('ijkml,ijkl,l->ijkm',
F if transformed else np.fft.rfftn(F,axes=(0,1,2)),
k_s,
integrator,
) / k_sSquared[...,np.newaxis]
#--------------------------------------------------------------------------------------------------
# backtransformation to real space
displacement = np.fft.irfftn(displacement_fourier,grid,axes=(0,1,2))
return cell2node(displacement,grid) if nodal else displacement
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options file[s]', description = """
Add deformed configuration of given initial coordinates.
Operates on periodic three-dimensional x,y,z-ordered data sets.
""", version = scriptID)
parser.add_option('-f', '--defgrad',
dest = 'defgrad',
metavar = 'string',
help = 'column label of deformation gradient [%default]')
parser.add_option('-c', '--coordinates',
dest = 'coords',
metavar = 'string',
help = 'column label of coordinates [%default]')
parser.add_option('--nodal',
dest = 'nodal',
action = 'store_true',
help = 'output nodal (not cell-centered) displacements')
parser.set_defaults(defgrad = 'f',
coords = 'ipinitialcoord',
nodal = False,
)
(options,filenames) = parser.parse_args()
# --- loop over input files -------------------------------------------------------------------------
if filenames == []: filenames = [None]
for name in filenames:
try: table = damask.ASCIItable(name = name,
outname = (os.path.splitext(name)[0]+
'_nodal'+
os.path.splitext(name)[1]) if (options.nodal and name) else None,
buffered = False)
except: continue
damask.util.report(scriptName,name)
# ------------------------------------------ read header ------------------------------------------
table.head_read()
# ------------------------------------------ sanity checks ----------------------------------------
errors = []
remarks = []
if table.label_dimension(options.defgrad) != 9:
errors.append('deformation gradient "{}" is not a 3x3 tensor.'.format(options.defgrad))
coordDim = table.label_dimension(options.coords)
if not 3 >= coordDim >= 1: errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.coords))
elif coordDim < 3: remarks.append('appending {} dimensions to coordinates "{}"...'.format(3-coordDim,options.coords))
if remarks != []: damask.util.croak(remarks)
if errors != []:
damask.util.croak(errors)
table.close(dismiss=True)
continue
# --------------- figure out size and grid ---------------------------------------------------------
table.data_readArray([options.defgrad,options.coords])
table.data_rewind()
if len(table.data.shape) < 2: table.data.shape += (1,) # expand to 2D shape
if table.data[:,9:].shape[1] < 3:
table.data = np.hstack((table.data,
np.zeros((table.data.shape[0],
3-table.data[:,9:].shape[1]),dtype='f'))) # fill coords up to 3D with zeros
if remarks != []: damask.util.croak(remarks)
if errors != []:
damask.util.croak(errors)
table.close(dismiss = True)
continue
# --------------- figure out size and grid ---------------------------------------------------------
coords = [np.unique(table.data[:,9+i]) for i in xrange(3)]
mincorner = np.array(map(min,coords))
maxcorner = np.array(map(max,coords))
grid = np.array(map(len,coords),'i')
size = grid/np.maximum(np.ones(3,'d'), grid-1.0) * (maxcorner-mincorner) # size from edge to edge = dim * n/(n-1)
size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1])) # spacing for grid==1 set to smallest among other spacings
N = grid.prod()
if N != len(table.data): errors.append('data count {} does not match grid {}x{}x{}.'.format(N,*grid))
if errors != []:
damask.util.croak(errors)
table.close(dismiss = True)
continue
# ------------------------------------------ process data ------------------------------------------
F_fourier = np.fft.rfftn(table.data[:,:9].reshape(grid[2],grid[1],grid[0],3,3),axes=(0,1,2)) # perform transform only once...
displacement = displacementFluctFFT(F_fourier,grid,size,options.nodal,transformed=True)
avgDisplacement = displacementAvgFFT (F_fourier,grid,size,options.nodal,transformed=True)
# ------------------------------------------ assemble header ---------------------------------------
if options.nodal:
table.info_clear()
table.labels_clear()
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.labels_append((['{}_pos' .format(i+1) for i in xrange(3)] if options.nodal else []) +
['{}_avg({}).{}' .format(i+1,options.defgrad,options.coords) for i in xrange(3)] +
['{}_fluct({}).{}'.format(i+1,options.defgrad,options.coords) for i in xrange(3)] )
table.head_write()
# ------------------------------------------ output data -------------------------------------------
zrange = np.linspace(0,size[2],1+grid[2]) if options.nodal else xrange(grid[2])
yrange = np.linspace(0,size[1],1+grid[1]) if options.nodal else xrange(grid[1])
xrange = np.linspace(0,size[0],1+grid[0]) if options.nodal else xrange(grid[0])
for i,z in enumerate(zrange):
for j,y in enumerate(yrange):
for k,x in enumerate(xrange):
if options.nodal: table.data_clear()
else: table.data_read()
table.data_append([x,y,z] if options.nodal else [])
table.data_append(list(avgDisplacement[i,j,k,:]))
table.data_append(list( displacement[i,j,k,:]))
table.data_write()
# ------------------------------------------ output finalization -----------------------------------
table.close() # close ASCII tables
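Note: in displacementAvgFFT the zero-frequency component of the transformed deformation gradient is the volume average Favg, so the affine part of the displacement is simply (Favg - I)·X. A tiny numpy check on a single point (made-up numbers):

  import numpy as np

  Favg = np.array([[1.0, 0.1, 0.0],    # hypothetical average deformation gradient: simple shear
                   [0.0, 1.0, 0.0],
                   [0.0, 0.0, 1.0]])
  X = np.array([2.0, 1.0, 0.0])        # undeformed coordinate

  u_avg = (Favg - np.eye(3)).dot(X)    # same contraction as einsum('ml,l->m',Favg-np.eye(3),X)
  print(u_avg)                         # -> [0.1 0.  0. ]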


@@ -5,7 +5,6 @@ import numpy as np
import damask
from optparse import OptionParser
from scipy import spatial
-from collections import defaultdict

scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID   = ' '.join([scriptName,damask.version])
@@ -23,7 +22,7 @@ parser.add_option('-r', '--radius',
parser.add_option('-d', '--disorientation',
                  dest = 'disorientation',
                  type = 'float', metavar = 'float',
-                 help = 'disorientation threshold per grain [%default] (degrees)')
+                 help = 'disorientation threshold in degrees [%default]')
parser.add_option('-s', '--symmetry',
                  dest = 'symmetry',
                  type = 'string', metavar = 'string',
@@ -61,7 +60,8 @@ parser.add_option('-p', '--position',
                  type = 'string', metavar = 'string',
                  help = 'spatial position of voxel [%default]')

-parser.set_defaults(symmetry = 'cubic',
+parser.set_defaults(disorientation = 5,
+                    symmetry = 'cubic',
                    coords = 'pos',
                    degrees = False,
                   )
@@ -86,17 +86,16 @@ if np.sum(input) != 1: parser.error('needs exactly one input format.')
             (options.matrix,9,'matrix'),
             (options.quaternion,4,'quaternion'),
            ][np.where(input)[0][0]]                               # select input label that was requested
toRadians = np.pi/180.0 if options.degrees else 1.0               # rescale degrees to radians
-cos_disorientation = np.cos(options.disorientation/2.*toRadians)
+cos_disorientation = np.cos(np.radians(options.disorientation/2.))  # cos of half the disorientation angle

# --- loop over input files -------------------------------------------------------------------------

if filenames == []: filenames = [None]

for name in filenames:
-  try:
-    table = damask.ASCIItable(name = name,
-                              buffered = False)
+  try: table = damask.ASCIItable(name = name,
+                                 buffered = False)
  except: continue
  damask.util.report(scriptName,name)
@@ -109,8 +108,10 @@ for name in filenames:
  errors  = []
  remarks = []

-  if table.label_dimension(options.coords) != 3: errors.append('coordinates {} are not a vector.'.format(options.coords))
-  if not np.all(table.label_dimension(label) == dim): errors.append('input {} does not have dimension {}.'.format(label,dim))
+  if not 3 >= table.label_dimension(options.coords) >= 1:
+    errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.coords))
+  if not np.all(table.label_dimension(label) == dim):
+    errors.append('input {} does not have dimension {}.'.format(label,dim))
  else: column = table.label_index(label)

  if remarks != []: damask.util.croak(remarks)
@@ -122,8 +123,10 @@ for name in filenames:
# ------------------------------------------ assemble header ---------------------------------------

  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-  table.labels_append('grainID_{}@{}'.format(label,
-                      options.disorientation if options.degrees else np.degrees(options.disorientation)))  # report orientation source and disorientation in degrees
+  table.labels_append('grainID_{}@{:g}'.format('+'.join(label)
+                                               if isinstance(label, (list,tuple))
+                                               else label,
+                                               options.disorientation))  # report orientation source and disorientation
  table.head_write()

# ------------------------------------------ process data ------------------------------------------
@@ -162,7 +165,7 @@ for name in filenames:
        time_delta = (time.clock()-tick) * (len(grainID) - p) / p
        bg.set_message('(%02i:%02i:%02i) processing point %i of %i (grain count %i)...'\
-                      %(time_delta//3600,time_delta%3600//60,time_delta%60,p,len(grainID),len(orientations)))
+                      %(time_delta//3600,time_delta%3600//60,time_delta%60,p,len(grainID),np.count_nonzero(memberCounts)))

      if inputtype == 'eulers':
        o = damask.Orientation(Eulers = np.array(map(float,table.data[column:column+3]))*toRadians,
@@ -179,84 +182,51 @@ for name in filenames:
        o = damask.Orientation(quaternion = np.array(map(float,table.data[column:column+4])),
                               symmetry = options.symmetry).reduced()

      matched = False
+     alreadyChecked = {}
+     candidates = []
+     bestDisorientation = damask.Quaternion([0,0,0,1])           # initialize to 180 deg rotation as worst case

-# check against last matched needs to be really picky. best would be to exclude jumps across the poke (checking distance between last and me?)
-# when walking through neighborhood first check whether grainID of that point has already been tested, if yes, skip!
-      if matchedID != -1:                                        # has matched before?
-        matched = (o.quaternion.conjugated() * orientations[matchedID].quaternion).w > cos_disorientation
-
-      if not matched:
-        alreadyChecked = {}
-        bestDisorientation = damask.Quaternion([0,0,0,1])        # initialize to 180 deg rotation as worst case
-        for i in kdtree.query_ball_point(kdtree.data[p],options.radius):  # check all neighboring points
-          gID = grainID[i]
-          if gID != -1 and gID not in alreadyChecked:            # indexed point belonging to a grain not yet tested?
-            alreadyChecked[gID] = True                           # remember not to check again
-            disorientation = o.disorientation(orientations[gID],SST = False)[0]  # compare against other orientation
-            if disorientation.quaternion.w > cos_disorientation and \
-               disorientation.quaternion.w >= bestDisorientation.w:  # within threshold and better than current best?
-              matched = True
-              matchedID = gID                                    # remember that grain
-              bestDisorientation = disorientation.quaternion
+     for i in kdtree.query_ball_point(kdtree.data[p],options.radius):  # check all neighboring points
+       gID = grainID[i]
+       if gID != -1 and gID not in alreadyChecked:               # indexed point belonging to a grain not yet tested?
+         alreadyChecked[gID] = True                              # remember not to check again
+         disorientation = o.disorientation(orientations[gID],SST = False)[0]  # compare against other orientation
+         if disorientation.quaternion.w > cos_disorientation:    # within threshold ...
+           candidates.append(gID)                                # remember as potential candidate
+           if disorientation.quaternion.w >= bestDisorientation.w:  # ... and better than current best?
+             matched = True
+             matchedID = gID                                     # remember that grain
+             bestDisorientation = disorientation.quaternion

-      if not matched:                                            # no match -> new grain found
-        memberCounts += [1]                                      # start new membership counter
+     if matched:                                                 # did match existing grain
+       memberCounts[matchedID] += 1
+       if len(candidates) > 1:                                   # ambiguity in grain identification?
+         largestGrain = sorted(candidates,key=lambda x:memberCounts[x])[-1]  # find largest among potential candidate grains
+         matchedID = largestGrain
+         for c in [c for c in candidates if c != largestGrain]:  # loop over smaller candidates
+           memberCounts[largestGrain] += memberCounts[c]         # reassign member count of smaller to largest
+           memberCounts[c] = 0
+         grainID = np.where(np.in1d(grainID,candidates), largestGrain, grainID)  # relabel grid points of smaller candidates as largest one
+     else:                                                       # no match -> new grain found
        orientations += [o]                                       # initialize with current orientation
+       memberCounts += [1]                                       # start new membership counter
        matchedID = g
        g += 1                                                    # increment grain counter
-      else:                                                      # did match existing grain
-        memberCounts[matchedID] += 1

      grainID[p] = matchedID                                      # remember grain index assigned to point
      p += 1                                                      # increment point

-  bg.set_message('identifying similar orientations among {} grains...'.format(len(orientations)))
+  grainIDs = np.where(np.array(memberCounts) > 0)[0]             # identify "live" grain identifiers
+  packingMap = dict(zip(list(grainIDs),range(len(grainIDs))))    # map to condense into consecutive IDs
  memberCounts = np.array(memberCounts)
-  similarOrientations = [[] for i in xrange(len(orientations))]
-
-  for i,orientation in enumerate(orientations[:-1]):             # compare each identified orientation...
-    for j in xrange(i+1,len(orientations)):                      # ...against all others that were defined afterwards
-      if orientation.disorientation(orientations[j],SST = False)[0].quaternion.w > cos_disorientation:  # similar orientations in both grainIDs?
-        similarOrientations[i].append(j)                         # remember in upper triangle...
-        similarOrientations[j].append(i)                         # ...and lower triangle of matrix
-    if similarOrientations[i] != []:
-      bg.set_message('grainID {} is as: {}'.format(i,' '.join(map(str,similarOrientations[i]))))
-
-  stillShifting = True
-  while stillShifting:
-    stillShifting = False
-    tick = time.clock()
-    for p,gID in enumerate(grainID):                             # walk through all points
-      if p > 0 and p % 1000 == 0:
-        time_delta = (time.clock()-tick) * (len(grainID) - p) / p
-        bg.set_message('(%02i:%02i:%02i) shifting ID of point %i out of %i (grain count %i)...'
-                       %(time_delta//3600,time_delta%3600//60,time_delta%60,p,len(grainID),len(orientations)))
-      if similarOrientations[gID] != []:                         # orientation of my grainID is similar to someone else?
-        similarNeighbors = defaultdict(int)                      # frequency of neighboring grainIDs sharing my orientation
-        for i in kdtree.query_ball_point(kdtree.data[p],options.radius):  # check all neighboring points
-          if grainID[i] in similarOrientations[gID]:             # neighboring point shares my orientation?
-            similarNeighbors[grainID[i]] += 1                    # remember its grainID
-        if similarNeighbors != {}:                               # found similar orientation(s) in neighborhood
-          candidates = np.array([gID]+similarNeighbors.keys())   # possible replacement grainIDs for me
-          grainID[p] = candidates[np.argsort(memberCounts[candidates])[-1]]  # adopt ID that is most frequent in overall dataset
-          memberCounts[gID] -= 1                                 # my former ID loses one fellow
-          memberCounts[grainID[p]] += 1                          # my new ID gains one fellow
-          bg.set_message('{}:{} --> {}'.format(p,gID,grainID[p]))  # report switch of grainID
-          stillShifting = True

  table.data_rewind()
  outputAlive = True
  p = 0
  while outputAlive and table.data_read():                        # read next data line of ASCII table
-    table.data_append(1+grainID[p])                              # add grain ID
+    table.data_append(1+packingMap[grainID[p]])                  # add (condensed) grain ID
    outputAlive = table.data_write()                              # output processed line
    p += 1
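Note: two bookkeeping tricks replace the former post-hoc "shifting" loop: ambiguous matches are merged on the spot into the largest candidate grain via np.in1d relabelling, and the surviving (nonzero-member) grain IDs are finally packed into consecutive numbers with a dict. The packing step in isolation (hypothetical counts):

  import numpy as np

  memberCounts = np.array([3, 0, 5, 0, 1])                       # grains 1 and 3 were merged away
  grainID      = np.array([0, 2, 2, 4, 0, 2])                    # per-point labels still use old IDs

  grainIDs   = np.where(memberCounts > 0)[0]                     # "live" identifiers: [0 2 4]
  packingMap = dict(zip(list(grainIDs), range(len(grainIDs))))   # {0:0, 2:1, 4:2}
  print([1 + packingMap[g] for g in grainID])                    # -> [1, 2, 2, 3, 1, 2]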


@@ -239,7 +239,9 @@ for name in filenames:
# ------------------------------------------ assemble header ---------------------------------------

  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-  table.labels_append(['{id}_S[{direction[0]:.1g}_{direction[1]:.1g}_{direction[2]:.1g}]({normal[0]:.1g}_{normal[1]:.1g}_{normal[2]:.1g})'\
+  table.labels_append(['{id}_'
+                       'S[{direction[0]:.1g}_{direction[1]:.1g}_{direction[2]:.1g}]'
+                       '({normal[0]:.1g}_{normal[1]:.1g}_{normal[2]:.1g})'\
                       .format( id = i+1,
                                normal = theNormal,
                                direction = theDirection,

@@ -37,10 +37,14 @@ if options.label is None:
if filenames == []: filenames = [None]

for name in filenames:
-  try:
-    table = damask.ASCIItable(name = name,
-                              outname = options.label+'_averaged_'+name if name else name,
-                              buffered = False)
+  damask.util.croak(name)
+
+  try: table = damask.ASCIItable(name = name,
+                                 outname = os.path.join(
+                                                        os.path.split(name)[0],
+                                                        options.label+'_averaged_'+os.path.split(name)[1]
+                                                        ) if name else name,
+                                 buffered = False)
  except: continue
  damask.util.report(scriptName,name)

processing/post/histogram.py (new executable file)

@@ -0,0 +1,142 @@
#!/usr/bin/env python
# -*- coding: UTF-8 no BOM -*-
import os,sys
import numpy as np
from optparse import OptionParser
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
Generate histogram of N bins in given data range.
""", version = scriptID)
parser.add_option('-d','--data',
dest = 'data',
type = 'string', metavar = 'string',
help = 'column heading for data')
parser.add_option('-w','--weights',
dest = 'weights',
type = 'string', metavar = 'string',
help = 'column heading for weights')
parser.add_option('--range',
dest = 'range',
type = 'float', nargs = 2, metavar = 'float float',
help = 'data range of histogram [min - max]')
parser.add_option('-N',
dest = 'N',
type = 'int', metavar = 'int',
help = 'number of bins')
parser.add_option('--density',
dest = 'density',
action = 'store_true',
help = 'report probability density')
parser.add_option('--logarithmic',
dest = 'log',
action = 'store_true',
help = 'logarithmically spaced bins')
parser.set_defaults(data = None,
weights = None,
range = None,
N = None,
density = False,
log = False,
)
(options,filenames) = parser.parse_args()
if not options.data: parser.error('no data specified.')
if not options.N: parser.error('no bin number specified.')
if options.log:
def forward(x):
return np.log(x)
def reverse(x):
return np.exp(x)
else:
def forward(x):
return x
def reverse(x):
return x
# --- loop over input files ------------------------------------------------------------------------
if filenames == []: filenames = [None]
for name in filenames:
try: table = damask.ASCIItable(name = name,
buffered = False,
readonly = True)
except: continue
damask.util.report(scriptName,name)
# ------------------------------------------ read header ------------------------------------------
table.head_read()
# ------------------------------------------ sanity checks ----------------------------------------
errors = []
remarks = []
if table.label_dimension(options.data) != 1: errors.append('data {} are not scalar.'.format(options.data))
if options.weights and \
table.label_dimension(options.data) != 1: errors.append('weights {} are not scalar.'.format(options.weights))
if remarks != []: damask.util.croak(remarks)
if errors != []:
damask.util.croak(errors)
table.close(dismiss = True)
continue
# --------------- read data ----------------------------------------------------------------
table.data_readArray([options.data,options.weights])
# --------------- auto range ---------------------------------------------------------------
if options.range is None:
rangeMin,rangeMax = min(table.data[:,0]),max(table.data[:,0])
else:
rangeMin,rangeMax = min(options.range),max(options.range)
# --------------- bin data ----------------------------------------------------------------
count,edges = np.histogram(table.data[:,0],
bins = reverse(forward(rangeMin) + np.arange(options.N+1) *
(forward(rangeMax)-forward(rangeMin))/options.N),
range = (rangeMin,rangeMax),
weights = None if options.weights is None else table.data[:,1],
density = options.density,
)
bincenter = reverse(forward(rangeMin) + (0.5+np.arange(options.N)) *
(forward(rangeMax)-forward(rangeMin))/options.N) # determine center of bins
# ------------------------------------------ assemble header ---------------------------------------
table.info_clear()
table.info_append([scriptID + '\t' + ' '.join(sys.argv[1:]),
scriptID + ':\t' +
'data range {} -- {}'.format(rangeMin,rangeMax) +
(' (log)' if options.log else ''),
])
table.labels_clear()
table.labels_append(['bincenter','count'])
table.head_write()
# ------------------------------------------ output result -----------------------------------------
table.data = np.squeeze(np.dstack((bincenter,count)))
table.data_writeArray()
# ------------------------------------------ output finalization -----------------------------------
table.close() # close ASCII tables
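Note: the forward/reverse pair makes one binning routine serve both linear and logarithmic spacing: bin edges are laid out uniformly in the transformed coordinate and mapped back. For the logarithmic case this reproduces np.logspace (range and bin count below are illustrative):

  import numpy as np

  rangeMin, rangeMax, N = 1.0e-4, 1.0e-1, 5    # hypothetical data range and bin count
  # uniform spacing in log(x), mapped back through exp -- the script's forward/reverse pair
  edges = np.exp(np.log(rangeMin) + np.arange(N+1)*(np.log(rangeMax)-np.log(rangeMin))/N)
  assert np.allclose(edges, np.logspace(np.log10(rangeMin), np.log10(rangeMax), N+1))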


@@ -1,7 +1,7 @@
#!/usr/bin/env python
# -*- coding: UTF-8 no BOM -*-

-import os,sys
+import os,sys,re
import damask
from optparse import OptionParser
@@ -32,14 +32,17 @@ parser.set_defaults(label = [],

(options,filenames) = parser.parse_args()

+pattern = [re.compile('^()(.+)$'),                                # label pattern for scalar
+           re.compile('^(\d+_)?(.+)$'),                           # label pattern for multidimension
+          ]

# --- loop over input files -------------------------------------------------------------------------

if filenames == []: filenames = [None]

for name in filenames:
-  try:
-    table = damask.ASCIItable(name = name,
-                              buffered = False)
+  try: table = damask.ASCIItable(name = name,
+                                 buffered = False)
  except: continue
  damask.util.report(scriptName,name)
@@ -63,8 +66,9 @@ for name in filenames:
  for i,index in enumerate(indices):
    if index == -1: remarks.append('label {} not present...'.format(options.label[i]))
    else:
+      m = pattern[dimensions[i]>1].match(table.labels[index])     # isolate label name
      for j in xrange(dimensions[i]):
-        table.labels[index+j] = table.labels[index+j].replace(options.label[i],options.substitute[i])
+        table.labels[index+j] = table.labels[index+j].replace(m.group(2),options.substitute[i])  # replace name with substitute

  if remarks != []: damask.util.croak(remarks)
  if errors != []:
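Note: the two compiled patterns split a column label into an optional dimension prefix (group 1) and the bare name (group 2), so only the name part is substituted and prefixes like 1_, 2_ survive renaming. For example:

  import re

  pattern = [re.compile('^()(.+)$'),         # scalar label: no prefix
             re.compile('^(\d+_)?(.+)$')]    # multidimensional label: optional "i_" prefix

  m = pattern[True].match('2_eulerangles')   # pattern[dimension > 1] as in the script above
  print(m.group(1), m.group(2))              # -> '2_' 'eulerangles'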


@ -1,8 +1,9 @@
#!/usr/bin/env python #!/usr/bin/env python
# -*- coding: UTF-8 no BOM -*- # -*- coding: UTF-8 no BOM -*-
import os,sys,vtk import os,vtk
import damask import damask
from collections import defaultdict
from optparse import OptionParser from optparse import OptionParser
scriptName = os.path.splitext(os.path.basename(__file__))[0] scriptName = os.path.splitext(os.path.basename(__file__))[0]
@ -17,125 +18,157 @@ Add scalar and RGB tuples from ASCIItable to existing VTK point cloud (.vtp).
""", version = scriptID) """, version = scriptID)
parser.add_option('-v', '--vtk', dest='vtk', \ parser.add_option( '--vtk',
dest = 'vtk',
type = 'string', metavar = 'string',
help = 'VTK file name') help = 'VTK file name')
parser.add_option( '--inplace',
dest = 'inplace',
action = 'store_true',
help = 'modify VTK file in-place')
parser.add_option('-r', '--render',
dest = 'render',
action = 'store_true',
help = 'open output in VTK render window')
parser.add_option('-s', '--scalar', dest='scalar', action='extend', \ parser.add_option('-s', '--scalar', dest='scalar', action='extend', \
help = 'scalar values') help = 'scalar values')
parser.add_option('-v', '--vector',
dest = 'vector',
action = 'extend', metavar = '<string LIST>',
help = 'vector value label(s)')
parser.add_option('-c', '--color', dest='color', action='extend', \ parser.add_option('-c', '--color', dest='color', action='extend', \
help = 'RGB color tuples') help = 'RGB color tuples')
parser.set_defaults(scalar = []) parser.set_defaults(scalar = [],
parser.set_defaults(color = []) vector = [],
color = [],
inplace = False,
render = False,
)
(options, filenames) = parser.parse_args() (options, filenames) = parser.parse_args()
datainfo = { # list of requested labels per datatype if not options.vtk: parser.error('No VTK file specified.')
'scalar': {'len':1, if not os.path.exists(options.vtk): parser.error('VTK file does not exist.')
'label':[]},
'color': {'len':3,
'label':[]},
}
if not os.path.exists(options.vtk):
parser.error('VTK file does not exist'); sys.exit()
reader = vtk.vtkXMLPolyDataReader() reader = vtk.vtkXMLPolyDataReader()
reader.SetFileName(options.vtk) reader.SetFileName(options.vtk)
reader.Update() reader.Update()
Npoints = reader.GetNumberOfPoints() Npoints = reader.GetNumberOfPoints()
Ncells = reader.GetNumberOfCells() Ncells = reader.GetNumberOfCells()
Nvertices = reader.GetNumberOfVerts() Nvertices = reader.GetNumberOfVerts()
Polydata = reader.GetOutput() Polydata = reader.GetOutput()
if Npoints != Ncells or Npoints != Nvertices: if Npoints != Ncells or Npoints != Nvertices:
parser.error('Number of points, cells, and vertices in VTK differ from each other'); sys.exit() parser.error('Number of points, cells, and vertices in VTK differ from each other.')
if options.scalar is not None: datainfo['scalar']['label'] += options.scalar
if options.color is not None: datainfo['color']['label'] += options.color
# ------------------------------------------ setup file handles --------------------------------------- damask.util.croak('{}: {} points, {} vertices, and {} cells...'.format(options.vtk,Npoints,Nvertices,Ncells))
files = [] # --- loop over input files -------------------------------------------------------------------------
if filenames == []:
files.append({'name':'STDIN', 'input':sys.stdin, 'output':sys.stdout, 'croak':sys.stderr})
else:
for name in filenames:
if os.path.exists(name):
files.append({'name':name, 'input':open(name), 'output':sys.stderr, 'croak':sys.stderr})
#--- loop over input files ------------------------------------------------------------------------ if filenames == []: filenames = [None]
for file in files:
if file['name'] != 'STDIN': file['croak'].write('\033[1m'+scriptName+'\033[0m: '+file['name']+'\n')
else: file['croak'].write('\033[1m'+scriptName+'\033[0m\n')
table = damask.ASCIItable(file['input'],file['output'],False) # make unbuffered ASCII_table for name in filenames:
table.head_read() # read ASCII header info try: table = damask.ASCIItable(name = name,
buffered = False,
readonly = True)
except: continue
damask.util.report(scriptName, name)
-# --------------- figure out columns to process
-  active = {}
-  column = {}
-  array  = {}
-
-  for datatype,info in datainfo.items():
-    for label in info['label']:
-      foundIt = False
-      for key in ['1_'+label,label]:
-        if key in table.labels:
-          foundIt = True
-          if datatype not in active: active[datatype] = []
-          if datatype not in column: column[datatype] = {}
-          if datatype not in array:  array[datatype]  = {}
-          active[datatype].append(label)
-          column[datatype][label] = table.labels.index(key)                # remember columns of requested data
-          if datatype == 'scalar':
-            array[datatype][label] = vtk.vtkDoubleArray()
-            array[datatype][label].SetNumberOfComponents(1)
-            array[datatype][label].SetName(label)
-          elif datatype == 'color':
-            array[datatype][label] = vtk.vtkUnsignedCharArray()
-            array[datatype][label].SetNumberOfComponents(3)
-            array[datatype][label].SetName(label)
-      if not foundIt:
-        file['croak'].write('column %s not found...\n'%label)
+# --- interpret header ----------------------------------------------------------------------------
+
+  table.head_read()
+
+  remarks  = []
+  errors   = []
+  VTKarray = {}
+  active   = defaultdict(list)
+
+  for datatype,dimension,label in [['scalar',1,options.scalar],
+                                   ['vector',3,options.vector],
+                                   ['color' ,3,options.color],
+                                  ]:
+    for i,dim in enumerate(table.label_dimension(label)):
+      me = label[i]
+      if dim == -1:         remarks.append('{} "{}" not found...'.format(datatype,me))
+      elif dim > dimension: remarks.append('"{}" not of dimension {}...'.format(me,dimension))
+      else:
+        remarks.append('adding {} "{}"...'.format(datatype,me))
+        active[datatype].append(me)
+
+        if datatype in ['scalar','vector']: VTKarray[me] = vtk.vtkDoubleArray()
+        elif datatype == 'color':           VTKarray[me] = vtk.vtkUnsignedCharArray()
+
+        VTKarray[me].SetNumberOfComponents(dimension)
+        VTKarray[me].SetName(label[i])
+
+  if remarks != []: damask.util.croak(remarks)
+  if errors  != []:
+    damask.util.croak(errors)
+    table.close(dismiss = True)
+    continue
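The new header interpretation leans on the ASCIItable column convention visible in the removed code: an n-dimensional datum "label" is stored as columns 1_label ... n_label, a scalar as a bare label. Assuming label_dimension reports that n, or -1 when absent (the real method takes a list of labels and returns one dimension per entry), its behavior can be mimicked as:

def label_dimension(headings, label):                                      # sketch of the assumed semantics
  if label in headings: return 1                                           # plain scalar column
  n = 0
  while '{}_{}'.format(n+1,label) in headings: n += 1                      # count 1_label, 2_label, ...
  return n if n > 0 else -1

headings = ['1_pos','2_pos','3_pos','phase']
print(label_dimension(headings,'pos'))                                     # 3
print(label_dimension(headings,'phase'))                                   # 1
print(label_dimension(headings,'missing'))                                 # -1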
# ------------------------------------------ process data ---------------------------------------

  while table.data_read():                                                 # read next data line of ASCII table

    for datatype,labels in active.items():                                 # loop over scalar,color
-      for label in labels:                                                # loop over all requested items
-        theData = table.data[column[datatype][label]:\
-                             column[datatype][label]+datainfo[datatype]['len']]  # read strings
-        if datatype == 'color':
-          theData = map(lambda x: int(255.*float(x)),theData)
-          array[datatype][label].InsertNextTuple3(theData[0],theData[1],theData[2],)
-        elif datatype == 'scalar':
-          array[datatype][label].InsertNextValue(float(theData[0]))
+      for me in labels:                                                   # loop over all requested items
+        theData = [table.data[i] for i in table.label_indexrange(me)]     # read strings
+        if datatype == 'color':    VTKarray[me].InsertNextTuple3(*map(lambda x: int(255.*float(x)),theData))
+        elif datatype == 'vector': VTKarray[me].InsertNextTuple3(*map(float,theData))
+        elif datatype == 'scalar': VTKarray[me].InsertNextValue(float(theData[0]))
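The color branch rescales fractional RGB strings from the table into the 0..255 unsigned-char range that vtkUnsignedCharArray expects, for instance:

rgb = ['0.0','0.5','1.0']                                                  # strings as read from the table
print(map(lambda x: int(255.*float(x)), rgb))                              # [0, 127, 255] (Python 2, as used here)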
  table.input_close()                                                      # close input ASCII table
# ------------------------------------------ add data ---------------------------------------

  for datatype,labels in active.items():                                   # loop over scalar,color
    if datatype == 'color':
-      Polydata.GetPointData().SetScalars(array[datatype][labels[0]])
-      Polydata.GetCellData().SetScalars(array[datatype][labels[0]])
-    for label in labels:                                                  # loop over all requested items
-      Polydata.GetPointData().AddArray(array[datatype][label])
-      Polydata.GetCellData().AddArray(array[datatype][label])
+      Polydata.GetPointData().SetScalars(VTKarray[active['color'][0]])
+      Polydata.GetCellData().SetScalars(VTKarray[active['color'][0]])
+    for me in labels:                                                     # loop over all requested items
+      Polydata.GetPointData().AddArray(VTKarray[me])
+      Polydata.GetCellData().AddArray(VTKarray[me])

  Polydata.Modified()
-  if vtk.VTK_MAJOR_VERSION <= 5:
-    Polydata.Update()
+  if vtk.VTK_MAJOR_VERSION <= 5: Polydata.Update()
# ------------------------------------------ output result ---------------------------------------

  writer = vtk.vtkXMLPolyDataWriter()
  writer.SetDataModeToBinary()
  writer.SetCompressorTypeToZLib()
-  writer.SetFileName(os.path.splitext(options.vtk)[0]+'_added.vtp')
-  if vtk.VTK_MAJOR_VERSION <= 5:
-    writer.SetInput(Polydata)
-  else:
-    writer.SetInputData(Polydata)
+  writer.SetFileName(os.path.splitext(options.vtk)[0]+('.vtp' if options.inplace else '_added.vtp'))
+  if vtk.VTK_MAJOR_VERSION <= 5: writer.SetInput(Polydata)
+  else:                          writer.SetInputData(Polydata)
  writer.Write()
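The new --inplace switch only changes the output name: without it a sibling file tagged '_added' is written, with it the source .vtp is overwritten. Illustration with a hypothetical input name:

import os

vtkfile = 'cloud.vtp'                                                      # hypothetical input
for inplace in [False, True]:
  print(os.path.splitext(vtkfile)[0]+('.vtp' if inplace else '_added.vtp'))
# cloud_added.vtp
# cloud.vtp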
+# ------------------------------------------ render result ---------------------------------------
+
+  if options.render:
+    mapper = vtk.vtkDataSetMapper()
+    mapper.SetInputData(Polydata)
+    actor = vtk.vtkActor()
+    actor.SetMapper(mapper)
+
+    # Create the graphics structure. The renderer renders into the
+    # render window. The render window interactor captures mouse events
+    # and will perform appropriate camera or actor manipulation
+    # depending on the nature of the events.
+
+    ren = vtk.vtkRenderer()
+
+    renWin = vtk.vtkRenderWindow()
+    renWin.AddRenderer(ren)
+
+    ren.AddActor(actor)
+    ren.SetBackground(1, 1, 1)
+    renWin.SetSize(200, 200)
+
+    iren = vtk.vtkRenderWindowInteractor()
+    iren.SetRenderWindow(renWin)
+
+    iren.Initialize()
+    renWin.Render()
+    iren.Start()

View File

@ -83,9 +83,9 @@ damask.util.croak('{}: {} points and {} cells...'.format(options.vtk,Npoints,Nce
if filenames == []: filenames = [None]

for name in filenames:
-  try:
-    table = damask.ASCIItable(name = name,
-                              buffered = False, readonly = True)
+  try:    table = damask.ASCIItable(name = name,
+                                    buffered = False,
+                                    readonly = True)
  except: continue
  damask.util.report(scriptName, name)
@ -133,6 +133,8 @@ for name in filenames:
        elif datatype == 'vector': VTKarray[me].InsertNextTuple3(*map(float,theData))
        elif datatype == 'scalar': VTKarray[me].InsertNextValue(float(theData[0]))

+  table.input_close()                                                     # close input ASCII table
# ------------------------------------------ add data ---------------------------------------

  for datatype,labels in active.items():                                   # loop over scalar,color

View File

@ -18,12 +18,12 @@ Produce a VTK point cloud dataset based on coordinates given in an ASCIItable.
""", version = scriptID) """, version = scriptID)
parser.add_option('-d', '--deformed', parser.add_option('-c', '--coordinates',
dest = 'deformed', dest = 'pos',
type = 'string', metavar = 'string', type = 'string', metavar = 'string',
help = 'deformed coordinate label [%default]') help = 'coordinate label [%default]')
parser.set_defaults(deformed = 'ipdeformedcoord' parser.set_defaults(pos = 'pos'
) )
(options, filenames) = parser.parse_args() (options, filenames) = parser.parse_args()
@ -46,9 +46,9 @@ for name in filenames:
  errors  = []
  remarks = []

-  coordDim = table.label_dimension(options.deformed)
-  if not 3 >= coordDim >= 1: errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.deformed))
-  elif coordDim < 3:         remarks.append('appending {} dimensions to coordinates "{}"...'.format(3-coordDim,options.deformed))
+  coordDim = table.label_dimension(options.pos)
+  if not 3 >= coordDim >= 1: errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.pos))
+  elif coordDim < 3:         remarks.append('appending {} dimensions to coordinates "{}"...'.format(3-coordDim,options.pos))

  if remarks != []: damask.util.croak(remarks)
  if errors != []:
@ -58,7 +58,7 @@ for name in filenames:
# ------------------------------------------ process data ---------------------------------------

-  table.data_readArray(options.deformed)
+  table.data_readArray(options.pos)

  if len(table.data.shape) < 2: table.data.shape += (1,)                   # expand to 2D shape
  if table.data.shape[1] < 3:
    table.data = np.hstack((table.data,
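The padding that this hunk truncates simply appends zero-valued columns until the coordinates are three-dimensional, e.g.:

import numpy as np

data = np.array([[0.,0.],[1.,0.],[0.,1.]])                                 # three 2D points
data = np.hstack((data, np.zeros((data.shape[0], 3-data.shape[1]))))       # pad with a zero z-column
print(data.shape)                                                          # (3, 3)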

View File

@ -38,9 +38,9 @@ parser.set_defaults(position ='ipinitialcoord',
if filenames == []: filenames = [None]

for name in filenames:
-  try:
-    table = damask.ASCIItable(name = name,
-                              buffered = False, readonly = True)
+  try:    table = damask.ASCIItable(name = name,
+                                    buffered = False,
+                                    readonly = True)
  except: continue
  damask.util.report(scriptName,name)
@ -48,10 +48,13 @@ for name in filenames:
  table.head_read()

-  errors = []
-  if table.label_dimension(options.position) != 3:
-    errors.append('coordinates {} are not a vector.'.format(options.position))
+  remarks = []
+  errors  = []
+  coordDim = table.label_dimension(options.position)
+  if not 3 >= coordDim >= 1: errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.position))
+  elif coordDim < 3:         remarks.append('appending {} dimensions to coordinates "{}"...'.format(3-coordDim,options.position))
+
+  if remarks != []: damask.util.croak(remarks)
  if errors != []:
    damask.util.croak(errors)
    table.close(dismiss=True)
@ -60,6 +63,11 @@ for name in filenames:
# --------------- figure out size and grid ---------------------------------------------------------

  table.data_readArray(options.position)

+  if len(table.data.shape) < 2: table.data.shape += (1,)                  # expand to 2D shape
+  if table.data.shape[1] < 3:
+    table.data = np.hstack((table.data,
+                            np.zeros((table.data.shape[0],
+                                      3-table.data.shape[1]),dtype='f'))) # fill coords up to 3D with zeros
+
  coords = [np.unique(table.data[:,i]) for i in xrange(3)]

  if options.mode == 'cell':
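The np.unique line above infers the grid from the data itself: on a regular point set, the number of distinct values per coordinate direction is the grid dimension. A small sketch:

import numpy as np

pts = np.array([[x,y,0.] for y in (0.,1.) for x in (0.,.5,1.)])            # regular 3 x 2 x 1 point set
coords = [np.unique(pts[:,i]) for i in range(3)]
print(map(len, coords))                                                    # [3, 2, 1]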

View File

@ -4,7 +4,7 @@
import os,sys,math
import numpy as np
import multiprocessing
-from optparse import OptionParser
+from optparse import OptionParser,OptionGroup
from scipy import spatial
import damask
@ -109,71 +109,83 @@ Generate geometry description and material configuration by standard Voronoi tes
""", version = scriptID) """, version = scriptID)
parser.add_option('-g', '--grid',
dest = 'grid', group = OptionGroup(parser, "Tessellation","")
type = 'int', nargs = 3, metavar = ' '.join(['int']*3),
help = 'a,b,c grid of hexahedral box [auto]') group.add_option('-l', '--laguerre',
parser.add_option('-s', '--size',
dest = 'size',
type = 'float', nargs = 3, metavar=' '.join(['float']*3),
help = 'x,y,z size of hexahedral box [auto]')
parser.add_option('-o', '--origin',
dest = 'origin',
type = 'float', nargs = 3, metavar=' '.join(['float']*3),
help = 'offset from old to new origin of grid')
parser.add_option('-p', '--position',
dest = 'position',
type = 'string', metavar = 'string',
help = 'column label for seed positions [%default]')
parser.add_option('-w', '--weight',
dest = 'weight',
type = 'string', metavar = 'string',
help = 'column label for seed weights [%default]')
parser.add_option('-m', '--microstructure',
dest = 'microstructure',
type = 'string', metavar = 'string',
help = 'column label for seed microstructures [%default]')
parser.add_option('-e', '--eulers',
dest = 'eulers',
type = 'string', metavar = 'string',
help = 'column label for seed Euler angles [%default]')
parser.add_option('--axes',
dest = 'axes',
type = 'string', nargs = 3, metavar = ' '.join(['string']*3),
help = 'orientation coordinate frame in terms of position coordinate frame')
parser.add_option('--homogenization',
dest = 'homogenization',
type = 'int', metavar = 'int',
help = 'homogenization index to be used [%default]')
parser.add_option('--crystallite',
dest = 'crystallite',
type = 'int', metavar = 'int',
help = 'crystallite index to be used [%default]')
parser.add_option('--phase',
dest = 'phase',
type = 'int', metavar = 'int',
help = 'phase index to be used [%default]')
parser.add_option('-r', '--rnd',
dest = 'randomSeed',
type = 'int', metavar='int',
help = 'seed of random number generator for second phase distribution [%default]')
parser.add_option('--secondphase',
dest = 'secondphase',
type = 'float', metavar= 'float',
help = 'volume fraction of randomly distribute second phase [%default]')
parser.add_option('-l', '--laguerre',
dest = 'laguerre', dest = 'laguerre',
action = 'store_true', action = 'store_true',
help = 'use Laguerre (weighted Voronoi) tessellation') help = 'use Laguerre (weighted Voronoi) tessellation')
parser.add_option('--cpus', group.add_option('--cpus',
dest = 'cpus', dest = 'cpus',
type = 'int', metavar = 'int', type = 'int', metavar = 'int',
help = 'number of parallel processes to use for Laguerre tessellation [%default]') help = 'number of parallel processes to use for Laguerre tessellation [%default]')
parser.add_option('--nonperiodic', group.add_option('--nonperiodic',
dest = 'nonperiodic', dest = 'nonperiodic',
action = 'store_true', action = 'store_true',
help = 'use nonperiodic tessellation') help = 'use nonperiodic tessellation')
parser.add_option_group(group)
group = OptionGroup(parser, "Geometry","")
group.add_option('-g', '--grid',
dest = 'grid',
type = 'int', nargs = 3, metavar = ' '.join(['int']*3),
help = 'a,b,c grid of hexahedral box [auto]')
group.add_option('-s', '--size',
dest = 'size',
type = 'float', nargs = 3, metavar=' '.join(['float']*3),
help = 'x,y,z size of hexahedral box [auto]')
group.add_option('-o', '--origin',
dest = 'origin',
type = 'float', nargs = 3, metavar=' '.join(['float']*3),
help = 'origin of grid')
parser.add_option_group(group)
group = OptionGroup(parser, "Seeds","")
group.add_option('-p', '--position',
dest = 'position',
type = 'string', metavar = 'string',
help = 'column label for seed positions [%default]')
group.add_option('-w', '--weight',
dest = 'weight',
type = 'string', metavar = 'string',
help = 'column label for seed weights [%default]')
group.add_option('-m', '--microstructure',
dest = 'microstructure',
type = 'string', metavar = 'string',
help = 'column label for seed microstructures [%default]')
group.add_option('-e', '--eulers',
dest = 'eulers',
type = 'string', metavar = 'string',
help = 'column label for seed Euler angles [%default]')
group.add_option('--axes',
dest = 'axes',
type = 'string', nargs = 3, metavar = ' '.join(['string']*3),
help = 'orientation coordinate frame in terms of position coordinate frame')
parser.add_option_group(group)
group = OptionGroup(parser, "Configuration","")
group.add_option('--homogenization',
dest = 'homogenization',
type = 'int', metavar = 'int',
help = 'homogenization index to be used [%default]')
group.add_option('--crystallite',
dest = 'crystallite',
type = 'int', metavar = 'int',
help = 'crystallite index to be used [%default]')
group.add_option('--phase',
dest = 'phase',
type = 'int', metavar = 'int',
help = 'phase index to be used [%default]')
parser.add_option_group(group)
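The reorganization above is the standard optparse OptionGroup pattern: options are declared on a group, and the group is attached to the parser so --help lists them under their heading. In miniature (the option shown is just one from the diff):

from optparse import OptionParser, OptionGroup

parser = OptionParser()
group = OptionGroup(parser, "Tessellation", "")
group.add_option('-l', '--laguerre', dest = 'laguerre', action = 'store_true',
                 help = 'use Laguerre (weighted Voronoi) tessellation')
parser.add_option_group(group)                                             # group now shows up in --help
(options, args) = parser.parse_args(['-l'])
print(options.laguerre)                                                    # True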
parser.set_defaults(position = 'pos',
                    weight = 'weight',
                    microstructure = 'microstructure',
@ -181,26 +193,20 @@ parser.set_defaults(position = 'pos',
                    homogenization = 1,
                    crystallite = 1,
                    phase = 1,
-                    secondphase = 0.0,
                    cpus = 2,
                    laguerre = False,
                    nonperiodic = False,
-                    randomSeed = None,
                   )

(options,filenames) = parser.parse_args()

-if options.secondphase > 1.0 or options.secondphase < 0.0:
-  parser.error('volume fraction of second phase ({}) out of bounds.'.format(options.secondphase))
# --- loop over input files -------------------------------------------------------------------------

if filenames == []: filenames = [None]

for name in filenames:
-  try:
-    table = damask.ASCIItable(name = name,
-                              outname = os.path.splitext(name)[-2]+'.geom' if name else name,
-                              buffered = False)
+  try:    table = damask.ASCIItable(name = name,
+                                    outname = os.path.splitext(name)[-2]+'.geom' if name else name,
+                                    buffered = False)
  except: continue
  damask.util.report(scriptName,name)
@ -294,20 +300,11 @@ for name in filenames:
  config_header = []
  formatwidth = 1+int(math.log10(NgrainIDs))

-  phase = options.phase * np.ones(NgrainIDs,'i')
-  if int(options.secondphase*info['microstructures']) > 0:
-    phase[0:int(options.secondphase*info['microstructures'])] += 1         # alter fraction 'options.secondphase' of used grainIDs
-    randomSeed = options.randomSeed if options.randomSeed \
-                                    else int(os.urandom(4).encode('hex'), 16)  # random seed for second phase
-    np.random.seed(randomSeed)
-    np.random.shuffle(phase)
-    config_header += ['# random seed (phase shuffling): {}'.format(randomSeed)]
-
  config_header += ['<microstructure>']
  for i,ID in enumerate(grainIDs):
    config_header += ['[Grain%s]'%(str(ID).zfill(formatwidth)),
                      'crystallite %i'%options.crystallite,
-                      '(constituent)\tphase %i\ttexture %s\tfraction 1.0'%(phase[i],str(ID).rjust(formatwidth)),
+                      '(constituent)\tphase %i\ttexture %s\tfraction 1.0'%(options.phase,str(ID).rjust(formatwidth)),
                     ]
  if hasEulers:
    config_header += ['<texture>']
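The grain labels are zero-padded to formatwidth, the digit count of the largest grain ID, so that section names align; for example:

import math

NgrainIDs = 42                                                             # hypothetical grain count
formatwidth = 1+int(math.log10(NgrainIDs))                                 # -> 2 digits
print('[Grain%s]'%(str(7).zfill(formatwidth)))                             # [Grain07]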

View File

@ -48,8 +48,11 @@ parser.add_option('-m', '--microstructure',
parser.add_option('-r', '--rnd',
                  dest = 'randomSeed', type = 'int', metavar = 'int',
                  help = 'seed of random number generator [%default]')
+parser.add_option('--format',
+                  dest = 'format', type = 'string', metavar = 'string',
+                  help = 'number format of output [auto]')
group = OptionGroup(parser, "Laguerre Tessellation Options", group = OptionGroup(parser, "Laguerre Tessellation",
"Parameters determining shape of weight distribution of seed points" "Parameters determining shape of weight distribution of seed points"
) )
group.add_option('-w', '--weights', group.add_option('-w', '--weights',
@ -70,8 +73,8 @@ group.add_option('--sigma',
                 help='standard deviation of normal distribution for weights [%default]')
parser.add_option_group(group)

-group = OptionGroup(parser, "Selective Seeding Options",
-                    "More uniform distribution of seed points using Mitchell\'s Best Candidate Algorithm"
+group = OptionGroup(parser, "Selective Seeding",
+                    "More uniform distribution of seed points using Mitchell's Best Candidate Algorithm"
                   )
group.add_option('-s','--selective',
                 action = 'store_true',
@ -103,6 +106,7 @@ parser.set_defaults(randomSeed = None,
                    force = False,
                    distance = 0.2,
                    numCandidates = 10,
+                    format = None,
                   )

(options,filenames) = parser.parse_args()
@ -215,7 +219,7 @@ for name in filenames:
# --- write seeds information ------------------------------------------------------------

  table.data = seeds
-  table.data_writeArray()
+  table.data_writeArray(fmt = options.format)

# --- output finalization --------------------------------------------------------------------------
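The new fmt hook presumably forwards a '%'-style format string to numpy-like output (cf. np.savetxt); a hedged sketch of the effect:

import sys
import numpy as np

seeds = np.array([[0.123456789, 0.5, 0.25]])
np.savetxt(sys.stdout, seeds, fmt = '%.4e')                                # 1.2346e-01 5.0000e-01 2.5000e-01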