Merge branch 'development' into CCodeUse

commit ca5ed22d66 (Martin Diehl, 2016-03-21 19:47:09 +01:00)
16 changed files with 413 additions and 250 deletions


@@ -1 +1 @@
-v2.0.0-15-g3898fb2
+v2.0.0-43-ge39441f


@@ -3569,11 +3569,7 @@ logical function crystallite_integrateStress(&
   maxticks
 external :: &
-#if(FLOAT==8)
   dgesv
-#elif(FLOAT==4)
-  sgesv
-#endif
 !* be pessimistic
 crystallite_integrateStress = .false.
@@ -3756,11 +3752,7 @@ logical function crystallite_integrateStress(&
 - math_Plain3333to99(math_mul3333xx3333(math_mul3333xx3333(dLp_dT3333,dT_dFe3333),dFe_dLp3333))
 dRLp_dLp2 = dRLp_dLp ! will be overwritten in first call to LAPACK routine
 work = math_plain33to9(residuumLp)
-#if(FLOAT==8)
 call dgesv(9,1,dRLp_dLp2,9,ipiv,work,9,ierr) ! solve dRLp/dLp * delta Lp = -res for delta Lp
-#elif(FLOAT==4)
-call sgesv(9,1,dRLp_dLp2,9,ipiv,work,9,ierr) ! solve dRLp/dLp * delta Lp = -res for delta Lp
-#endif
 if (ierr /= 0_pInt) then
 #ifndef _OPENMP
 if (iand(debug_level(debug_crystallite), debug_levelBasic) /= 0_pInt) then
@@ -3849,31 +3841,27 @@ logical function crystallite_integrateStress(&
 math_mul3333xx3333(dT_dFi3333, dFi_dLi3333))) &
 - math_Plain3333to99(math_mul3333xx3333(dLi_dFi3333, dFi_dLi3333))
 work = math_plain33to9(residuumLi)
-#if(FLOAT==8)
 call dgesv(9,1,dRLi_dLi,9,ipiv,work,9,ierr) ! solve dRLi/dLp * delta Li = -res for delta Li
-#elif(FLOAT==4)
-call sgesv(9,1,dRLi_dLi,9,ipiv,work,9,ierr) ! solve dRLi/dLp * delta Li = -res for delta Li
-#endif
 if (ierr /= 0_pInt) then
 #ifndef _OPENMP
 if (iand(debug_level(debug_crystallite), debug_levelBasic) /= 0_pInt) then
 write(6,'(a,i8,1x,a,i8,a,1x,i2,1x,i3,a,i3)') '<< CRYST >> integrateStress failed on dR/dLi inversion at el ip ipc ', &
 el,mesh_element(1,el),ip,ipc
 if (iand(debug_level(debug_crystallite), debug_levelExtensive) /= 0_pInt &
 .and. ((el == debug_e .and. ip == debug_i .and. ipc == debug_g)&
 .or. .not. iand(debug_level(debug_crystallite), debug_levelSelective) /= 0_pInt)) then
 write(6,*)
 write(6,'(a,/,9(12x,9(e15.3,1x)/))') '<< CRYST >> dR_dLi',transpose(dRLi_dLi)
 write(6,'(a,/,9(12x,9(e15.3,1x)/))') '<< CRYST >> dFe_dLi',transpose(math_Plain3333to99(dFe_dLi3333))
 write(6,'(a,/,9(12x,9(e15.3,1x)/))') '<< CRYST >> dT_dFi_constitutive',transpose(math_Plain3333to99(dT_dFi3333))
 write(6,'(a,/,9(12x,9(e15.3,1x)/))') '<< CRYST >> dLi_dT_constitutive',transpose(math_Plain3333to99(dLi_dT3333))
 write(6,'(a,/,3(12x,3(e20.7,1x)/))') '<< CRYST >> Li_constitutive',math_transpose33(Li_constitutive)
 write(6,'(a,/,3(12x,3(e20.7,1x)/))') '<< CRYST >> Liguess',math_transpose33(Liguess)
 endif
 endif
 #endif
 return
 endif
 deltaLi = - math_plain9to33(work)
 endif
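Both crystallite hunks make the same simplification: with single precision gone, the Newton correction is always obtained from LAPACK's double-precision dgesv, which solves the 9x9 linearized system in place. A minimal sketch of that step in Python (illustrative only, using SciPy's dgesv wrapper; the matrix and residual below are stand-ins, not DAMASK quantities):

    import numpy as np
    from scipy.linalg.lapack import dgesv

    dRLp_dLp = np.eye(9) + 0.01*np.random.rand(9, 9)  # stand-in for the 9x9 Jacobian dR/dLp
    residuum = np.random.rand(9)                      # stand-in for the flattened residuum

    # dgesv solves A*x = b by LU factorization, like the Fortran call above
    lu, piv, delta, info = dgesv(dRLp_dLp, residuum)
    if info != 0:
        raise RuntimeError('dgesv failed: info = {}'.format(info))
    deltaLp = -delta.reshape(3, 3)                    # mirrors deltaLp = - math_plain9to33(work)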


@@ -2182,7 +2182,7 @@ pure function lattice_qDisorientation(Q1, Q2, struct)
 real(pReal), dimension(4) :: lattice_qDisorientation
 real(pReal), dimension(4), intent(in) :: &
 Q1, & ! 1st orientation
 Q2 ! 2nd orientation
 integer(kind(LATTICE_undefined_ID)), optional, intent(in) :: & ! if given, symmetries between the two orientation will be considered
 struct


@@ -186,10 +186,6 @@ module math
 halton_seed_set, &
 i_to_halton, &
 prime
-external :: &
- dsyev, &
- dgetrf, &
- dgetri
 contains
@@ -811,15 +807,13 @@ function math_invSym3333(A)
 integer(pInt), dimension(6) :: ipiv6
 real(pReal), dimension(6,6) :: temp66_Real
 real(pReal), dimension(6) :: work6
+external :: &
+ dgetrf, &
+ dgetri
 temp66_real = math_Mandel3333to66(A)
-#if(FLOAT==8)
 call dgetrf(6,6,temp66_real,6,ipiv6,ierr)
 call dgetri(6,temp66_real,6,ipiv6,work6,6,ierr)
-#elif(FLOAT==4)
-call sgetrf(6,6,temp66_real,6,ipiv6,ierr)
-call sgetri(6,temp66_real,6,ipiv6,work6,6,ierr)
-#endif
 if (ierr == 0_pInt) then
 math_invSym3333 = math_Mandel66to3333(temp66_real)
 else
@@ -847,13 +841,8 @@ subroutine math_invert(myDim,A, InvA, error)
 logical, intent(out) :: error
 invA = A
-#if(FLOAT==8)
 call dgetrf(myDim,myDim,invA,myDim,ipiv,ierr)
 call dgetri(myDim,InvA,myDim,ipiv,work,myDim,ierr)
-#elif(FLOAT==4)
-call sgetrf(myDim,myDim,invA,myDim,ipiv,ierr)
-call sgetri(myDim,InvA,myDim,ipiv,work,myDim,ierr)
-#endif
 error = merge(.true.,.false., ierr /= 0_pInt) ! http://fortraninacworld.blogspot.de/2012/12/ternary-operator.html
 end subroutine math_invert
@@ -1937,16 +1926,13 @@ subroutine math_eigenValuesVectorsSym(m,values,vectors,error)
 real(pReal), dimension(size(m,1)), intent(out) :: values
 real(pReal), dimension(size(m,1),size(m,1)), intent(out) :: vectors
 logical, intent(out) :: error
 integer(pInt) :: info
 real(pReal), dimension((64+2)*size(m,1)) :: work ! block size of 64 taken from http://www.netlib.org/lapack/double/dsyev.f
+external :: &
+ dsyev
 vectors = m ! copy matrix to input (doubles as output) array
-#if(FLOAT==8)
 call dsyev('V','U',size(m,1),vectors,size(m,1),values,work,(64+2)*size(m,1),info)
-#elif(FLOAT==4)
-call ssyev('V','U',size(m,1),vectors,size(m,1),values,work,(64+2)*size(m,1),info)
-#endif
 error = (info == 0_pInt)
 end subroutine math_eigenValuesVectorsSym
@@ -2135,16 +2121,13 @@ function math_eigenvaluesSym(m)
 real(pReal), dimension(:,:), intent(in) :: m
 real(pReal), dimension(size(m,1)) :: math_eigenvaluesSym
 real(pReal), dimension(size(m,1),size(m,1)) :: vectors
 integer(pInt) :: info
 real(pReal), dimension((64+2)*size(m,1)) :: work ! block size of 64 taken from http://www.netlib.org/lapack/double/dsyev.f
+external :: &
+ dsyev
 vectors = m ! copy matrix to input (doubles as output) array
-#if(FLOAT==8)
 call dsyev('N','U',size(m,1),vectors,size(m,1),math_eigenvaluesSym,work,(64+2)*size(m,1),info)
-#elif(FLOAT==4)
-call ssyev('N','U',size(m,1),vectors,size(m,1),math_eigenvaluesSym,work,(64+2)*size(m,1),info)
-#endif
 if (info /= 0_pInt) math_eigenvaluesSym = DAMASK_NaN
 end function math_eigenvaluesSym
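For reference, the dsyev calls kept above compute eigenvalues (and, with 'V', eigenvectors) of a symmetric matrix stored in its upper triangle; NumPy wraps the corresponding LAPACK family. A small equivalence sketch (illustrative, not DAMASK code):

    import numpy as np

    m = np.array([[2., 1., 0.],
                  [1., 2., 0.],
                  [0., 0., 3.]])
    values, vectors = np.linalg.eigh(m)     # like dsyev('V','U',...): ascending eigenvalues plus eigenvectors
    values_only     = np.linalg.eigvalsh(m) # like dsyev('N','U',...) in math_eigenvaluesSym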


@@ -4,9 +4,9 @@
 !> @author Christoph Kords, Max-Planck-Institut für Eisenforschung GmbH
 !> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH
 !> @author Luv Sharma, Max-Planck-Institut für Eisenforschung GmbH
-!> @brief setting precision for real and int type depending on makros "FLOAT" and "INT"
+!> @brief setting precision for real and int type
 !> @details setting precision for real and int type and for DAMASK_NaN. Definition is made
-!! depending on makros "FLOAT" and "INT" defined during compilation
+!! depending on makro "INT" defined during compilation
 !! for details on NaN see https://software.intel.com/en-us/forums/topic/294680
 !--------------------------------------------------------------------------------------------------
 module prec
@@ -18,18 +18,7 @@ module prec
 implicit none
 private
-#if (FLOAT==4)
-#if defined(Spectral) || defined(FEM)
-SPECTRAL SOLVER AND OWN FEM DO NOT SUPPORT SINGLE PRECISION, STOPPING COMPILATION
-#endif
-integer, parameter, public :: pReal = 4 !< floating point single precition (was selected_real_kind(6,37), number with 6 significant digits, up to 1e+-37)
-#ifdef __INTEL_COMPILER
-real(pReal), parameter, public :: DAMASK_NaN = Z'7F800001' !< quiet NaN for single precision (from http://www.hpc.unimelb.edu.au/doc/f90lrm/dfum_035.html, copy can be found in documentation/Code/Fortran)
-#endif
-#ifdef __GFORTRAN__
-real(pReal), parameter, public :: DAMASK_NaN = real(Z'7F800001', pReal) !< quiet NaN for single precision (from http://www.hpc.unimelb.edu.au/doc/f90lrm/dfum_035.html, copy can be found in documentation/Code/Fortran)
-#endif
-#elif (FLOAT==8)
+#if (FLOAT==8)
 integer, parameter, public :: pReal = 8 !< floating point double precision (was selected_real_kind(15,300), number with 15 significant digits, up to 1e+-300)
 #ifdef __INTEL_COMPILER
 real(pReal), parameter, public :: DAMASK_NaN = Z'7FF8000000000000' !< quiet NaN for double precision (from http://www.hpc.unimelb.edu.au/doc/f90lrm/dfum_035.html, copy can be found in documentation/Code/Fortran)
@@ -172,9 +161,9 @@ end subroutine prec_init
 !--------------------------------------------------------------------------------------------------
 !> @brief figures out if a floating point number is NaN
-! basically just a small wrapper, because gfortran < 4.9 does not have the IEEE module
+! basically just a small wrapper, because gfortran < 5.0 does not have the IEEE module
 !--------------------------------------------------------------------------------------------------
-logical elemental function prec_isNaN(a)
+logical elemental pure function prec_isNaN(a)
 implicit none
 real(pReal), intent(in) :: a
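The Z'7FF8000000000000' literal kept above is the IEEE-754 quiet-NaN bit pattern for doubles (exponent all ones, leading mantissa bit set), and wrappers like prec_isNaN can rely on NaN being the only value unequal to itself. A quick Python check of both facts (illustrative, not part of the commit):

    import struct

    bits  = 0x7FF8000000000000                               # exponent all ones, leading mantissa bit set
    value = struct.unpack('>d', struct.pack('>Q', bits))[0]  # reinterpret the 64 bits as a double
    print(value)           # nan
    print(value != value)  # True: the self-inequality test such NaN wrappers fall back on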
@@ -187,4 +176,30 @@ logical elemental function prec_isNaN(a)
 #endif
 end function prec_isNaN
+
+!--------------------------------------------------------------------------------------------------
+!> @brief equality comparison for double precision
+! replaces "==" but for certain (relative) tolerance. Counterpart to dNeq
+! http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm
+!--------------------------------------------------------------------------------------------------
+logical elemental pure function dEq(a,b,tol)
+
+ real(pReal), intent(in) :: a,b
+ real(pReal), intent(in), optional :: tol
+ real(pReal), parameter :: eps = 2.2204460492503131E-16 ! DBL_EPSILON in C
+
+ dEq = merge(.True., .False.,abs(a-b) <= merge(tol,eps,present(tol))*maxval(abs([a,b])))
+end function dEq
+
+!--------------------------------------------------------------------------------------------------
+!> @brief inequality comparison for double precision
+! replaces "!=" but for certain (relative) tolerance. Counterpart to dEq
+! http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm
+!--------------------------------------------------------------------------------------------------
+logical elemental pure function dNeq(a,b,tol)
+
+ real(pReal), intent(in) :: a,b
+ real(pReal), intent(in), optional :: tol
+ real(pReal), parameter :: eps = 2.2204460492503131E-16 ! DBL_EPSILON in C
+
+ dNeq = merge(.False., .True.,abs(a-b) <= merge(tol,eps,present(tol))*maxval(abs([a,b])))
+end function dNeq
+
 end module prec
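The new dEq/dNeq pair follows the standard relative-comparison idiom from the linked Bruce Dawson article: two doubles count as equal when their difference is within a tolerance (machine epsilon by default) scaled by the larger magnitude. The same logic as a Python sketch (assumption: it mirrors the Fortran above):

    import sys

    def dEq(a, b, tol=None):
        eps = sys.float_info.epsilon if tol is None else tol  # 2.220446049250313e-16, DBL_EPSILON
        return abs(a - b) <= eps * max(abs(a), abs(b))

    assert dEq(1.0, 1.0 + 1e-17)      # indistinguishable at double precision
    assert not dEq(1.0, 1.0 + 1e-9)   # relative error 1e-9 exceeds eps
    assert dEq(1.0, 1.001, tol=1e-2)  # passes with a loose explicit tolerance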


@@ -517,22 +517,27 @@ class ASCIItable():
 # ------------------------------------------------------------------
   def microstructure_read(self,
-                          grid):
+                          grid,
+                          type = 'i',
+                          strict = False):
     """read microstructure data (from .geom format)"""
-    N = grid.prod()                                        # expected number of microstructure indices in data
-    microstructure = np.zeros(N,'i')                       # initialize as flat array
+    def datatype(item):
+      return int(item) if type.lower() == 'i' else float(item)
+
+    N = grid.prod()                                        # expected number of microstructure indices in data
+    microstructure = np.zeros(N,type)                      # initialize as flat array
+
     i = 0
     while i < N and self.data_read():
       items = self.data
       if len(items) > 2:
-        if   items[1].lower() == 'of': items = [int(items[2])]*int(items[0])
-        elif items[1].lower() == 'to': items = range(int(items[0]),1+int(items[2]))
-        else:                          items = map(int,items)
-      else:                            items = map(int,items)
+        if   items[1].lower() == 'of': items = np.ones(datatype(items[0]))*datatype(items[2])
+        elif items[1].lower() == 'to': items = np.arange(datatype(items[0]),1+datatype(items[2]))
+        else:                          items = map(datatype,items)
+      else:                            items = map(datatype,items)
+
       s = min(len(items), N-i)                             # prevent overflow of microstructure array
       microstructure[i:i+s] = items[:s]
-      i += s
-    return microstructure
+      i += len(items)
+
+    return (microstructure, i == N and not self.data_read()) if strict else microstructure # check for proper point count and end of file
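For context, .geom data lines allow two compact notations that microstructure_read expands: "n of v" repeats value v n times, and "a to b" produces the inclusive range a..b; anything else is read as a plain list. A sketch of that expansion (hypothetical helper, mirroring the logic above):

    def expand(tokens, datatype=int):
        """expand one .geom data line (sketch of microstructure_read's inner logic)"""
        if len(tokens) > 2 and tokens[1].lower() == 'of':
            return [datatype(tokens[2])] * int(tokens[0])         # "4 of 2" -> [2, 2, 2, 2]
        if len(tokens) > 2 and tokens[1].lower() == 'to':
            return list(range(int(tokens[0]), int(tokens[2])+1))  # "1 to 5" -> [1, 2, 3, 4, 5]
        return [datatype(t) for t in tokens]                      # "3 7 3"  -> [3, 7, 3]

    assert expand('4 of 2'.split()) == [2, 2, 2, 2]
    assert expand('1 to 5'.split()) == [1, 2, 3, 4, 5]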


@@ -22,11 +22,11 @@ class Test():
 logger = logging.getLogger()
 logger.setLevel(0)
 fh = logging.FileHandler('test.log') # create file handler which logs even debug messages
 fh.setLevel(logging.DEBUG)
 full = logging.Formatter('%(asctime)s - %(levelname)s: \n%(message)s')
 fh.setFormatter(full)
 ch = logging.StreamHandler(stream=sys.stdout) # create console handler with a higher log level
 ch.setLevel(logging.INFO)
 # create formatter and add it to the handlers
 plain = logging.Formatter('%(message)s')
@@ -60,10 +60,10 @@ class Test():
 try:
 self.postprocess(variant)
 if not self.compare(variant):
 return variant+1 # return culprit
 except Exception as e :
-logging.critical('\nWARNING:\n %s\n'%e)
+logging.critical('\nWARNING:\n {}\n'.format(e))
 return variant+1 # return culprit
 return 0
 else:
 if not self.testPossible(): return -1
@@ -74,13 +74,13 @@ class Test():
 self.prepare(variant)
 self.run(variant)
 self.postprocess(variant)
 if self.updateRequested: # update requested
 self.update(variant)
 elif not (self.options.accept or self.compare(variant)): # no update, do comparison
 return variant+1 # return culprit
 except Exception as e :
-logging.critical('\nWARNING:\n %s\n'%e)
+logging.critical('\nWARNING:\n {}\n'.format(e))
 return variant+1 # return culprit
 return 0
 def testPossible(self):
@@ -94,13 +94,13 @@ class Test():
 try:
 shutil.rmtree(self.dirCurrent())
 except:
-logging.warning('removal of directory "%s" not possible...'%(self.dirCurrent()))
+logging.warning('removal of directory "{}" not possible...'.format(self.dirCurrent()))
 status = status and False
 try:
 os.mkdir(self.dirCurrent())
 except:
-logging.critical('creation of directory "%s" failed...'%(self.dirCurrent()))
+logging.critical('creation of directory "{}" failed...'.format(self.dirCurrent()))
 status = status and False
 return status
@@ -193,19 +193,19 @@ class Test():
 try:
 shutil.copy2(self.fileInReference(file),self.fileInCurrent(targetfiles[i]))
 except:
-logging.critical('Reference2Current: Unable to copy file %s'%file)
+logging.critical('Reference2Current: Unable to copy file "{}"'.format(file))
 def copy_Base2Current(self,sourceDir,sourcefiles=[],targetfiles=[]):
-source=os.path.normpath(os.path.join(self.dirBase,'../../../'+sourceDir))
+source=os.path.normpath(os.path.join(self.dirBase,'../../..',sourceDir))
 if len(targetfiles) == 0: targetfiles = sourcefiles
 for i,file in enumerate(sourcefiles):
 try:
 shutil.copy2(os.path.join(source,file),self.fileInCurrent(targetfiles[i]))
 except:
 logging.error(os.path.join(source,file))
-logging.critical('Base2Current: Unable to copy file %s'%file)
+logging.critical('Base2Current: Unable to copy file "{}"'.format(file))
 def copy_Current2Reference(self,sourcefiles=[],targetfiles=[]):
@@ -215,7 +215,7 @@ class Test():
 try:
 shutil.copy2(self.fileInCurrent(file),self.fileInReference(targetfiles[i]))
 except:
-logging.critical('Current2Reference: Unable to copy file %s'%file)
+logging.critical('Current2Reference: Unable to copy file "{}"'.format(file))
 def copy_Proof2Current(self,sourcefiles=[],targetfiles=[]):
@@ -225,7 +225,7 @@ class Test():
 try:
 shutil.copy2(self.fileInProof(file),self.fileInCurrent(targetfiles[i]))
 except:
-logging.critical('Proof2Current: Unable to copy file %s'%file)
+logging.critical('Proof2Current: Unable to copy file "{}"'.format(file))
 def copy_Current2Current(self,sourcefiles=[],targetfiles=[]):
@@ -234,7 +234,7 @@ class Test():
 try:
 shutil.copy2(self.fileInReference(file),self.fileInCurrent(targetfiles[i]))
 except:
-logging.critical('Current2Current: Unable to copy file %s'%file)
+logging.critical('Current2Current: Unable to copy file "{}"'.format(file))
 def execute_inCurrentDir(self,cmd,streamIn=None):
@@ -252,7 +252,7 @@ class Test():
 def compare_Array(self,File1,File2):
 import numpy as np
-logging.info('comparing\n '+File1+'\n '+File2)
+logging.info('\n '.join(['comparing',File1,File2]))
 table1 = damask.ASCIItable(name=File1,readonly=True)
 table1.head_read()
 len1=len(table1.info)+2
@@ -270,8 +270,9 @@ class Test():
 max_loc=np.argmax(abs(refArrayNonZero[curArray.nonzero()]/curArray[curArray.nonzero()]-1.))
 refArrayNonZero = refArrayNonZero[curArray.nonzero()]
 curArray = curArray[curArray.nonzero()]
-print(' ********\n * maximum relative error %e for %e and %e\n ********'
-      %(max_err, refArrayNonZero[max_loc],curArray[max_loc]))
+print(' ********\n * maximum relative error {} between {} and {}\n ********'.format(max_err,
+                                                                                     refArrayNonZero[max_loc],
+                                                                                     curArray[max_loc]))
 return max_err
 else:
 raise Exception('mismatch in array size to compare')
@@ -295,7 +296,7 @@ class Test():
 absoluteTolerance=False,perLine=False,skipLines=[]):
 import numpy as np
-logging.info('comparing ASCII Tables\n %s \n %s'%(file0,file1))
+logging.info('\n '.join(['comparing ASCII Tables',file0,file1]))
 if normHeadings == '': normHeadings = headings0
 # check if comparison is possible and determine lenght of columns
@@ -315,7 +316,7 @@ class Test():
 for i in xrange(dataLength):
 if headings0[i]['shape'] != headings1[i]['shape']:
-raise Exception('shape mismatch when comparing %s with %s '%(headings0[i]['label'],headings1[i]['label']))
+raise Exception('shape mismatch between {} and {} '.format(headings0[i]['label'],headings1[i]['label']))
 shape[i] = headings0[i]['shape']
 for j in xrange(np.shape(shape[i])[0]):
 length[i] *= shape[i][j]
@@ -323,7 +324,9 @@ class Test():
 for j in xrange(np.shape(normShape[i])[0]):
 normLength[i] *= normShape[i][j]
 else:
-raise Exception('trying to compare %i with %i normed by %i data sets'%(len(headings0),len(headings1),len(normHeadings)))
+raise Exception('trying to compare {} with {} normed by {} data sets'.format(len(headings0),
+                                                                             len(headings1),
+                                                                             len(normHeadings)))
 table0 = damask.ASCIItable(name=file0,readonly=True)
 table0.head_read()
@@ -331,37 +334,34 @@ class Test():
 table1.head_read()
 for i in xrange(dataLength):
-key0    = {True :'1_%s',
-           False:'%s'   }[length[i]>1]%headings0[i]['label']
-key1    = {True :'1_%s',
-           False:'%s'   }[length[i]>1]%headings1[i]['label']
-normKey = {True :'1_%s',
-           False:'%s'   }[normLength[i]>1]%normHeadings[i]['label']
+key0    = ('1_' if length[i]>1     else '') + headings0[i]['label']
+key1    = ('1_' if length[i]>1     else '') + headings1[i]['label']
+normKey = ('1_' if normLength[i]>1 else '') + normHeadings[i]['label']
 if key0 not in table0.labels:
-raise Exception('column %s not found in 1. table...\n'%key0)
+raise Exception('column {} not found in 1. table...\n'.format(key0))
 elif key1 not in table1.labels:
-raise Exception('column %s not found in 2. table...\n'%key1)
+raise Exception('column {} not found in 2. table...\n'.format(key1))
 elif normKey not in table0.labels:
-raise Exception('column %s not found in 1. table...\n'%normKey)
+raise Exception('column {} not found in 1. table...\n'.format(normKey))
 else:
-column[0][i]  = table0.labels.index(key0) # remember columns of requested data
-column[1][i]  = table1.labels.index(key1) # remember columns of requested data in second column
-normColumn[i] = table0.labels.index(normKey) # remember columns of requested data in second column
+column[0][i]  = table0.labels.index(key0)
+column[1][i]  = table1.labels.index(key1)
+normColumn[i] = table0.labels.index(normKey)
 line0 = 0
 while table0.data_read(): # read next data line of ASCII table
 if line0 not in skipLines:
 for i in xrange(dataLength):
 myData = np.array(map(float,table0.data[column[0][i]:\
                                         column[0][i]+length[i]]),'d')
 normData = np.array(map(float,table0.data[normColumn[i]:\
                                           normColumn[i]+normLength[i]]),'d')
 data[i] = np.append(data[i],np.reshape(myData,shape[i]))
 if normType == 'pInf':
 norm[i] = np.append(norm[i],np.max(np.abs(normData)))
 else:
 norm[i] = np.append(norm[i],np.linalg.norm(np.reshape(normData,normShape[i]),normType))
-line0 +=1
+line0 += 1
 for i in xrange(dataLength):
 if not perLine: norm[i] = [np.max(norm[i]) for j in xrange(line0-len(skipLines))]
@@ -370,12 +370,12 @@ class Test():
 norm[i] = [1.0 for j in xrange(line0-len(skipLines))]
 absTol[i] = True
 if perLine:
-logging.warning('At least one norm of %s in 1. table is 0.0, using absolute tolerance'%headings0[i]['label'])
+logging.warning('At least one norm of {} in 1. table is 0.0, using absolute tolerance'.format(headings0[i]['label']))
 else:
-logging.warning('Maximum norm of %s in 1. table is 0.0, using absolute tolerance'%headings0[i]['label'])
+logging.warning('Maximum norm of {} in 1. table is 0.0, using absolute tolerance'.format(headings0[i]['label']))
 line1 = 0
 while table1.data_read(): # read next data line of ASCII table
 if line1 not in skipLines:
 for i in xrange(dataLength):
 myData = np.array(map(float,table1.data[column[1][i]:\
@@ -384,21 +384,25 @@ class Test():
 norm[i][line1-len(skipLines)])
 line1 +=1
-if (line0 != line1): raise Exception('found %s lines in 1. table and %s in 2. table'%(line0,line1))
+if (line0 != line1): raise Exception('found {} lines in 1. table but {} in 2. table'.format(line0,line1))
 logging.info(' ********')
 for i in xrange(dataLength):
 if absTol[i]:
-logging.info(' * maximum absolute error %e for %s and %s'%(maxError[i],headings0[i]['label'],headings1[i]['label']))
+logging.info(' * maximum absolute error {} between {} and {}'.format(maxError[i],
+                                                                     headings0[i]['label'],
+                                                                     headings1[i]['label']))
 else:
-logging.info(' * maximum relative error %e for %s and %s'%(maxError[i],headings0[i]['label'],headings1[i]['label']))
+logging.info(' * maximum relative error {} between {} and {}'.format(maxError[i],
+                                                                     headings0[i]['label'],
+                                                                     headings1[i]['label']))
 logging.info(' ********')
 return maxError
 def compare_TablesStatistically(self,
 files    = [None,None], # list of file names
 columns  = [None],      # list of list of column labels (per file)
 meanTol  = 1.0e-4,
 stdTol   = 1.0e-6,
 preFilter = 1.0e-9):
@@ -407,18 +411,18 @@ class Test():
 threshold can be used to ignore small values (a negative number disables this feature)
 """
 if not (isinstance(files, Iterable) and not isinstance(files, str)): # check whether list of files is requested
 files = [str(files)]
 tables = [damask.ASCIItable(name = filename,readonly = True) for filename in files]
 for table in tables:
 table.head_read()
 columns += [columns[0]]*(len(files)-len(columns)) # extend to same length as files
 columns = columns[:len(files)] # truncate to same length as files
 for i,column in enumerate(columns):
 if column is None: columns[i] = tables[i].labels # if no column is given, read all
 logging.info('comparing ASCIItables statistically')
 for i in xrange(len(columns)):
@@ -428,7 +432,7 @@ class Test():
 )
 logging.info(files[i]+':'+','.join(columns[i]))
 if len(files) < 2: return True # single table is always close to itself...
 data = []
 for table,labels in zip(tables,columns):
@@ -443,16 +447,16 @@ class Test():
 normedDelta = np.where(normBy>preFilter,delta/normBy,0.0)
 mean = np.amax(np.abs(np.mean(normedDelta,0)))
 std = np.amax(np.std(normedDelta,0))
-logging.info('mean: %f'%mean)
-logging.info('std: %f'%std)
+logging.info('mean: {:f}'.format(mean))
+logging.info('std: {:f}'.format(std))
 return (mean<meanTol) & (std < stdTol)
 def compare_Tables(self,
 files = [None,None], # list of file names
 columns = [None], # list of list of column labels (per file)
 rtol = 1e-5,
 atol = 1e-8,
 preFilter = -1.0,
@@ -463,18 +467,18 @@ class Test():
 threshold can be used to ignore small values (a negative number disables this feature)
 """
 if not (isinstance(files, Iterable) and not isinstance(files, str)): # check whether list of files is requested
 files = [str(files)]
 tables = [damask.ASCIItable(name = filename,readonly = True) for filename in files]
 for table in tables:
 table.head_read()
 columns += [columns[0]]*(len(files)-len(columns)) # extend to same length as files
 columns = columns[:len(files)] # truncate to same length as files
 for i,column in enumerate(columns):
 if column is None: columns[i] = tables[i].labels # if no column is given, read all
 logging.info('comparing ASCIItables')
 for i in xrange(len(columns)):
@@ -484,7 +488,7 @@ class Test():
 )
 logging.info(files[i]+':'+','.join(columns[i]))
 if len(files) < 2: return True # single table is always close to itself...
 maximum = np.zeros(len(columns[0]),dtype='f')
 data = []
@@ -495,26 +499,26 @@ class Test():
 table.close()
 maximum /= len(tables)
-maximum = np.where(maximum >0.0, maximum, 1) # do not devide by zero for empty columns
+maximum = np.where(maximum >0.0, maximum, 1) # avoid div by zero for empty columns
 for i in xrange(len(data)):
 data[i] /= maximum
 mask = np.zeros_like(table.data,dtype='bool')
 for table in data:
 mask |= np.where(np.abs(table)<postFilter,True,False) # mask out (all) tiny values
 allclose = True # start optimistic
 for i in xrange(1,len(data)):
 if debug:
 t0 = np.where(mask,0.0,data[i-1])
 t1 = np.where(mask,0.0,data[i ])
 j = np.argmin(np.abs(t1)*rtol+atol-np.abs(t0-t1))
-logging.info('%f'%np.amax(np.abs(t0-t1)/(np.abs(t1)*rtol+atol)))
-logging.info('%f %f'%((t0*maximum).flatten()[j],(t1*maximum).flatten()[j]))
+logging.info('{:f}'.format(np.amax(np.abs(t0-t1)/(np.abs(t1)*rtol+atol))))
+logging.info('{:f} {:f}'.format((t0*maximum).flatten()[j],(t1*maximum).flatten()[j]))
 allclose &= np.allclose(np.where(mask,0.0,data[i-1]),
                         np.where(mask,0.0,data[i ]),rtol,atol) # accumulate "pessimism"
 return allclose
@@ -543,14 +547,13 @@ class Test():
 def report_Success(self,culprit):
 if culprit == 0:
-logging.critical('%s passed.'%({False: 'The test',
-                                True: 'All %i tests'%(len(self.variants))}[len(self.variants) > 1]))
+logging.critical(('The test' if len(self.variants) == 1 else 'All {} tests'.format(len(self.variants))) + ' passed')
 logging.critical('\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n')
 return 0
 if culprit == -1:
 logging.warning('Warning: Could not start test')
 return 0
 else:
-logging.critical(' ********\n * Test %i failed...\n ********'%(culprit))
+logging.critical(' ********\n * Test {} failed...\n ********'.format(culprit))
 logging.critical('\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n')
 return culprit
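The bulk of the test.py hunks are a mechanical migration from %-interpolation to str.format. Both spellings below produce equivalent output apart from float rendering ({} falls back to str(), so 3.2e-07 instead of %e's 3.200000e-07), and format cannot mismatch a type code against its argument:

    maxError, lab0, lab1 = 3.2e-07, 'stress', 'stress_ref'

    old = ' * maximum relative error %e for %s and %s' % (maxError, lab0, lab1)
    new = ' * maximum relative error {} between {} and {}'.format(maxError, lab0, lab1)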


@@ -58,6 +58,24 @@ def emph(what):
 """emphasizes string on screen"""
 return bcolors.BOLD+srepr(what)+bcolors.ENDC
+
+# -----------------------------
+def execute(cmd,
+            streamIn = None,
+            wd = './'):
+  """executes a command in given directory and returns stdout and stderr for optional stdin"""
+  initialPath = os.getcwd()
+  os.chdir(wd)
+  process = subprocess.Popen(shlex.split(cmd),
+                             stdout = subprocess.PIPE,
+                             stderr = subprocess.PIPE,
+                             stdin  = subprocess.PIPE)
+  out,error = [i.replace("\x08","") for i in (process.communicate() if streamIn is None
+                                                                    else process.communicate(streamIn.read()))]
+  os.chdir(initialPath)
+  if process.returncode != 0: raise RuntimeError('{} failed with returncode {}'.format(cmd,process.returncode))
+  return out,error
+
 # -----------------------------
 # Matlab like trigonometric functions that take and return angles in degrees.
 # -----------------------------
@@ -75,7 +93,7 @@ def gridLocation(idx,res):
 # -----------------------------
 def gridIndex(location,res):
   return ( location[0] % res[0] + \
          ( location[1] % res[1]) * res[0] + \
          ( location[2] % res[2]) * res[1] * res[0] )
@@ -104,7 +122,9 @@ class extendableOption(Option):
 class backgroundMessage(threading.Thread):
 """reporting with animation to indicate progress"""
-choices = {'bounce': ['_','o','O','°','¯','¯','°','O','o',],
+choices = {'bounce':  ['_', 'o', 'O', u'\u00B0',
+                       u'\u203e',u'\u203e',u'\u00B0','O','o','_'],
+           'spin':    [u'\u25dc',u'\u25dd',u'\u25de',u'\u25df'],
            'circle':  [u'\u25f4',u'\u25f5',u'\u25f6',u'\u25f7'],
            'hexagon': [u'\u2b22',u'\u2b23'],
            'square':  [u'\u2596',u'\u2598',u'\u259d',u'\u2597'],
@@ -228,7 +248,7 @@ def leastsqBound(func, x0, args=(), bounds=None, Dfun=None, full_output=0,
 return shape(res), dt
 def _int2extGrad(p_int, bounds):
-"""Calculate the gradients of transforming the internal (unconstrained) to external (constained) parameter."""
+"""Calculate the gradients of transforming the internal (unconstrained) to external (constrained) parameter."""
 grad = np.empty_like(p_int)
 for i, (x, bound) in enumerate(zip(p_int, bounds)):
 lower, upper = bound
@@ -430,22 +450,4 @@ def curve_fit_bound(f, xdata, ydata, p0=None, sigma=None, bounds=None, **kw):
   else:
     pcov = np.inf
-  if return_full:
-    return popt, pcov, infodict, errmsg, ier
-  else:
-    return popt, pcov
+  return (popt, pcov, infodict, errmsg, ier) if return_full else (popt, pcov)
-
-def execute(cmd,streamIn=None,wd='./'):
-  """executes a command in given directory and returns stdout and stderr for optional stdin"""
-  initialPath=os.getcwd()
-  os.chdir(wd)
-  process = subprocess.Popen(shlex.split(cmd),stdout=subprocess.PIPE,stderr = subprocess.PIPE,stdin=subprocess.PIPE)
-  if streamIn is not None:
-    out,error = [i.replace("\x08","") for i in process.communicate(streamIn.read())]
-  else:
-    out,error =[i.replace("\x08","") for i in process.communicate()]
-  os.chdir(initialPath)
-  if process.returncode !=0: raise RuntimeError(cmd+' failed with returncode '+str(process.returncode))
-  return out,error
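The execute() helper thus moves unchanged in behavior from the bottom to the top of util.py. A usage sketch, assuming the module is importable as damask.util (the command lines are hypothetical); note that it raises RuntimeError on a nonzero return code:

    from io import BytesIO
    import damask

    out, error = damask.util.execute('ls -l', wd='/tmp')             # run in a given working directory
    out, error = damask.util.execute('cat', streamIn=BytesIO(b'hi')) # streamIn feeds the process's stdin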


@@ -10,40 +10,35 @@ scriptName = os.path.splitext(os.path.basename(__file__))[0]
 scriptID = ' '.join([scriptName,damask.version])

 def curlFFT(geomdim,field):
-  N = grid.prod()                                          # field size
-  n = np.array(np.shape(field)[3:]).prod()                 # data size
-  if n == 3:
-    dataType = 'vector'
-  elif n == 9:
-    dataType = 'tensor'
+  grid = np.array(np.shape(field)[2::-1])
+  N = grid.prod()                                          # field size
+  n = np.array(np.shape(field)[3:]).prod()                 # data size
+
+  if   n == 3: dataType = 'vector'
+  elif n == 9: dataType = 'tensor'

   field_fourier = np.fft.fftpack.rfftn(field,axes=(0,1,2))
   curl_fourier = np.zeros(field_fourier.shape,'c16')

 # differentiation in Fourier space
   k_s = np.zeros([3],'i')
-  TWOPIIMG = (0.0+2.0j*math.pi)
+  TWOPIIMG = 2.0j*math.pi
   for i in xrange(grid[2]):
     k_s[0] = i
-    if(grid[2]%2==0 and i == grid[2]//2):                  # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011)
-      k_s[0]=0
-    elif (i > grid[2]//2):
-      k_s[0] = k_s[0] - grid[2]
+    if grid[2]%2 == 0 and i == grid[2]//2: k_s[0] = 0      # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011)
+    elif i > grid[2]//2: k_s[0] -= grid[2]

     for j in xrange(grid[1]):
       k_s[1] = j
-      if(grid[1]%2==0 and j == grid[1]//2):                # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011)
-        k_s[1]=0
-      elif (j > grid[1]//2):
-        k_s[1] = k_s[1] - grid[1]
+      if grid[1]%2 == 0 and j == grid[1]//2: k_s[1] = 0    # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011)
+      elif j > grid[1]//2: k_s[1] -= grid[1]

       for k in xrange(grid[0]//2+1):
         k_s[2] = k
-        if(grid[0]%2==0 and k == grid[0]//2):              # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011)
-          k_s[2]=0
-        xi = np.array([k_s[2]/geomdim[2]+0.0j,k_s[1]/geomdim[1]+0.j,k_s[0]/geomdim[0]+0.j],'c16')
+        if grid[0]%2 == 0 and k == grid[0]//2: k_s[2] = 0  # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011)
+
+        xi = (k_s/geomdim)[2::-1].astype('c16')            # reversing the field input order

         if dataType == 'tensor':
           for l in xrange(3):
             curl_fourier[i,j,k,0,l] = ( field_fourier[i,j,k,l,2]*xi[1]\
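The hand-rolled k_s bookkeeping above produces the usual integer FFT frequencies for each axis, with the Nyquist frequency of even-length axes forced to zero so the centered spectral derivative stays real. For one axis of length g it matches NumPy's convention (illustrative sketch):

    import numpy as np

    g = 6
    k = (np.fft.fftfreq(g) * g).astype(int)  # [ 0,  1,  2, -3, -2, -1]
    if g % 2 == 0: k[g//2] = 0               # zero the Nyquist bin: [ 0, 1, 2, 0, -2, -1]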
@@ -100,10 +95,8 @@ if options.vector is None and options.tensor is None:
 if filenames == []: filenames = [None]

 for name in filenames:
-  try:
-    table = damask.ASCIItable(name = name,buffered = False)
-  except:
-    continue
+  try:    table = damask.ASCIItable(name = name,buffered = False)
+  except: continue
   damask.util.report(scriptName,name)

 # ------------------------------------------ read header ------------------------------------------
@@ -161,9 +154,10 @@ for name in filenames:
   stack = [table.data]
   for type, data in items.iteritems():
     for i,label in enumerate(data['active']):
-      stack.append(curlFFT(size[::-1],                                            # we need to reverse order here, because x
-                   table.data[:,data['column'][i]:data['column'][i]+data['dim']]. # is fastest,ie rightmost, but leftmost in
-                   reshape([grid[2],grid[1],grid[0]]+data['shape'])))             # our x,y,z notation
+      # we need to reverse order here, because x is fastest,ie rightmost, but leftmost in our x,y,z notation
+      stack.append(curlFFT(size[::-1],
+                   table.data[:,data['column'][i]:data['column'][i]+data['dim']].
+                   reshape([grid[2],grid[1],grid[0]]+data['shape'])))

 # ------------------------------------------ output result -----------------------------------------


@@ -10,39 +10,35 @@ scriptName = os.path.splitext(os.path.basename(__file__))[0]
 scriptID = ' '.join([scriptName,damask.version])

 def divFFT(geomdim,field):
-  N = grid.prod()                                          # field size
-  n = np.array(np.shape(field)[3:]).prod()                 # data size
+  grid = np.array(np.shape(field)[2::-1])
+  N = grid.prod()                                          # field size
+  n = np.array(np.shape(field)[3:]).prod()                 # data size

   field_fourier = np.fft.fftpack.rfftn(field,axes=(0,1,2))
   div_fourier = np.zeros(field_fourier.shape[0:len(np.shape(field))-1],'c16') # size depents on whether tensor or vector

 # differentiation in Fourier space
   k_s=np.zeros([3],'i')
-  TWOPIIMG = (0.0+2.0j*math.pi)
+  TWOPIIMG = 2.0j*math.pi
   for i in xrange(grid[2]):
     k_s[0] = i
-    if(grid[2]%2==0 and i == grid[2]//2):                  # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011)
-      k_s[0]=0
-    elif (i > grid[2]//2):
-      k_s[0] = k_s[0] - grid[2]
+    if grid[2]%2 == 0 and i == grid[2]//2: k_s[0] = 0      # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011)
+    elif i > grid[2]//2: k_s[0] -= grid[2]

     for j in xrange(grid[1]):
       k_s[1] = j
-      if(grid[1]%2==0 and j == grid[1]//2):                # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011)
-        k_s[1]=0
-      elif (j > grid[1]//2):
-        k_s[1] = k_s[1] - grid[1]
+      if grid[1]%2 == 0 and j == grid[1]//2: k_s[1] = 0    # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011)
+      elif j > grid[1]//2: k_s[1] -= grid[1]

       for k in xrange(grid[0]//2+1):
         k_s[2] = k
-        if(grid[0]%2==0 and k == grid[0]//2):              # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011)
-          k_s[2]=0
+        if grid[0]%2 == 0 and k == grid[0]//2: k_s[2] = 0  # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011)

-        xi=np.array([k_s[2]/geomdim[2]+0.0j,k_s[1]/geomdim[1]+0.j,k_s[0]/geomdim[0]+0.j],'c16')
+        xi = (k_s/geomdim)[2::-1].astype('c16')            # reversing the field input order

         if n == 9:                                        # tensor, 3x3 -> 3
           for l in xrange(3):
             div_fourier[i,j,k,l] = sum(field_fourier[i,j,k,l,0:3]*xi) *TWOPIIMG
         elif n == 3:                                      # vector, 3 -> 1
           div_fourier[i,j,k] = sum(field_fourier[i,j,k,0:3]*xi) *TWOPIIMG

   return np.fft.fftpack.irfftn(div_fourier,axes=(0,1,2)).reshape([N,n/3])
@@ -80,15 +76,13 @@ parser.set_defaults(coords = 'ipinitialcoord',
 if options.vector is None and options.tensor is None:
   parser.error('no data column specified.')

-# --- loop over input files -------------------------------------------------------------------------
+# --- loop over input files ------------------------------------------------------------------------

 if filenames == []: filenames = [None]

 for name in filenames:
-  try:
-    table = damask.ASCIItable(name = name,buffered = False)
-  except:
-    continue
+  try:    table = damask.ASCIItable(name = name,buffered = False)
+  except: continue
   damask.util.report(scriptName,name)

 # ------------------------------------------ read header ------------------------------------------
@@ -140,16 +134,17 @@ for name in filenames:
   maxcorner = np.array(map(max,coords))
   grid = np.array(map(len,coords),'i')
   size = grid/np.maximum(np.ones(3,'d'), grid-1.0) * (maxcorner-mincorner)   # size from edge to edge = dim * n/(n-1)
-  size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1]))        # spacing for grid==1 set to smallest among other spacings
+  size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1]))        # spacing for grid==1 equal to smallest among other ones

 # ------------------------------------------ process value field -----------------------------------

   stack = [table.data]
   for type, data in items.iteritems():
     for i,label in enumerate(data['active']):
-      stack.append(divFFT(size[::-1],                                            # we need to reverse order here, because x
-                   table.data[:,data['column'][i]:data['column'][i]+data['dim']]. # is fastest,ie rightmost, but leftmost in
-                   reshape([grid[2],grid[1],grid[0]]+data['shape'])))             # our x,y,z notation
+      # we need to reverse order here, because x is fastest,ie rightmost, but leftmost in our x,y,z notation
+      stack.append(divFFT(size[::-1],
+                   table.data[:,data['column'][i]:data['column'][i]+data['dim']].
+                   reshape([grid[2],grid[1],grid[0]]+data['shape'])))

 # ------------------------------------------ output result -----------------------------------------

processing/post/addGradient.py (new executable file, 158 lines)

@@ -0,0 +1,158 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 no BOM -*-
+
+import os,sys,math
+import numpy as np
+from optparse import OptionParser
+import damask
+
+scriptName = os.path.splitext(os.path.basename(__file__))[0]
+scriptID   = ' '.join([scriptName,damask.version])
+
+def gradFFT(geomdim,field):
+  grid = np.array(np.shape(field)[2::-1])
+  N = grid.prod()                                          # field size
+  n = np.array(np.shape(field)[3:]).prod()                 # data size
+
+  if   n == 3: dataType = 'vector'
+  elif n == 1: dataType = 'scalar'
+
+  field_fourier = np.fft.fftpack.rfftn(field,axes=(0,1,2))
+  grad_fourier  = np.zeros(field_fourier.shape+(3,),'c16')
+
+# differentiation in Fourier space
+  k_s = np.zeros([3],'i')
+  TWOPIIMG = 2.0j*math.pi
+  for i in xrange(grid[2]):
+    k_s[0] = i
+    if grid[2]%2 == 0 and i == grid[2]//2: k_s[0] = 0      # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011)
+    elif i > grid[2]//2: k_s[0] -= grid[2]
+
+    for j in xrange(grid[1]):
+      k_s[1] = j
+      if grid[1]%2 == 0 and j == grid[1]//2: k_s[1] = 0    # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011)
+      elif j > grid[1]//2: k_s[1] -= grid[1]
+
+      for k in xrange(grid[0]//2+1):
+        k_s[2] = k
+        if grid[0]%2 == 0 and k == grid[0]//2: k_s[2] = 0  # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011)
+
+        xi = (k_s/geomdim)[2::-1].astype('c16')            # reversing the field order
+
+        grad_fourier[i,j,k,0,:] = field_fourier[i,j,k,0]*xi *TWOPIIMG     # vector field from scalar data
+
+        if dataType == 'vector':
+          grad_fourier[i,j,k,1,:] = field_fourier[i,j,k,1]*xi *TWOPIIMG   # tensor field from vector data
+          grad_fourier[i,j,k,2,:] = field_fourier[i,j,k,2]*xi *TWOPIIMG
+
+  return np.fft.fftpack.irfftn(grad_fourier,axes=(0,1,2)).reshape([N,3*n])
+
+
+# --------------------------------------------------------------------
+#                                MAIN
+# --------------------------------------------------------------------
+
+parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
+Add column(s) containing gradient of requested column(s).
+Operates on periodic ordered three-dimensional data sets.
+Deals with both vector- and scalar fields.
+
+""", version = scriptID)
+
+parser.add_option('-c','--coordinates',
+                  dest = 'coords',
+                  type = 'string', metavar='string',
+                  help = 'column heading for coordinates [%default]')
+parser.add_option('-v','--vector',
+                  dest = 'vector',
+                  action = 'extend', metavar = '<string LIST>',
+                  help = 'heading of columns containing vector field values')
+parser.add_option('-s','--scalar',
+                  dest = 'scalar',
+                  action = 'extend', metavar = '<string LIST>',
+                  help = 'heading of columns containing scalar field values')
+
+parser.set_defaults(coords = 'ipinitialcoord',
+                   )
+
+(options,filenames) = parser.parse_args()
+
+if options.vector is None and options.scalar is None:
+  parser.error('no data column specified.')
+
+# --- loop over input files ------------------------------------------------------------------------
+
+if filenames == []: filenames = [None]
+
+for name in filenames:
+  try:    table = damask.ASCIItable(name = name,buffered = False)
+  except: continue
+  damask.util.report(scriptName,name)
+
+# ------------------------------------------ read header ------------------------------------------
+
+  table.head_read()
+
+# ------------------------------------------ sanity checks ----------------------------------------
+
+  items = {
+            'scalar': {'dim': 1, 'shape': [1], 'labels':options.scalar, 'active':[], 'column': []},
+            'vector': {'dim': 3, 'shape': [3], 'labels':options.vector, 'active':[], 'column': []},
+          }
+  errors  = []
+  remarks = []
+  column = {}
+
+  if table.label_dimension(options.coords) != 3: errors.append('coordinates {} are not a vector.'.format(options.coords))
+  else: colCoord = table.label_index(options.coords)
+
+  for type, data in items.iteritems():
+    for what in (data['labels'] if data['labels'] is not None else []):
+      dim = table.label_dimension(what)
+      if dim != data['dim']: remarks.append('column {} is not a {}.'.format(what,type))
+      else:
+        items[type]['active'].append(what)
+        items[type]['column'].append(table.label_index(what))
+
+  if remarks != []: damask.util.croak(remarks)
+  if errors  != []:
+    damask.util.croak(errors)
+    table.close(dismiss = True)
+    continue
+
+# ------------------------------------------ assemble header --------------------------------------
+
+  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
+  for type, data in items.iteritems():
+    for label in data['active']:
+      table.labels_append(['{}_gradFFT({})'.format(i+1,label) for i in xrange(3 * data['dim'])]) # extend ASCII header with new labels
+  table.head_write()
+
+# --------------- figure out size and grid ---------------------------------------------------------
+
+  table.data_readArray()
+
+  coords = [np.unique(table.data[:,colCoord+i]) for i in xrange(3)]
+  mincorner = np.array(map(min,coords))
+  maxcorner = np.array(map(max,coords))
+  grid = np.array(map(len,coords),'i')
+  size = grid/np.maximum(np.ones(3,'d'), grid-1.0) * (maxcorner-mincorner)   # size from edge to edge = dim * n/(n-1)
+  size = np.where(grid > 1, size, min(size[grid > 1]/grid[grid > 1]))
+
+# ------------------------------------------ process value field -----------------------------------
+
+  stack = [table.data]
+  for type, data in items.iteritems():
+    for i,label in enumerate(data['active']):
+      # we need to reverse order here, because x is fastest,ie rightmost, but leftmost in our x,y,z notation
+      stack.append(gradFFT(size[::-1],
+                   table.data[:,data['column'][i]:data['column'][i]+data['dim']].
+                   reshape([grid[2],grid[1],grid[0]]+data['shape'])))

+# ------------------------------------------ output result -----------------------------------------
+
+  if len(stack) > 1: table.data = np.hstack(tuple(stack))
+  table.data_writeArray('%.12g')
+
+# ------------------------------------------ output finalization -----------------------------------
+
+  table.close()                                            # close input ASCII table (works for stdin)
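A quick way to convince oneself of the spectral differentiation these scripts share (gradFFT, curlFFT, divFFT): on a periodic grid, multiplying the rfft of sin(x) by 2*pi*i*k and transforming back recovers cos(x). Sanity sketch, not part of the script:

    import numpy as np

    n, L = 64, 2*np.pi
    x = np.arange(n) * L / n
    k = np.fft.rfftfreq(n, d=L/n)             # frequencies in cycles per unit length, like k_s/geomdim
    dfdx = np.fft.irfft(np.fft.rfft(np.sin(x)) * 2j*np.pi*k, n)
    assert np.allclose(dfdx, np.cos(x), atol=1e-10)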

View File

@@ -35,7 +35,7 @@ Filter rows according to condition and columns by either white or black listing.
 Examples:
 Every odd row if x coordinate is positive -- " #ip.x# >= 0.0 and #_row_#%2 == 1 ).
-All rows where label 'foo' equals 'bar' -- " #foo# == \"bar\" "
+All rows where label 'foo' equals 'bar' -- " #s#foo# == 'bar' "
 """, version = scriptID)

View File

@@ -151,7 +151,7 @@ for name in filenames:
 writer = vtk.vtkXMLRectilinearGridWriter()
 writer.SetDataModeToBinary()
 writer.SetCompressorTypeToZLib()
-writer.SetFileName(os.path.splitext(options.vtk)[0]+(''     if options.inplace else '_added.vtr'))
+writer.SetFileName(os.path.splitext(options.vtk)[0]+('.vtr' if options.inplace else '_added.vtr'))
 if vtk.VTK_MAJOR_VERSION <= 5: writer.SetInput(rGrid)
 else:                          writer.SetInputData(rGrid)
 writer.Write()
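
The one-character fix above matters only for in-place runs: that branch previously dropped the file extension altogether. Illustration, assuming options.vtk is 'geom.vtr':

  os.path.splitext('geom.vtr')[0] + '.vtr'         # in-place        -> 'geom.vtr' (was extension-less 'geom' before the fix)
  os.path.splitext('geom.vtr')[0] + '_added.vtr'   # separate output -> 'geom_added.vtr'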

View File

@@ -28,24 +28,32 @@ parser.add_option('-o', '--offset',
                   help = 'a,b,c offset from old to new origin of grid [%default]')
 parser.add_option('-f', '--fill',
                   dest = 'fill',
-                  type = 'int', metavar = 'int',
+                  type = 'float', metavar = 'float',
                   help = '(background) canvas grain index. "0" selects maximum microstructure index + 1 [%default]')
+parser.add_option('--float',
+                  dest = 'real',
+                  action = 'store_true',
+                  help = 'input data is float [%default]')
+
 parser.set_defaults(grid = ['0','0','0'],
                     offset = (0,0,0),
                     fill = 0,
+                    real = False,
                    )
 
 (options, filenames) = parser.parse_args()
 
+datatype = 'f' if options.real else 'i'
+
 # --- loop over input files -------------------------------------------------------------------------
 
 if filenames == []: filenames = [None]
 
 for name in filenames:
-  try:
-    table = damask.ASCIItable(name = name,
-                              buffered = False, labeled = False)
+  try: table = damask.ASCIItable(name = name,
+                                 buffered = False,
+                                 labeled = False)
   except: continue
   damask.util.report(scriptName,name)
@@ -71,7 +79,7 @@ for name in filenames:
 
 # --- read data ------------------------------------------------------------------------------------
 
-  microstructure = table.microstructure_read(info['grid']).reshape(info['grid'],order='F')          # read microstructure
+  microstructure = table.microstructure_read(info['grid'],datatype).reshape(info['grid'],order='F') # read microstructure
 
 # --- do work ------------------------------------------------------------------------------------
@@ -85,8 +93,8 @@ for name in filenames:
                           else int(n) for o,n in zip(info['grid'],options.grid)],'i')
   newInfo['grid'] = np.where(newInfo['grid'] > 0, newInfo['grid'],info['grid'])
 
-  microstructure_cropped = np.zeros(newInfo['grid'],'i')
-  microstructure_cropped.fill(options.fill if options.fill > 0 else microstructure.max()+1)
+  microstructure_cropped = np.zeros(newInfo['grid'],datatype)
+  microstructure_cropped.fill(options.fill if options.real or options.fill > 0 else microstructure.max()+1)
   xindex = list(set(xrange(options.offset[0],options.offset[0]+newInfo['grid'][0])) & \
                 set(xrange(info['grid'][0])))
   yindex = list(set(xrange(options.offset[1],options.offset[1]+newInfo['grid'][1])) & \
@@ -152,9 +160,9 @@ for name in filenames:
 
 # --- write microstructure information ------------------------------------------------------------
 
-  formatwidth = int(math.floor(math.log10(microstructure_cropped.max())+1))
+  format = '%g' if options.real else '%{}i'.format(int(math.floor(math.log10(microstructure_cropped.max())+1)))
   table.data = microstructure_cropped.reshape((newInfo['grid'][0],newInfo['grid'][1]*newInfo['grid'][2]),order='F').transpose()
-  table.data_writeArray('%%%ii'%(formatwidth),delimiter=' ')
+  table.data_writeArray(format,delimiter=' ')
 
 # --- output finalization --------------------------------------------------------------------------
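
The last hunk keeps the width-padded integer format for regular geometries and falls back to the generic '%g' for float data. A quick check of the integer branch, with an illustrative maximum index of 812:

  import math
  format = '%{}i'.format(int(math.floor(math.log10(812)+1)))   # -> '%3i', i.e. indices padded to 3 digits
  format % 7                                                   # -> '  7'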

View File

@@ -54,12 +54,25 @@ for name in filenames:
   errors = []
   if np.any(info['grid'] < 1):    errors.append('invalid grid a b c.')
   if np.any(info['size'] <= 0.0): errors.append('invalid size x y z.')
+
+#--- read microstructure information --------------------------------------------------------------
+
+  if options.data:
+    microstructure,ok = table.microstructure_read(info['grid'],strict = True)                      # read microstructure
+    if ok:
+      structure = vtk.vtkIntArray()
+      structure.SetName('Microstructures')
+      for idx in microstructure: structure.InsertNextValue(idx)
+    else: errors.append('mismatch between data and grid dimension.')
+
   if errors != []:
     damask.util.croak(errors)
     table.close(dismiss = True)
     continue
 
-# --- generate VTK rectilinear grid --------------------------------------------------------------------------------
+# --- generate VTK rectilinear grid ---------------------------------------------------------------
 
   grid = vtk.vtkRectilinearGrid()
   grid.SetDimensions([x+1 for x in info['grid']])
@@ -72,18 +85,8 @@ for name in filenames:
     elif i == 1: grid.SetYCoordinates(temp)
     elif i == 2: grid.SetZCoordinates(temp)
 
-#--- read microstructure information --------------------------------------------------------------
-
-  if options.data:
-    microstructure = table.microstructure_read(info['grid'])                                       # read microstructure
-    structure = vtk.vtkIntArray()
-    structure.SetName('Microstructures')
-    for idx in microstructure:
-      structure.InsertNextValue(idx)
-    grid.GetCellData().AddArray(structure)
+  if options.data: grid.GetCellData().AddArray(structure)
 
 # --- write data -----------------------------------------------------------------------------------
 
   if name:
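
This refactoring moves the microstructure read in front of the grid generation, so a mismatch between data and grid is now reported together with the other sanity checks instead of failing after the VTK grid was already assembled. Judging from the hunk, strict = True makes microstructure_read return a (data, ok) pair; a plausible sketch of that contract — illustrative only, not the actual ASCIItable code:

  def microstructure_read_strict_sketch(items, grid):
      ok = (len(items) == grid[0]*grid[1]*grid[2])      # data must exactly fill the grid
      return items, ok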

View File

@@ -18,6 +18,14 @@ Unpack geometry files containing ranges "a to b" and/or "n of x" multiples (excl
 """, version = scriptID)
 
+parser.add_option('-1', '--onedimensional',
+                  dest   = 'oneD',
+                  action = 'store_true',
+                  help   = 'output geom file with one-dimensional data arrangement')
+
+parser.set_defaults(oneD = False,
+                   )
+
 (options, filenames) = parser.parse_args()
 
 # --- loop over input files -------------------------------------------------------------------------
@@ -69,7 +77,8 @@ for name in filenames:
   microstructure = table.microstructure_read(info['grid'])                                          # read microstructure
 
   formatwidth = int(math.floor(math.log10(microstructure.max())+1))                                 # efficient number printing format
-  table.data = microstructure.reshape((info['grid'][0],info['grid'][1]*info['grid'][2]),order='F').transpose()
+  table.data = microstructure if options.oneD else \
+               microstructure.reshape((info['grid'][0],info['grid'][1]*info['grid'][2]),order='F').transpose()
   table.data_writeArray('%%%ii'%(formatwidth),delimiter = ' ')
 
 #--- output finalization --------------------------------------------------------------------------
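
The effect of --onedimensional on the written layout, sketched for a 2 x 2 x 2 grid with indices 1..8 (x fastest, Fortran order, as in the reshape above):

  import numpy as np
  m = np.arange(1, 9)
  m                                              # oneD: flat sequence 1 2 3 4 5 6 7 8
  m.reshape((2, 2*2), order='F').transpose()     # default: grid[0] values per row -> [1 2], [3 4], [5 6], [7 8]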