Merge branch 'development' of magit1.mpie.de:damask/DAMASK into development
commit cd5eddb444
@@ -139,6 +139,7 @@ program DAMASK_spectral
   integer(MPI_OFFSET_KIND) :: fileOffset
   integer(MPI_OFFSET_KIND), dimension(:), allocatable :: outputSize
   integer(pInt), parameter :: maxByteOut = 2147483647-4096                                        !< limit of one file output write https://trac.mpich.org/projects/mpich/ticket/1742
+  integer(pInt), parameter :: maxRealOut = maxByteOut/pReal
   integer(pLongInt), dimension(2) :: outputIndex
   PetscErrorCode :: ierr
   external :: &
@@ -444,8 +445,8 @@ program DAMASK_spectral
 
   if (.not. appendToOutFile) then                                                                 ! if not restarting, write 0th increment
     do i=1, size(materialpoint_results,3)/(maxByteOut/(materialpoint_sizeResults*pReal))+1        ! slice the output of my process in chunks not exceeding the limit for one output
-      outputIndex=int([(i-1_pInt)*((maxByteOut/pReal)/materialpoint_sizeResults)+1_pInt, &
-                       min(i*((maxByteOut/pReal)/materialpoint_sizeResults),size(materialpoint_results,3))],pLongInt)
+      outputIndex=int([(i-1_pInt)*((maxRealOut)/materialpoint_sizeResults)+1_pInt, &
+                       min(i*((maxRealOut)/materialpoint_sizeResults),size(materialpoint_results,3))],pLongInt)
       call MPI_file_write(resUnit,reshape(materialpoint_results(:,:,outputIndex(1):outputIndex(2)),&
                           [(outputIndex(2)-outputIndex(1)+1)*materialpoint_sizeResults]), &
                           (outputIndex(2)-outputIndex(1)+1)*materialpoint_sizeResults,&
@@ -471,7 +472,7 @@ program DAMASK_spectral
 ! forwarding time
     timeIncOld = timeinc
     if (loadCases(currentLoadCase)%logscale == 0_pInt) then                                       ! linear scale
-      timeinc = loadCases(currentLoadCase)%time/loadCases(currentLoadCase)%incs                   ! only valid for given linear time scale. will be overwritten later in case loglinear scale is used
+      timeinc = loadCases(currentLoadCase)%time/real(loadCases(currentLoadCase)%incs,pReal)       ! only valid for given linear time scale. will be overwritten later in case loglinear scale is used
     else
       if (currentLoadCase == 1_pInt) then                                                         ! 1st currentLoadCase of logarithmic scale
         if (inc == 1_pInt) then                                                                   ! 1st inc of 1st currentLoadCase of logarithmic scale
@@ -653,8 +654,8 @@ program DAMASK_spectral
         call MPI_file_seek (resUnit,fileOffset,MPI_SEEK_SET,ierr)
         if(ierr /=0_pInt) call IO_error(894_pInt, ext_msg='MPI_file_seek')
         do i=1, size(materialpoint_results,3)/(maxByteOut/(materialpoint_sizeResults*pReal))+1    ! slice the output of my process in chunks not exceeding the limit for one output
-          outputIndex=int([(i-1_pInt)*((maxByteOut/pReal)/materialpoint_sizeResults)+1_pInt, &
-                           min(i*((maxByteOut/pReal)/materialpoint_sizeResults),size(materialpoint_results,3))],pLongInt)
+          outputIndex=int([(i-1_pInt)*((maxRealOut)/materialpoint_sizeResults)+1_pInt, &
+                           min(i*((maxRealOut)/materialpoint_sizeResults),size(materialpoint_results,3))],pLongInt)
           call MPI_file_write(resUnit,reshape(materialpoint_results(:,:,outputIndex(1):outputIndex(2)),&
                               [(outputIndex(2)-outputIndex(1)+1)*materialpoint_sizeResults]), &
                               (outputIndex(2)-outputIndex(1)+1)*materialpoint_sizeResults,&
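Note on the two chunked-write hunks above: the repeated expression `maxByteOut/pReal` is folded into the new constant `maxRealOut`, so the slicing arithmetic lives in one place. The slicing exists because a single MPI file write is limited to roughly 2 GiB (see the MPICH ticket cited in the declaration hunk), so each process writes its share of `materialpoint_results` in index windows that stay below that limit. Below is a minimal, MPI-free sketch of the same window arithmetic; the kind values and sizes are stand-ins for illustration, not DAMASK's actual `pInt`/`pReal` definitions.

program chunk_demo
  implicit none
  integer, parameter :: pInt = 4, pReal = 8                   ! kind/byte-size stand-ins (assumption: 4-byte integers, 8-byte reals)
  integer(pInt), parameter :: maxByteOut = 2147483647-4096    ! stay below the ~2 GiB limit of one MPI file write
  integer(pInt), parameter :: maxRealOut = maxByteOut/pReal   ! largest number of reals per write
  integer(pInt) :: sizeResults, nPoints, chunkPoints, i, first, last

  sizeResults = 300_pInt                                      ! reals stored per material point (example value)
  nPoints     = 20000000_pInt                                 ! material points owned by this process (example value)
  chunkPoints = maxRealOut/sizeResults                        ! how many points fit into one write

  do i = 1_pInt, nPoints/chunkPoints + 1_pInt                 ! slice the output into chunks below the limit
    first = (i-1_pInt)*chunkPoints + 1_pInt
    last  = min(i*chunkPoints, nPoints)
    if (first > last) exit                                    ! final slice may be empty
    write(6,'(a,i0,a,i0)') 'would write points ', first, ' to ', last
  enddo
end program chunk_demo

The real code hands `reshape(materialpoint_results(:,:,outputIndex(1):outputIndex(2)),...)` to `MPI_file_write` for each window; the sketch only prints the bounds.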
@@ -223,8 +223,7 @@ function hydrogenflux_cahnhilliard_getMobility33(ip,el)
 enddo
 
 hydrogenflux_cahnhilliard_getMobility33 = &
-  hydrogenflux_cahnhilliard_getMobility33/ &
-  homogenization_Ngrains(mesh_element(3,el))
+  hydrogenflux_cahnhilliard_getMobility33/real(homogenization_Ngrains(mesh_element(3,el)),pReal)
 
 end function hydrogenflux_cahnhilliard_getMobility33
 
@@ -258,8 +257,7 @@ function hydrogenflux_cahnhilliard_getDiffusion33(ip,el)
 enddo
 
 hydrogenflux_cahnhilliard_getDiffusion33 = &
-  hydrogenflux_cahnhilliard_getDiffusion33/ &
-  homogenization_Ngrains(mesh_element(3,el))
+  hydrogenflux_cahnhilliard_getDiffusion33/real(homogenization_Ngrains(mesh_element(3,el)),pReal)
 
 end function hydrogenflux_cahnhilliard_getDiffusion33
 
@@ -295,8 +293,7 @@ function hydrogenflux_cahnhilliard_getFormationEnergy(ip,el)
 enddo
 
 hydrogenflux_cahnhilliard_getFormationEnergy = &
-  hydrogenflux_cahnhilliard_getFormationEnergy/ &
-  homogenization_Ngrains(mesh_element(3,el))
+  hydrogenflux_cahnhilliard_getFormationEnergy/real(homogenization_Ngrains(mesh_element(3,el)),pReal)
 
 end function hydrogenflux_cahnhilliard_getFormationEnergy
 
@@ -331,10 +328,9 @@ function hydrogenflux_cahnhilliard_getEntropicCoeff(ip,el)
       lattice_hydrogenSurfaceEnergy(material_phase(grain,ip,el))
 enddo
 
-hydrogenflux_cahnhilliard_getEntropicCoeff = &
-  hydrogenflux_cahnhilliard_getEntropicCoeff* &
+hydrogenflux_cahnhilliard_getEntropicCoeff = hydrogenflux_cahnhilliard_getEntropicCoeff* &
   temperature(material_homog(ip,el))%p(thermalMapping(material_homog(ip,el))%p(ip,el))/ &
-  homogenization_Ngrains(material_homog(ip,el))
+  real(homogenization_Ngrains(material_homog(ip,el)),pReal)
 
 end function hydrogenflux_cahnhilliard_getEntropicCoeff
 
@@ -393,8 +389,8 @@ subroutine hydrogenflux_cahnhilliard_KinematicChemPotAndItsTangent(KPot, dKPot_d
   enddo
 enddo
 
-KPot = KPot/homogenization_Ngrains(material_homog(ip,el))
-dKPot_dCh = dKPot_dCh/homogenization_Ngrains(material_homog(ip,el))
+KPot = KPot/real(homogenization_Ngrains(material_homog(ip,el)),pReal)
+dKPot_dCh = dKPot_dCh/real(homogenization_Ngrains(material_homog(ip,el)),pReal)
 
 end subroutine hydrogenflux_cahnhilliard_KinematicChemPotAndItsTangent
 
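Every hunk in this block, like the `timeinc` hunk further up, applies the same correction: the sum over grains is divided by `real(homogenization_Ngrains(...),pReal)` instead of the bare integer count. Fortran promotes an integer operand when the other operand is real, so the numbers do not change; the explicit conversion pins down the kind and keeps strict implicit-conversion warnings quiet. A tiny sketch of the pattern, with hypothetical names and kind stand-ins:

program grain_average_demo
  implicit none
  integer, parameter :: pInt = 4, pReal = 8                   ! kind stand-ins (assumption)
  real(pReal), dimension(3,3) :: mobilityAvg
  integer(pInt) :: Ngrains, grain

  Ngrains = 4_pInt
  mobilityAvg = 0.0_pReal
  do grain = 1_pInt, Ngrains
    mobilityAvg = mobilityAvg + real(grain,pReal)             ! stand-in for a per-grain contribution
  enddo

  mobilityAvg = mobilityAvg/real(Ngrains,pReal)               ! explicit integer-to-real conversion, as in the hunks above
  write(6,'(3(f6.2,1x))') mobilityAvg(1,1:3)
end program grain_average_demo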
@@ -176,10 +176,6 @@ type(tSolutionState) function spectral_damage_solution(guess,timeinc,timeinc_old
     itmax, &
     err_damage_tolAbs, &
     err_damage_tolRel
-  use spectral_utilities, only: &
-    tBoundaryCondition, &
-    Utilities_maskedCompliance, &
-    Utilities_updateGamma
   use mesh, only: &
     grid, &
     grid3
@@ -61,6 +61,7 @@ subroutine DAMASK_interface_init(loadCaseParameterIn,geometryParameterIn)
     tag
   integer :: &
     i, &
+    threadLevel, &
     worldrank = 0
   integer, allocatable, dimension(:) :: &
     chunkPos
@@ -73,15 +74,22 @@ subroutine DAMASK_interface_init(loadCaseParameterIn,geometryParameterIn)
     quit,&
     MPI_Comm_rank,&
     PETScInitialize, &
+    MPI_Init_Thread, &
     MPI_abort
 
 !--------------------------------------------------------------------------------------------------
 ! PETSc Init
 #ifdef PETSc
+#ifdef _OPENMP
+  call MPI_Init_Thread(MPI_THREAD_FUNNELED,threadLevel,ierr);CHKERRQ(ierr)                         ! in case of OpenMP, don't rely on PETScInitialize doing MPI init
+  if (threadLevel<MPI_THREAD_FUNNELED) then
+    write(6,'(a)') 'MPI library does not support OpenMP'
+    call quit(1_pInt)
+  endif
+#endif
   call PetscInitialize(PETSC_NULL_CHARACTER,ierr)                                                  ! according to PETSc manual, that should be the first line in the code
   CHKERRQ(ierr)                                                                                    ! this is a macro definition, it is case sensitive
 
-  open(6, encoding='UTF-8')                                                                        ! modern fortran compilers (gfortran >4.4, ifort >11 support it)
+  open(6, encoding='UTF-8')
   call MPI_Comm_rank(PETSC_COMM_WORLD,worldrank,ierr);CHKERRQ(ierr)
 #endif
  mainProcess: if (worldrank == 0) then
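The interface hunk above stops relying on `PetscInitialize` to set up MPI when OpenMP is active: `MPI_Init_Thread` is called first with `MPI_THREAD_FUNNELED`, and the run quits if the MPI library cannot provide that level. `MPI_THREAD_FUNNELED` is sufficient here because only the thread that initialized MPI makes MPI calls; the OpenMP worker threads never do. A minimal standalone sketch of the same check, independent of PETSc and using only the standard `mpi` module:

program thread_check
  use mpi                                                      ! standard MPI Fortran module
  implicit none
  integer :: provided, ierr, rank

  call MPI_Init_thread(MPI_THREAD_FUNNELED, provided, ierr)    ! request funneled thread support
  if (provided < MPI_THREAD_FUNNELED) then
    write(6,'(a)') 'MPI library does not support OpenMP'
    call MPI_Abort(MPI_COMM_WORLD, 1, ierr)
  endif

  call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
  if (rank == 0) write(6,'(a,i0)') 'provided thread level: ', provided
  call MPI_Finalize(ierr)
end program thread_check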
@@ -313,6 +321,7 @@ character(len=1024) function getGeometryFile(geometryParameter)
   integer :: posExt, posSep
   logical :: error
   character :: pathSep
+  external :: quit
 
   getGeometryFile = geometryParameter
   pathSep = getPathSep()
@@ -322,6 +331,7 @@ character(len=1024) function getGeometryFile(geometryParameter)
   if (posExt <= posSep) getGeometryFile = trim(getGeometryFile)//('.geom')                         ! no extension present
   if (scan(getGeometryFile,pathSep) /= 1) then                                                     ! relative path given as command line argument
     error = getcwd(cwd)
+    if (error) call quit(1_pInt)
     getGeometryFile = rectifyPath(trim(cwd)//pathSep//getGeometryFile)
   else
     getGeometryFile = rectifyPath(getGeometryFile)
@@ -347,6 +357,7 @@ character(len=1024) function getLoadCaseFile(loadCaseParameter)
   integer :: posExt, posSep
   logical :: error
   character :: pathSep
+  external :: quit
 
   getLoadCaseFile = loadcaseParameter
   pathSep = getPathSep()
|
|||
if (posExt <= posSep) getLoadCaseFile = trim(getLoadCaseFile)//('.load') ! no extension present
|
||||
if (scan(getLoadCaseFile,pathSep) /= 1) then ! relative path given as command line argument
|
||||
error = getcwd(cwd)
|
||||
if (error) call quit(1_pInt)
|
||||
getLoadCaseFile = rectifyPath(trim(cwd)//pathSep//getLoadCaseFile)
|
||||
else
|
||||
getLoadCaseFile = rectifyPath(getLoadCaseFile)
|
||||
|
|
|
@@ -178,10 +178,6 @@ type(tSolutionState) function spectral_thermal_solution(guess,timeinc,timeinc_ol
     itmax, &
     err_thermal_tolAbs, &
     err_thermal_tolRel
-  use spectral_utilities, only: &
-    tBoundaryCondition, &
-    Utilities_maskedCompliance, &
-    Utilities_updateGamma
   use mesh, only: &
     grid, &
     grid3
@@ -422,7 +422,7 @@ subroutine utilities_updateGamma(C,saveReference)
       forall(l = 1_pInt:3_pInt, m = 1_pInt:3_pInt) &
         xiDyad_cmplx(l,m) = conjg(-xi1st(l,i,j,k-grid3Offset))*xi1st(m,i,j,k-grid3Offset)
       forall(l = 1_pInt:3_pInt, m = 1_pInt:3_pInt) &
-        temp33_complex(l,m) = sum(C_ref(l,1:3,m,1:3)*xiDyad_cmplx)
+        temp33_complex(l,m) = sum(cmplx(C_ref(l,1:3,m,1:3),0.0_pReal)*xiDyad_cmplx)
       matA(1:3,1:3) = real(temp33_complex); matA(4:6,4:6) = real(temp33_complex)
       matA(1:3,4:6) = aimag(temp33_complex); matA(4:6,1:3) = -aimag(temp33_complex)
       if (abs(math_det33(matA(1:3,1:3))) > 1e-16) then
@@ -558,7 +558,7 @@ subroutine utilities_fourierGammaConvolution(fieldAim)
       forall(l = 1_pInt:3_pInt, m = 1_pInt:3_pInt) &
         xiDyad_cmplx(l,m) = conjg(-xi1st(l,i,j,k))*xi1st(m,i,j,k)
       forall(l = 1_pInt:3_pInt, m = 1_pInt:3_pInt) &
-        temp33_complex(l,m) = sum(C_ref(l,1:3,m,1:3)*xiDyad_cmplx)
+        temp33_complex(l,m) = sum(cmplx(C_ref(l,1:3,m,1:3),0.0_pReal)*xiDyad_cmplx)
       matA(1:3,1:3) = real(temp33_complex); matA(4:6,4:6) = real(temp33_complex)
       matA(1:3,4:6) = aimag(temp33_complex); matA(4:6,1:3) = -aimag(temp33_complex)
       if (abs(math_det33(matA(1:3,1:3))) > 1e-16) then
@@ -610,8 +610,8 @@ subroutine utilities_fourierGreenConvolution(D_ref, mobility_ref, deltaT)
 ! do the actual spectral method calculation
   do k = 1_pInt, grid3; do j = 1_pInt, grid(2) ;do i = 1_pInt, grid1Red
     GreenOp_hat = cmplx(1.0_pReal,0.0_pReal,pReal)/ &
-                  (cmplx(mobility_ref,0.0_pReal,pReal) + &
-                   deltaT*sum(conjg(xi1st(1:3,i,j,k))*matmul(D_ref,xi1st(1:3,i,j,k))))             ! why not use dot_product
+                  (cmplx(mobility_ref,0.0_pReal,pReal) + cmplx(deltaT,0.0_pReal)*&
+                   sum(conjg(xi1st(1:3,i,j,k))* matmul(cmplx(D_ref,0.0_pReal),xi1st(1:3,i,j,k))))  ! why not use dot_product
     scalarField_fourier(i,j,k) = scalarField_fourier(i,j,k)*GreenOp_hat
   enddo; enddo; enddo
 
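The three spectral_utilities hunks follow the same convention as the integer divisions earlier in this commit, now for mixed real/complex arithmetic: each real factor (`C_ref`, `deltaT`, `D_ref`) is wrapped in `cmplx(...,0.0_pReal)` before it multiplies a complex quantity. The language would promote the real operand anyway, so the point is making the conversion explicit rather than changing results. A small self-contained sketch of the pattern on a similar contraction, with hypothetical values:

program cmplx_demo
  implicit none
  integer, parameter :: pReal = 8                              ! real kind as assumed here
  real(pReal),    dimension(3,3) :: C_ref                      ! real reference stiffness block (stand-in)
  complex(pReal), dimension(3,3) :: xiDyad_cmplx               ! complex frequency dyad (stand-in values)
  complex(pReal) :: temp_cmplx
  integer :: l, m

  C_ref = 1.0_pReal
  forall(l = 1:3, m = 1:3) xiDyad_cmplx(l,m) = cmplx(real(l,pReal), real(m,pReal), pReal)

  ! explicit conversion of the real factor before the complex contraction
  temp_cmplx = sum(cmplx(C_ref,0.0_pReal,pReal)*xiDyad_cmplx)
  write(6,'(2(f8.2,1x))') real(temp_cmplx), aimag(temp_cmplx)
end program cmplx_demo

One detail worth remembering: `cmplx` without a kind argument returns default (single-precision) complex, so passing the kind explicitly, as the `GreenOp_hat` line does with `cmplx(1.0_pReal,0.0_pReal,pReal)`, is what preserves double precision.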
@@ -1,2 +1,2 @@
-fixed_seed 144921823
-analyticJaco 0
+fixed_seed 1697667030
+analyticJaco 1
@@ -69,7 +69,7 @@ def displacementFluctFFT(F,grid,size,nodal=False,transformed=False):
 #--------------------------------------------------------------------------------------------------
 # integration in Fourier space
 
-  displacement_fourier = -np.einsum('ijkml,ijkl,l->ijkm',
+  displacement_fourier = +np.einsum('ijkml,ijkl,l->ijkm',
                                     F if transformed else np.fft.rfftn(F,axes=(0,1,2)),
                                     k_s,
                                     integrator,