following Python notation

Martin Diehl 2022-01-29 15:05:07 +01:00
parent 477f2d8167
commit 487912cfb0
7 changed files with 271 additions and 271 deletions
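Context for the rename below: it aligns the Fortran names (grid, grid3, grid3Offset) with the notation of the DAMASK Python package, where a regular grid geometry is described by its cells, size, and origin. A minimal sketch of that Python-side notation (the attribute names are the point here; treat the exact constructor signature as an assumption):

    import numpy as np
    import damask

    # 16 x 16 x 16 cells filling a unit cube, one material ID per cell
    geom = damask.Grid(material=np.zeros([16,16,16], dtype=int), size=np.ones(3))
    print(geom.cells, geom.size, geom.origin)   # 'cells', not 'grid'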

src/grid/discretization_grid.f90
@@ -27,10 +27,10 @@ module discretization_grid
   private
   integer, dimension(3), public, protected :: &
-    grid                                          !< (global) grid
+    cells                                         !< (global) cells
   integer, public, protected :: &
-    grid3, &                                      !< (local) grid in 3rd direction
-    grid3Offset                                   !< (local) grid offset in 3rd direction
+    cells3, &                                     !< (local) cells in 3rd direction
+    cells3Offset                                  !< (local) cells offset in 3rd direction
   real(pReal), dimension(3), public, protected :: &
     geomSize                                      !< (global) physical size
   real(pReal), public, protected :: &
@@ -55,7 +55,7 @@ subroutine discretization_grid_init(restart)
     mySize, &                                     !< domain size of this process
     origin                                        !< (global) distance to origin
   integer, dimension(3) :: &
-    myGrid                                        !< domain grid of this process
+    myGrid                                        !< domain cells of this process
   integer, dimension(:), allocatable :: &
     materialAt, materialAt_global
@@ -77,7 +77,7 @@ subroutine discretization_grid_init(restart)
   if (worldrank == 0) then
     fileContent = IO_read(interface_geomFile)
-    call readVTI(grid,geomSize,origin,materialAt_global,fileContent)
+    call readVTI(cells,geomSize,origin,materialAt_global,fileContent)
     fname = interface_geomFile
     if (scan(fname,'/') /= 0) fname = fname(scan(fname,'/',.true.)+1:)
     call results_openJobFile(parallel=.false.)
@@ -88,37 +88,37 @@ subroutine discretization_grid_init(restart)
   end if

-  call MPI_Bcast(grid,3_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD, err_MPI)
+  call MPI_Bcast(cells,3_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD, err_MPI)
   if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
-  if (grid(1) < 2) call IO_error(844, ext_msg='cells(1) must be larger than 1')
+  if (cells(1) < 2) call IO_error(844, ext_msg='cells(1) must be larger than 1')
   call MPI_Bcast(geomSize,3_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD, err_MPI)
   if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
   call MPI_Bcast(origin,3_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD, err_MPI)
   if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'

-  print'(/,1x,a,3(i12,1x))',   'cells  a b c: ', grid
+  print'(/,1x,a,3(i12,1x))',   'cells  a b c: ', cells
   print '(1x,a,3(es12.5,1x))', 'size   x y z: ', geomSize
   print '(1x,a,3(es12.5,1x))', 'origin x y z: ', origin

-  if (worldsize>grid(3)) call IO_error(894, ext_msg='number of processes exceeds grid(3)')
+  if (worldsize>cells(3)) call IO_error(894, ext_msg='number of processes exceeds cells(3)')

   call fftw_mpi_init
-  devNull = fftw_mpi_local_size_3d(int(grid(3),C_INTPTR_T), &
-                                   int(grid(2),C_INTPTR_T), &
-                                   int(grid(1),C_INTPTR_T)/2+1, &
+  devNull = fftw_mpi_local_size_3d(int(cells(3),C_INTPTR_T), &
+                                   int(cells(2),C_INTPTR_T), &
+                                   int(cells(1),C_INTPTR_T)/2+1, &
                                    PETSC_COMM_WORLD, &
-                                   z, &                           ! domain grid size along z
-                                   z_offset)                      ! domain grid offset along z
+                                   z, &                           ! domain cells size along z
+                                   z_offset)                      ! domain cells offset along z
   if (z==0_C_INTPTR_T) call IO_error(894, ext_msg='Cannot distribute MPI processes')

-  grid3       = int(z)
-  grid3Offset = int(z_offset)
-  size3       = geomSize(3)*real(grid3,pReal)      /real(grid(3),pReal)
-  size3Offset = geomSize(3)*real(grid3Offset,pReal)/real(grid(3),pReal)
-  myGrid = [grid(1:2),grid3]
+  cells3       = int(z)
+  cells3Offset = int(z_offset)
+  size3       = geomSize(3)*real(cells3,pReal)      /real(cells(3),pReal)
+  size3Offset = geomSize(3)*real(cells3Offset,pReal)/real(cells(3),pReal)
+  myGrid = [cells(1:2),cells3]
   mySize = [geomSize(1:2),size3]

-  call MPI_Gather(product(grid(1:2))*grid3Offset, 1_MPI_INTEGER_KIND,MPI_INTEGER,displs,&
+  call MPI_Gather(product(cells(1:2))*cells3Offset, 1_MPI_INTEGER_KIND,MPI_INTEGER,displs,&
                   1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
   if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
   call MPI_Gather(product(myGrid), 1_MPI_INTEGER_KIND,MPI_INTEGER,sendcounts,&
@@ -131,10 +131,10 @@ subroutine discretization_grid_init(restart)
   if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'

   call discretization_init(materialAt, &
-                           IPcoordinates0(myGrid,mySize,grid3Offset), &
-                           Nodes0(myGrid,mySize,grid3Offset),&
-                           merge((grid(1)+1) * (grid(2)+1) * (grid3+1),&       ! write top layer...
-                                 (grid(1)+1) * (grid(2)+1) *  grid3,&          ! ...unless not last process
+                           IPcoordinates0(myGrid,mySize,cells3Offset), &
+                           Nodes0(myGrid,mySize,cells3Offset),&
+                           merge((cells(1)+1) * (cells(2)+1) * (cells3+1),&    ! write top layer...
+                                 (cells(1)+1) * (cells(2)+1) *  cells3,&       ! ...unless not last process
                                  worldrank+1==worldsize))

 !--------------------------------------------------------------------------------------------------
@@ -142,7 +142,7 @@ subroutine discretization_grid_init(restart)
   if (.not. restart) then
     call results_openJobFile
     call results_closeGroup(results_addGroup('geometry'))
-    call results_addAttribute('cells', grid, '/geometry')
+    call results_addAttribute('cells', cells, '/geometry')
     call results_addAttribute('size', geomSize,'/geometry')
     call results_addAttribute('origin',origin, '/geometry')
     call results_closeJobFile
@@ -170,11 +170,11 @@ end subroutine discretization_grid_init
 !> @brief Parse vtk image data (.vti)
 !> @details https://vtk.org/Wiki/VTK_XML_Formats
 !--------------------------------------------------------------------------------------------------
-subroutine readVTI(grid,geomSize,origin,material, &
+subroutine readVTI(cells,geomSize,origin,material, &
                    fileContent)

   integer, dimension(3), intent(out) :: &
-    grid                                          ! grid (across all processes!)
+    cells                                         ! cells (across all processes!)
   real(pReal), dimension(3), intent(out) :: &
     geomSize, &                                   ! size (across all processes!)
     origin                                        ! origin (across all processes!)
@@ -190,7 +190,7 @@ subroutine readVTI(grid,geomSize,origin,material, &
     s

-  grid = -1
+  cells = -1
   geomSize = -1.0_pReal

   inFile = .false.
@@ -215,7 +215,7 @@ subroutine readVTI(grid,geomSize,origin,material, &
       if (.not. inImage) then
         if (index(fileContent(startPos:endPos),'<ImageData',kind=pI64) /= 0_pI64) then
           inImage = .true.
-          call cellsSizeOrigin(grid,geomSize,origin,fileContent(startPos:endPos))
+          call cellsSizeOrigin(cells,geomSize,origin,fileContent(startPos:endPos))
         end if
       else
         if (index(fileContent(startPos:endPos),'<CellData',kind=pI64) /= 0_pI64) then
@@ -247,9 +247,9 @@ subroutine readVTI(grid,geomSize,origin,material, &
   end do

   if (.not. allocated(material))        call IO_error(error_ID = 844, ext_msg='material data not found')
-  if (size(material) /= product(grid))  call IO_error(error_ID = 844, ext_msg='size(material)')
+  if (size(material) /= product(cells)) call IO_error(error_ID = 844, ext_msg='size(material)')
   if (any(geomSize<=0))                 call IO_error(error_ID = 844, ext_msg='size')
-  if (any(grid<1))                      call IO_error(error_ID = 844, ext_msg='grid')
+  if (any(cells<1))                     call IO_error(error_ID = 844, ext_msg='cells')

   material = material + 1
   if (any(material<1))                  call IO_error(error_ID = 844, ext_msg='material ID < 0')
@@ -502,13 +502,13 @@ end subroutine readVTI
 !---------------------------------------------------------------------------------------------------
 !> @brief Calculate undeformed position of IPs/cell centers (pretend to be an element)
 !---------------------------------------------------------------------------------------------------
-function IPcoordinates0(grid,geomSize,grid3Offset)
+function IPcoordinates0(cells,geomSize,cells3Offset)

-  integer, dimension(3), intent(in) :: grid         ! grid (for this process!)
+  integer, dimension(3), intent(in) :: cells        ! cells (for this process!)
   real(pReal), dimension(3), intent(in) :: geomSize ! size (for this process!)
-  integer, intent(in) :: grid3Offset                ! grid(3) offset
+  integer, intent(in) :: cells3Offset               ! cells(3) offset

-  real(pReal), dimension(3,product(grid)) :: ipCoordinates0
+  real(pReal), dimension(3,product(cells)) :: ipCoordinates0

   integer :: &
     a,b,c, &
@@ -516,9 +516,9 @@ function IPcoordinates0(grid,geomSize,grid3Offset)

   i = 0
-  do c = 1, grid(3); do b = 1, grid(2); do a = 1, grid(1)
+  do c = 1, cells(3); do b = 1, cells(2); do a = 1, cells(1)
     i = i + 1
-    IPcoordinates0(1:3,i) = geomSize/real(grid,pReal) * (real([a,b,grid3Offset+c],pReal) -0.5_pReal)
+    IPcoordinates0(1:3,i) = geomSize/real(cells,pReal) * (real([a,b,cells3Offset+c],pReal) -0.5_pReal)
   end do; end do; end do

 end function IPcoordinates0
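For reference, the cell-center positions computed by IPcoordinates0 above follow size/cells * (index + 1/2) in each direction. A NumPy sketch of the same formula (single-process view, i.e. cells3Offset = 0; the variable names merely mirror the Fortran):

    import numpy as np

    cells = np.array([4, 4, 4])                     # cells per direction
    size  = np.ones(3)                              # physical size

    idx = np.stack(np.meshgrid(*(np.arange(c) for c in cells), indexing='ij'), axis=-1)
    coords0 = (idx + 0.5) * size/cells              # undeformed cell centers, shape (4,4,4,3)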
@@ -527,22 +527,22 @@ end function IPcoordinates0
 !---------------------------------------------------------------------------------------------------
 !> @brief Calculate position of undeformed nodes (pretend to be an element)
 !---------------------------------------------------------------------------------------------------
-pure function nodes0(grid,geomSize,grid3Offset)
+pure function nodes0(cells,geomSize,cells3Offset)

-  integer, dimension(3), intent(in) :: grid         ! grid (for this process!)
+  integer, dimension(3), intent(in) :: cells        ! cells (for this process!)
   real(pReal), dimension(3), intent(in) :: geomSize ! size (for this process!)
-  integer, intent(in) :: grid3Offset                ! grid(3) offset
+  integer, intent(in) :: cells3Offset               ! cells(3) offset

-  real(pReal), dimension(3,product(grid+1)) :: nodes0
+  real(pReal), dimension(3,product(cells+1)) :: nodes0

   integer :: &
     a,b,c, &
     n

   n = 0
-  do c = 0, grid3; do b = 0, grid(2); do a = 0, grid(1)
+  do c = 0, cells3; do b = 0, cells(2); do a = 0, cells(1)
     n = n + 1
-    nodes0(1:3,n) = geomSize/real(grid,pReal) * real([a,b,grid3Offset+c],pReal)
+    nodes0(1:3,n) = geomSize/real(cells,pReal) * real([a,b,cells3Offset+c],pReal)
   end do; end do; end do

 end function nodes0
@@ -551,17 +551,17 @@ end function nodes0
 !--------------------------------------------------------------------------------------------------
 !> @brief Calculate IP interface areas
 !--------------------------------------------------------------------------------------------------
-pure function cellSurfaceArea(geomSize,grid)
+pure function cellSurfaceArea(geomSize,cells)

   real(pReal), dimension(3), intent(in) :: geomSize ! size (for this process!)
-  integer, dimension(3), intent(in) :: grid         ! grid (for this process!)
+  integer, dimension(3), intent(in) :: cells        ! cells (for this process!)

-  real(pReal), dimension(6,1,product(grid)) :: cellSurfaceArea
+  real(pReal), dimension(6,1,product(cells)) :: cellSurfaceArea

-  cellSurfaceArea(1:2,1,:) = geomSize(2)/real(grid(2)) * geomSize(3)/real(grid(3))
-  cellSurfaceArea(3:4,1,:) = geomSize(3)/real(grid(3)) * geomSize(1)/real(grid(1))
-  cellSurfaceArea(5:6,1,:) = geomSize(1)/real(grid(1)) * geomSize(2)/real(grid(2))
+  cellSurfaceArea(1:2,1,:) = geomSize(2)/real(cells(2)) * geomSize(3)/real(cells(3))
+  cellSurfaceArea(3:4,1,:) = geomSize(3)/real(cells(3)) * geomSize(1)/real(cells(1))
+  cellSurfaceArea(5:6,1,:) = geomSize(1)/real(cells(1)) * geomSize(2)/real(cells(2))

 end function cellSurfaceArea
@@ -588,42 +588,42 @@ end function cellSurfaceNormal
 !--------------------------------------------------------------------------------------------------
 !> @brief Build IP neighborhood relations
 !--------------------------------------------------------------------------------------------------
-pure function IPneighborhood(grid)
+pure function IPneighborhood(cells)

-  integer, dimension(3), intent(in) :: grid         ! grid (for this process!)
-  integer, dimension(3,6,1,product(grid)) :: IPneighborhood !< 6 neighboring IPs as [element ID, IP ID, face ID]
+  integer, dimension(3), intent(in) :: cells        ! cells (for this process!)
+  integer, dimension(3,6,1,product(cells)) :: IPneighborhood !< 6 neighboring IPs as [element ID, IP ID, face ID]

   integer :: &
     x,y,z, &
     e

   e = 0
-  do z = 0,grid(3)-1; do y = 0,grid(2)-1; do x = 0,grid(1)-1
+  do z = 0,cells(3)-1; do y = 0,cells(2)-1; do x = 0,cells(1)-1
     e = e + 1
     ! element ID
-    IPneighborhood(1,1,1,e) = z * grid(1) * grid(2) &
-                            + y * grid(1) &
-                            + modulo(x+1,grid(1)) &
+    IPneighborhood(1,1,1,e) = z * cells(1) * cells(2) &
+                            + y * cells(1) &
+                            + modulo(x+1,cells(1)) &
                             + 1
-    IPneighborhood(1,2,1,e) = z * grid(1) * grid(2) &
-                            + y * grid(1) &
-                            + modulo(x-1,grid(1)) &
+    IPneighborhood(1,2,1,e) = z * cells(1) * cells(2) &
+                            + y * cells(1) &
+                            + modulo(x-1,cells(1)) &
                             + 1
-    IPneighborhood(1,3,1,e) = z * grid(1) * grid(2) &
-                            + modulo(y+1,grid(2)) * grid(1) &
+    IPneighborhood(1,3,1,e) = z * cells(1) * cells(2) &
+                            + modulo(y+1,cells(2)) * cells(1) &
                             + x &
                             + 1
-    IPneighborhood(1,4,1,e) = z * grid(1) * grid(2) &
-                            + modulo(y-1,grid(2)) * grid(1) &
+    IPneighborhood(1,4,1,e) = z * cells(1) * cells(2) &
+                            + modulo(y-1,cells(2)) * cells(1) &
                             + x &
                             + 1
-    IPneighborhood(1,5,1,e) = modulo(z+1,grid(3)) * grid(1) * grid(2) &
-                            + y * grid(1) &
+    IPneighborhood(1,5,1,e) = modulo(z+1,cells(3)) * cells(1) * cells(2) &
+                            + y * cells(1) &
                             + x &
                             + 1
-    IPneighborhood(1,6,1,e) = modulo(z-1,grid(3)) * grid(1) * grid(2) &
-                            + y * grid(1) &
+    IPneighborhood(1,6,1,e) = modulo(z-1,cells(3)) * cells(1) * cells(2) &
+                            + y * cells(1) &
                             + x &
                             + 1
     ! IP ID
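IPneighborhood linearises the (x,y,z) cell index into a 1-based element ID and wraps each direction periodically with modulo. A sketch of the same index arithmetic (hypothetical helper; 0-based x,y,z as in the loop above):

    def neighbor_ids(x, y, z, cells):
        """1-based element IDs of the six periodic face neighbors of cell (x,y,z)."""
        cx, cy, cz = cells
        lin = lambda i, j, k: k*cx*cy + j*cx + i + 1      # linear, 1-based element ID
        return [lin((x+1) % cx, y, z), lin((x-1) % cx, y, z),
                lin(x, (y+1) % cy, z), lin(x, (y-1) % cy, z),
                lin(x, y, (z+1) % cz), lin(x, y, (z-1) % cz)]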

src/grid/grid_damage_spectral.f90
@@ -106,9 +106,9 @@ subroutine grid_damage_spectral_init()
 !--------------------------------------------------------------------------------------------------
 ! init fields
-  allocate(phi_current(grid(1),grid(2),grid3), source=1.0_pReal)
-  allocate(phi_lastInc(grid(1),grid(2),grid3), source=1.0_pReal)
-  allocate(phi_stagInc(grid(1),grid(2),grid3), source=1.0_pReal)
+  allocate(phi_current(cells(1),cells(2),cells3), source=1.0_pReal)
+  allocate(phi_lastInc(cells(1),cells(2),cells3), source=1.0_pReal)
+  allocate(phi_stagInc(cells(1),cells(2),cells3), source=1.0_pReal)

 !--------------------------------------------------------------------------------------------------
 ! initialize solver specific parts of PETSc
@@ -117,23 +117,23 @@ subroutine grid_damage_spectral_init()
   call SNESSetOptionsPrefix(SNES_damage,'damage_',err_PETSc)
   CHKERRQ(err_PETSc)
   localK            = 0_pPetscInt
-  localK(worldrank) = int(grid3,pPetscInt)
+  localK(worldrank) = int(cells3,pPetscInt)
   call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI)
   if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
   call DMDACreate3D(PETSC_COMM_WORLD, &
         DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, &                ! cut off stencil at boundary
         DMDA_STENCIL_BOX, &                                                    ! Moore (26) neighborhood around central point
-        int(grid(1),pPetscInt),int(grid(2),pPetscInt),int(grid(3),pPetscInt), & ! global grid
+        int(cells(1),pPetscInt),int(cells(2),pPetscInt),int(cells(3),pPetscInt), & ! global cells
         1_pPetscInt, 1_pPetscInt, int(worldsize,pPetscInt), &
         1_pPetscInt, 0_pPetscInt, &                                            ! #dof (phi, scalar), ghost boundary width (domain overlap)
-        [int(grid(1),pPetscInt)],[int(grid(2),pPetscInt)],localK, &            ! local grid
+        [int(cells(1),pPetscInt)],[int(cells(2),pPetscInt)],localK, &          ! local cells
         damage_grid,err_PETSc)                                                 ! handle, error
   CHKERRQ(err_PETSc)
   call DMsetFromOptions(damage_grid,err_PETSc)
   CHKERRQ(err_PETSc)
   call DMsetUp(damage_grid,err_PETSc)
   CHKERRQ(err_PETSc)
-  call DMCreateGlobalVector(damage_grid,solution_vec,err_PETSc)                ! global solution vector (grid x 1, i.e. every def grad tensor)
+  call DMCreateGlobalVector(damage_grid,solution_vec,err_PETSc)                ! global solution vector (cells x 1, i.e. every def grad tensor)
   CHKERRQ(err_PETSc)
   call DMDASNESSetFunctionLocal(damage_grid,INSERT_VALUES,formResidual,PETSC_NULL_SNES,err_PETSc) ! residual vector of same shape as solution vector
   CHKERRQ(err_PETSc)
@@ -213,7 +213,7 @@ function grid_damage_spectral_solution(Delta_t) result(solution)
 !--------------------------------------------------------------------------------------------------
 ! updating damage state
   ce = 0
-  do k = 1, grid3;  do j = 1, grid(2);  do i = 1,grid(1)
+  do k = 1, cells3;  do j = 1, cells(2);  do i = 1,cells(1)
     ce = ce + 1
     call homogenization_set_phi(phi_current(i,j,k),ce)
   end do; end do; end do
@@ -255,7 +255,7 @@ subroutine grid_damage_spectral_forward(cutBack)
     call DMDAVecRestoreArrayF90(dm_local,solution_vec,phi_PETSc,err_PETSc)
     CHKERRQ(err_PETSc)
     ce = 0
-    do k = 1, grid3;  do j = 1, grid(2);  do i = 1,grid(1)
+    do k = 1, cells3;  do j = 1, cells(2);  do i = 1,cells(1)
       ce = ce + 1
       call homogenization_set_phi(phi_current(i,j,k),ce)
     end do; end do; end do
@@ -289,12 +289,12 @@ subroutine formResidual(in,x_scal,r,dummy,err_PETSc)
 !--------------------------------------------------------------------------------------------------
 ! evaluate polarization field
   scalarField_real = 0.0_pReal
-  scalarField_real(1:grid(1),1:grid(2),1:grid3) = phi_current
+  scalarField_real(1:cells(1),1:cells(2),1:cells3) = phi_current
   call utilities_FFTscalarForward
   call utilities_fourierScalarGradient                                         !< calculate gradient of damage field
   call utilities_FFTvectorBackward
   ce = 0
-  do k = 1, grid3;  do j = 1, grid(2);  do i = 1,grid(1)
+  do k = 1, cells3;  do j = 1, cells(2);  do i = 1,cells(1)
     ce = ce + 1
     vectorField_real(1:3,i,j,k) = matmul(homogenization_K_phi(ce) - K_ref, vectorField_real(1:3,i,j,k))
   end do; end do; end do
@@ -302,7 +302,7 @@ subroutine formResidual(in,x_scal,r,dummy,err_PETSc)
   call utilities_fourierVectorDivergence                                       !< calculate damage divergence in fourier field
   call utilities_FFTscalarBackward
   ce = 0
-  do k = 1, grid3;  do j = 1, grid(2);  do i = 1,grid(1)
+  do k = 1, cells3;  do j = 1, cells(2);  do i = 1,cells(1)
     ce = ce + 1
     scalarField_real(i,j,k) = params%Delta_t*(scalarField_real(i,j,k) + homogenization_f_phi(phi_current(i,j,k),ce)) &
                             + homogenization_mu_phi(ce)*(phi_lastInc(i,j,k) - phi_current(i,j,k)) &
@@ -315,14 +315,14 @@ subroutine formResidual(in,x_scal,r,dummy,err_PETSc)
   call utilities_fourierGreenConvolution(K_ref, mu_ref, params%Delta_t)
   call utilities_FFTscalarBackward

-  where(scalarField_real(1:grid(1),1:grid(2),1:grid3) > phi_lastInc) &
-    scalarField_real(1:grid(1),1:grid(2),1:grid3) = phi_lastInc
-  where(scalarField_real(1:grid(1),1:grid(2),1:grid3) < num%residualStiffness) &
-    scalarField_real(1:grid(1),1:grid(2),1:grid3) = num%residualStiffness
+  where(scalarField_real(1:cells(1),1:cells(2),1:cells3) > phi_lastInc) &
+    scalarField_real(1:cells(1),1:cells(2),1:cells3) = phi_lastInc
+  where(scalarField_real(1:cells(1),1:cells(2),1:cells3) < num%residualStiffness) &
+    scalarField_real(1:cells(1),1:cells(2),1:cells3) = num%residualStiffness

 !--------------------------------------------------------------------------------------------------
 ! constructing residual
-  r = scalarField_real(1:grid(1),1:grid(2),1:grid3) - phi_current
+  r = scalarField_real(1:cells(1),1:cells(2),1:cells3) - phi_current
   err_PETSc = 0

 end subroutine formResidual
@@ -339,7 +339,7 @@ subroutine updateReference()
   K_ref = 0.0_pReal
   mu_ref = 0.0_pReal
-  do ce = 1, product(grid(1:2))*grid3
+  do ce = 1, product(cells(1:2))*cells3
     K_ref  = K_ref  + homogenization_K_phi(ce)
     mu_ref = mu_ref + homogenization_mu_phi(ce)
   end do
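The cells3/cells3Offset pair used by every solver comes from the slab decomposition along z that fftw_mpi_local_size_3d reports in discretization_grid: each rank owns cells(1) x cells(2) x cells3 cells starting at layer cells3Offset. A sketch of such a split (simple even distribution for illustration; FFTW's actual block assignment can differ when worldsize does not divide cells(3)):

    def slab_split(cells3_global, worldsize):
        """(cells3, cells3Offset) per rank for a slab decomposition along z."""
        base, extra = divmod(cells3_global, worldsize)
        result, offset = [], 0
        for rank in range(worldsize):
            n = base + (1 if rank < extra else 0)   # spread the remainder over low ranks
            result.append((n, offset))
            offset += n
        return result

    print(slab_split(10, 3))   # [(4, 0), (3, 4), (3, 7)]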

src/grid/grid_mechanical_FEM.f90
@@ -153,9 +153,9 @@ subroutine grid_mechanical_FEM_init
 !--------------------------------------------------------------------------------------------------
 ! allocate global fields
-  allocate(F         (3,3,grid(1),grid(2),grid3),source = 0.0_pReal)
-  allocate(P_current (3,3,grid(1),grid(2),grid3),source = 0.0_pReal)
-  allocate(F_lastInc (3,3,grid(1),grid(2),grid3),source = 0.0_pReal)
+  allocate(F         (3,3,cells(1),cells(2),cells3),source = 0.0_pReal)
+  allocate(P_current (3,3,cells(1),cells(2),cells3),source = 0.0_pReal)
+  allocate(F_lastInc (3,3,cells(1),cells(2),cells3),source = 0.0_pReal)

 !--------------------------------------------------------------------------------------------------
 ! initialize solver specific parts of PETSc
@@ -164,16 +164,16 @@ subroutine grid_mechanical_FEM_init
   call SNESSetOptionsPrefix(SNES_mechanical,'mechanical_',err_PETSc)
   CHKERRQ(err_PETSc)
   localK            = 0_pPetscInt
-  localK(worldrank) = int(grid3,pPetscInt)
+  localK(worldrank) = int(cells3,pPetscInt)
   call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI)
   if(err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
   call DMDACreate3d(PETSC_COMM_WORLD, &
         DM_BOUNDARY_PERIODIC, DM_BOUNDARY_PERIODIC, DM_BOUNDARY_PERIODIC, &
         DMDA_STENCIL_BOX, &
-        int(grid(1),pPetscInt),int(grid(2),pPetscInt),int(grid(3),pPetscInt), & ! global grid
+        int(cells(1),pPetscInt),int(cells(2),pPetscInt),int(cells(3),pPetscInt), & ! global cells
         1_pPetscInt, 1_pPetscInt, int(worldsize,pPetscInt), &
         3_pPetscInt, 1_pPetscInt, &                                            ! #dof (u, vector), ghost boundary width (domain overlap)
-        [int(grid(1),pPetscInt)],[int(grid(2),pPetscInt)],localK, &            ! local grid
+        [int(cells(1),pPetscInt)],[int(cells(2),pPetscInt)],localK, &          ! local cells
         mechanical_grid,err_PETSc)
   CHKERRQ(err_PETSc)
   call DMsetFromOptions(mechanical_grid,err_PETSc)
@@ -214,7 +214,7 @@ subroutine grid_mechanical_FEM_init
   call DMDAVecGetArrayF90(mechanical_grid,solution_lastInc,u_lastInc,err_PETSc)
   CHKERRQ(err_PETSc)

-  delta = geomSize/real(grid,pReal)                                            ! grid spacing
+  delta = geomSize/real(cells,pReal)                                           ! grid spacing
   detJ = product(delta)                                                        ! cell volume

   BMat = reshape(real([-1.0_pReal/delta(1),-1.0_pReal/delta(2),-1.0_pReal/delta(3), &
@@ -255,11 +255,11 @@ subroutine grid_mechanical_FEM_init
     call HDF5_read(u_lastInc,groupHandle,'u_lastInc')
   elseif (interface_restartInc == 0) then restartRead
-    F_lastInc = spread(spread(spread(math_I3,3,grid(1)),4,grid(2)),5,grid3)    ! initialize to identity
-    F = spread(spread(spread(math_I3,3,grid(1)),4,grid(2)),5,grid3)
+    F_lastInc = spread(spread(spread(math_I3,3,cells(1)),4,cells(2)),5,cells3) ! initialize to identity
+    F = spread(spread(spread(math_I3,3,cells(1)),4,cells(2)),5,cells3)
   endif restartRead

-  homogenization_F0 = reshape(F_lastInc, [3,3,product(grid(1:2))*grid3])       ! set starting condition for homogenization_mechanical_response
+  homogenization_F0 = reshape(F_lastInc, [3,3,product(cells(1:2))*cells3])     ! set starting condition for homogenization_mechanical_response
   call utilities_updateCoords(F)
   call utilities_constitutiveResponse(P_current,P_av,C_volAvg,devNull, &       ! stress field, stress avg, global average of stiffness and (min+max)/2
                                       F, &                                     ! target F
@@ -386,7 +386,7 @@ subroutine grid_mechanical_FEM_forward(cutBack,guess,Delta_t,Delta_t_old,t_remai
     F_lastInc = F

-    homogenization_F0 = reshape(F, [3,3,product(grid(1:2))*grid3])
+    homogenization_F0 = reshape(F, [3,3,product(cells(1:2))*cells3])
   endif

 !--------------------------------------------------------------------------------------------------
@@ -556,13 +556,13 @@ subroutine formResidual(da_local,x_local, &
 ! get deformation gradient
   call DMDAVecGetArrayF90(da_local,x_local,x_scal,err_PETSc)
   CHKERRQ(err_PETSc)
-  do k = grid3offset+1, grid3offset+grid3; do j = 1, grid(2); do i = 1, grid(1)
+  do k = cells3Offset+1, cells3Offset+cells3; do j = 1, cells(2); do i = 1, cells(1)
     ctr = 0
     do kk = -1, 0; do jj = -1, 0; do ii = -1, 0
       ctr = ctr + 1
       x_elem(ctr,1:3) = x_scal(0:2,i+ii,j+jj,k+kk)
     enddo; enddo; enddo
-    F(1:3,1:3,i,j,k-grid3offset) = params%rotation_BC%rotate(F_aim,active=.true.) + transpose(matmul(BMat,x_elem))
+    F(1:3,1:3,i,j,k-cells3Offset) = params%rotation_BC%rotate(F_aim,active=.true.) + transpose(matmul(BMat,x_elem))
   enddo; enddo; enddo
   call DMDAVecRestoreArrayF90(da_local,x_local,x_scal,err_PETSc)
   CHKERRQ(err_PETSc)
@@ -589,14 +589,14 @@ subroutine formResidual(da_local,x_local, &
   call DMDAVecGetArrayF90(da_local,x_local,x_scal,err_PETSc)
   CHKERRQ(err_PETSc)
   ele = 0
-  do k = grid3offset+1, grid3offset+grid3; do j = 1, grid(2); do i = 1, grid(1)
+  do k = cells3Offset+1, cells3Offset+cells3; do j = 1, cells(2); do i = 1, cells(1)
     ctr = 0
     do kk = -1, 0; do jj = -1, 0; do ii = -1, 0
      ctr = ctr + 1
      x_elem(ctr,1:3) = x_scal(0:2,i+ii,j+jj,k+kk)
    enddo; enddo; enddo
    ele = ele + 1
-    f_elem = matmul(transpose(BMat),transpose(P_current(1:3,1:3,i,j,k-grid3offset)))*detJ + &
+    f_elem = matmul(transpose(BMat),transpose(P_current(1:3,1:3,i,j,k-cells3Offset)))*detJ + &
             matmul(HGMat,x_elem)*(homogenization_dPdF(1,1,1,1,ele) + &
                                   homogenization_dPdF(2,2,2,2,ele) + &
                                   homogenization_dPdF(3,3,3,3,ele))/3.0_pReal
@@ -615,17 +615,17 @@ subroutine formResidual(da_local,x_local, &
 ! applying boundary conditions
   call DMDAVecGetArrayF90(da_local,f_local,r,err_PETSc)
   CHKERRQ(err_PETSc)
-  if (grid3offset == 0) then
+  if (cells3Offset == 0) then
     r(0:2,0,       0,       0) = 0.0_pReal
-    r(0:2,grid(1), 0,       0) = 0.0_pReal
-    r(0:2,0,       grid(2), 0) = 0.0_pReal
-    r(0:2,grid(1), grid(2), 0) = 0.0_pReal
+    r(0:2,cells(1),0,       0) = 0.0_pReal
+    r(0:2,0,       cells(2),0) = 0.0_pReal
+    r(0:2,cells(1),cells(2),0) = 0.0_pReal
   end if
-  if (grid3+grid3offset == grid(3)) then
-    r(0:2,0,       0,       grid(3)) = 0.0_pReal
-    r(0:2,grid(1), 0,       grid(3)) = 0.0_pReal
-    r(0:2,0,       grid(2), grid(3)) = 0.0_pReal
-    r(0:2,grid(1), grid(2), grid(3)) = 0.0_pReal
+  if (cells3+cells3Offset == cells(3)) then
+    r(0:2,0,       0,       cells(3)) = 0.0_pReal
+    r(0:2,cells(1),0,       cells(3)) = 0.0_pReal
+    r(0:2,0,       cells(2),cells(3)) = 0.0_pReal
+    r(0:2,cells(1),cells(2),cells(3)) = 0.0_pReal
   end if
   call DMDAVecRestoreArrayF90(da_local,f_local,r,err_PETSc)
   CHKERRQ(err_PETSc)
@@ -663,7 +663,7 @@ subroutine formJacobian(da_local,x_local,Jac_pre,Jac,dummy,err_PETSc)
   call MatZeroEntries(Jac,err_PETSc)
   CHKERRQ(err_PETSc)
   ce = 0
-  do k = grid3offset+1, grid3offset+grid3; do j = 1, grid(2); do i = 1, grid(1)
+  do k = cells3Offset+1, cells3Offset+cells3; do j = 1, cells(2); do i = 1, cells(1)
     ctr = 0
     do kk = -1, 0; do jj = -1, 0; do ii = -1, 0
       ctr = ctr + 1
@@ -719,7 +719,7 @@ subroutine formJacobian(da_local,x_local,Jac_pre,Jac,dummy,err_PETSc)
   call DMDAVecGetArrayF90(da_local,coordinates,x_scal,err_PETSc)
   CHKERRQ(err_PETSc)
   ce = 0
-  do k = grid3offset+1, grid3offset+grid3; do j = 1, grid(2); do i = 1, grid(1)
+  do k = cells3Offset+1, cells3Offset+cells3; do j = 1, cells(2); do i = 1, cells(1)
     ce = ce + 1
     x_scal(0:2,i-1,j-1,k-1) = discretization_IPcoords(1:3,ce)
   enddo; enddo; enddo
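The restartRead branch above initialises F and F_lastInc to the identity on every cell via nested spread calls. In NumPy terms this is a broadcast of the 3x3 identity over the local cell dimensions (a sketch; the local extents are assumed values):

    import numpy as np

    cells12, cells3 = (16, 16), 8                   # local cells(1:2) and cells3
    F = np.broadcast_to(np.eye(3)[:, :, None, None, None],
                        (3, 3, *cells12, cells3)).copy()   # identity at every cell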

src/grid/grid_mechanical_spectral_basic.f90
@@ -96,7 +96,7 @@ contains
 !--------------------------------------------------------------------------------------------------
 subroutine grid_mechanical_spectral_basic_init

-  real(pReal), dimension(3,3,grid(1),grid(2),grid3) :: P
+  real(pReal), dimension(3,3,cells(1),cells(2),cells3) :: P
   PetscErrorCode :: err_PETSc
   integer(MPI_INTEGER_KIND) :: err_MPI
   PetscScalar, pointer, dimension(:,:,:,:) :: &
@@ -153,8 +153,8 @@ subroutine grid_mechanical_spectral_basic_init
 !--------------------------------------------------------------------------------------------------
 ! allocate global fields
-  allocate(F_lastInc(3,3,grid(1),grid(2),grid3),source = 0.0_pReal)
-  allocate(Fdot     (3,3,grid(1),grid(2),grid3),source = 0.0_pReal)
+  allocate(F_lastInc(3,3,cells(1),cells(2),cells3),source = 0.0_pReal)
+  allocate(Fdot     (3,3,cells(1),cells(2),cells3),source = 0.0_pReal)

 !--------------------------------------------------------------------------------------------------
 ! initialize solver specific parts of PETSc
@@ -163,23 +163,23 @@ subroutine grid_mechanical_spectral_basic_init
   call SNESSetOptionsPrefix(SNES_mechanical,'mechanical_',err_PETSc)
   CHKERRQ(err_PETSc)
   localK            = 0_pPetscInt
-  localK(worldrank) = int(grid3,pPetscInt)
+  localK(worldrank) = int(cells3,pPetscInt)
   call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI)
   if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
   call DMDACreate3d(PETSC_COMM_WORLD, &
         DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, &                ! cut off stencil at boundary
         DMDA_STENCIL_BOX, &                                                    ! Moore (26) neighborhood around central point
-        int(grid(1),pPetscInt),int(grid(2),pPetscInt),int(grid(3),pPetscInt), & ! global grid
+        int(cells(1),pPetscInt),int(cells(2),pPetscInt),int(cells(3),pPetscInt), & ! global cells
         1_pPetscInt, 1_pPetscInt, int(worldsize,pPetscInt), &
         9_pPetscInt, 0_pPetscInt, &                                            ! #dof (F, tensor), ghost boundary width (domain overlap)
-        [int(grid(1),pPetscInt)],[int(grid(2),pPetscInt)],localK, &            ! local grid
+        [int(cells(1),pPetscInt)],[int(cells(2),pPetscInt)],localK, &          ! local cells
         da,err_PETSc)                                                          ! handle, error
   CHKERRQ(err_PETSc)
   call DMsetFromOptions(da,err_PETSc)
   CHKERRQ(err_PETSc)
   call DMsetUp(da,err_PETSc)
   CHKERRQ(err_PETSc)
-  call DMcreateGlobalVector(da,solution_vec,err_PETSc)                         ! global solution vector (grid x 9, i.e. every def grad tensor)
+  call DMcreateGlobalVector(da,solution_vec,err_PETSc)                         ! global solution vector (cells x 9, i.e. every def grad tensor)
   CHKERRQ(err_PETSc)
   call DMDASNESsetFunctionLocal(da,INSERT_VALUES,formResidual,PETSC_NULL_SNES,err_PETSc) ! residual vector of same shape as solution vector
   CHKERRQ(err_PETSc)
@@ -217,11 +217,11 @@ subroutine grid_mechanical_spectral_basic_init
     call HDF5_read(F_lastInc,groupHandle,'F_lastInc')
   elseif (interface_restartInc == 0) then restartRead
-    F_lastInc = spread(spread(spread(math_I3,3,grid(1)),4,grid(2)),5,grid3)    ! initialize to identity
-    F = reshape(F_lastInc,[9,grid(1),grid(2),grid3])
+    F_lastInc = spread(spread(spread(math_I3,3,cells(1)),4,cells(2)),5,cells3) ! initialize to identity
+    F = reshape(F_lastInc,[9,cells(1),cells(2),cells3])
   end if restartRead

-  homogenization_F0 = reshape(F_lastInc, [3,3,product(grid(1:2))*grid3])       ! set starting condition for homogenization_mechanical_response
+  homogenization_F0 = reshape(F_lastInc, [3,3,product(cells(1:2))*cells3])     ! set starting condition for homogenization_mechanical_response
   call utilities_updateCoords(reshape(F,shape(F_lastInc)))
   call utilities_constitutiveResponse(P,P_av,C_volAvg,C_minMaxAvg, &           ! stress field, stress avg, global average of stiffness and (min+max)/2
                                       reshape(F,shape(F_lastInc)), &           ! target F
@@ -343,11 +343,11 @@ subroutine grid_mechanical_spectral_basic_forward(cutBack,guess,Delta_t,Delta_t_
     end if

     Fdot = utilities_calculateRate(guess, &
-                                   F_lastInc,reshape(F,[3,3,grid(1),grid(2),grid3]),Delta_t_old, &
+                                   F_lastInc,reshape(F,[3,3,cells(1),cells(2),cells3]),Delta_t_old, &
                                    rotation_BC%rotate(F_aimDot,active=.true.))
-    F_lastInc = reshape(F,[3,3,grid(1),grid(2),grid3])
+    F_lastInc = reshape(F,[3,3,cells(1),cells(2),cells3])

-    homogenization_F0 = reshape(F,[3,3,product(grid(1:2))*grid3])
+    homogenization_F0 = reshape(F,[3,3,product(cells(1:2))*cells3])
   end if

 !--------------------------------------------------------------------------------------------------
@@ -359,7 +359,7 @@ subroutine grid_mechanical_spectral_basic_forward(cutBack,guess,Delta_t,Delta_t_
           + merge(.0_pReal,stress_BC%values,stress_BC%mask)*Delta_t
   F = reshape(utilities_forwardField(Delta_t,F_lastInc,Fdot, &                 ! estimate of F at end of time+Delta_t that matches rotated F_aim on average
-              rotation_BC%rotate(F_aim,active=.true.)),[9,grid(1),grid(2),grid3])
+              rotation_BC%rotate(F_aim,active=.true.)),[9,cells(1),cells(2),cells3])
   call DMDAVecRestoreArrayF90(da,solution_vec,F,err_PETSc)
   CHKERRQ(err_PETSc)
@@ -530,7 +530,7 @@ subroutine formResidual(in, F, &
 !--------------------------------------------------------------------------------------------------
 ! updated deformation gradient using fix point algorithm of basic scheme
   tensorField_real = 0.0_pReal
-  tensorField_real(1:3,1:3,1:grid(1),1:grid(2),1:grid3) = r                    ! store fPK field for subsequent FFT forward transform
+  tensorField_real(1:3,1:3,1:cells(1),1:cells(2),1:cells3) = r                 ! store fPK field for subsequent FFT forward transform
   call utilities_FFTtensorForward                                              ! FFT forward of global "tensorField_real"
   err_div = utilities_divergenceRMS()                                          ! divRMS of tensorField_fourier for later use
   call utilities_fourierGammaConvolution(params%rotation_BC%rotate(deltaF_aim,active=.true.)) ! convolution of Gamma and tensorField_fourier
@@ -538,7 +538,7 @@ subroutine formResidual(in, F, &
 !--------------------------------------------------------------------------------------------------
 ! constructing residual
-  r = tensorField_real(1:3,1:3,1:grid(1),1:grid(2),1:grid3)                    ! Gamma*P gives correction towards div(P) = 0, so needs to be zero, too
+  r = tensorField_real(1:3,1:3,1:cells(1),1:cells(2),1:cells3)                 ! Gamma*P gives correction towards div(P) = 0, so needs to be zero, too

 end subroutine formResidual

src/grid/grid_mechanical_spectral_polarisation.f90
@@ -107,7 +107,7 @@ contains
 !--------------------------------------------------------------------------------------------------
 subroutine grid_mechanical_spectral_polarisation_init

-  real(pReal), dimension(3,3,grid(1),grid(2),grid3) :: P
+  real(pReal), dimension(3,3,cells(1),cells(2),cells3) :: P
   PetscErrorCode :: err_PETSc
   integer(MPI_INTEGER_KIND) :: err_MPI
   PetscScalar, pointer, dimension(:,:,:,:) :: &
@@ -171,10 +171,10 @@ subroutine grid_mechanical_spectral_polarisation_init
 !--------------------------------------------------------------------------------------------------
 ! allocate global fields
-  allocate(F_lastInc    (3,3,grid(1),grid(2),grid3),source = 0.0_pReal)
-  allocate(Fdot         (3,3,grid(1),grid(2),grid3),source = 0.0_pReal)
-  allocate(F_tau_lastInc(3,3,grid(1),grid(2),grid3),source = 0.0_pReal)
-  allocate(F_tauDot     (3,3,grid(1),grid(2),grid3),source = 0.0_pReal)
+  allocate(F_lastInc    (3,3,cells(1),cells(2),cells3),source = 0.0_pReal)
+  allocate(Fdot         (3,3,cells(1),cells(2),cells3),source = 0.0_pReal)
+  allocate(F_tau_lastInc(3,3,cells(1),cells(2),cells3),source = 0.0_pReal)
+  allocate(F_tauDot     (3,3,cells(1),cells(2),cells3),source = 0.0_pReal)

 !--------------------------------------------------------------------------------------------------
 ! initialize solver specific parts of PETSc
@@ -183,23 +183,23 @@ subroutine grid_mechanical_spectral_polarisation_init
   call SNESSetOptionsPrefix(SNES_mechanical,'mechanical_',err_PETSc)
   CHKERRQ(err_PETSc)
   localK            = 0_pPetscInt
-  localK(worldrank) = int(grid3,pPetscInt)
+  localK(worldrank) = int(cells3,pPetscInt)
   call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI)
   if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
   call DMDACreate3d(PETSC_COMM_WORLD, &
         DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, &                ! cut off stencil at boundary
         DMDA_STENCIL_BOX, &                                                    ! Moore (26) neighborhood around central point
-        int(grid(1),pPetscInt),int(grid(2),pPetscInt),int(grid(3),pPetscInt), & ! global grid
+        int(cells(1),pPetscInt),int(cells(2),pPetscInt),int(cells(3),pPetscInt), & ! global cells
         1_pPetscInt, 1_pPetscInt, int(worldsize,pPetscInt), &
         18_pPetscInt, 0_pPetscInt, &                                           ! #dof (2xtensor), ghost boundary width (domain overlap)
-        [int(grid(1),pPetscInt)],[int(grid(2),pPetscInt)],localK, &            ! local grid
+        [int(cells(1),pPetscInt)],[int(cells(2),pPetscInt)],localK, &          ! local cells
         da,err_PETSc)                                                          ! handle, error
   CHKERRQ(err_PETSc)
   call DMsetFromOptions(da,err_PETSc)
   CHKERRQ(err_PETSc)
   call DMsetUp(da,err_PETSc)
   CHKERRQ(err_PETSc)
-  call DMcreateGlobalVector(da,solution_vec,err_PETSc)                         ! global solution vector (grid x 18, i.e. every def grad tensor)
+  call DMcreateGlobalVector(da,solution_vec,err_PETSc)                         ! global solution vector (cells x 18, i.e. every def grad tensor)
   CHKERRQ(err_PETSc)
   call DMDASNESsetFunctionLocal(da,INSERT_VALUES,formResidual,PETSC_NULL_SNES,err_PETSc) ! residual vector of same shape as solution vector
   CHKERRQ(err_PETSc)
@@ -241,13 +241,13 @@ subroutine grid_mechanical_spectral_polarisation_init
     call HDF5_read(F_tau_lastInc,groupHandle,'F_tau_lastInc')
   elseif (interface_restartInc == 0) then restartRead
-    F_lastInc = spread(spread(spread(math_I3,3,grid(1)),4,grid(2)),5,grid3)    ! initialize to identity
-    F = reshape(F_lastInc,[9,grid(1),grid(2),grid3])
+    F_lastInc = spread(spread(spread(math_I3,3,cells(1)),4,cells(2)),5,cells3) ! initialize to identity
+    F = reshape(F_lastInc,[9,cells(1),cells(2),cells3])
     F_tau = 2.0_pReal*F
     F_tau_lastInc = 2.0_pReal*F_lastInc
   end if restartRead

-  homogenization_F0 = reshape(F_lastInc, [3,3,product(grid(1:2))*grid3])       ! set starting condition for homogenization_mechanical_response
+  homogenization_F0 = reshape(F_lastInc, [3,3,product(cells(1:2))*cells3])     ! set starting condition for homogenization_mechanical_response
   call utilities_updateCoords(reshape(F,shape(F_lastInc)))
   call utilities_constitutiveResponse(P,P_av,C_volAvg,C_minMaxAvg, &           ! stress field, stress avg, global average of stiffness and (min+max)/2
                                       reshape(F,shape(F_lastInc)), &           ! target F
@@ -379,15 +379,15 @@ subroutine grid_mechanical_spectral_polarisation_forward(cutBack,guess,Delta_t,D
     end if

     Fdot = utilities_calculateRate(guess, &
-                                   F_lastInc,reshape(F,[3,3,grid(1),grid(2),grid3]),Delta_t_old, &
+                                   F_lastInc,reshape(F,[3,3,cells(1),cells(2),cells3]),Delta_t_old, &
                                    rotation_BC%rotate(F_aimDot,active=.true.))
     F_tauDot = utilities_calculateRate(guess, &
-                                       F_tau_lastInc,reshape(F_tau,[3,3,grid(1),grid(2),grid3]), Delta_t_old, &
+                                       F_tau_lastInc,reshape(F_tau,[3,3,cells(1),cells(2),cells3]), Delta_t_old, &
                                        rotation_BC%rotate(F_aimDot,active=.true.))
-    F_lastInc     = reshape(F,    [3,3,grid(1),grid(2),grid3])
-    F_tau_lastInc = reshape(F_tau,[3,3,grid(1),grid(2),grid3])
+    F_lastInc     = reshape(F,    [3,3,cells(1),cells(2),cells3])
+    F_tau_lastInc = reshape(F_tau,[3,3,cells(1),cells(2),cells3])

-    homogenization_F0 = reshape(F,[3,3,product(grid(1:2))*grid3])
+    homogenization_F0 = reshape(F,[3,3,product(cells(1:2))*cells3])
   end if

 !--------------------------------------------------------------------------------------------------
@@ -400,12 +400,12 @@ subroutine grid_mechanical_spectral_polarisation_forward(cutBack,guess,Delta_t,D
   F = reshape(utilities_forwardField(Delta_t,F_lastInc,Fdot, &                 ! estimate of F at end of time+Delta_t that matches rotated F_aim on average
                                      rotation_BC%rotate(F_aim,active=.true.)),&
-              [9,grid(1),grid(2),grid3])
+              [9,cells(1),cells(2),cells3])
   if (guess) then
     F_tau = reshape(Utilities_forwardField(Delta_t,F_tau_lastInc,F_taudot), &
-                    [9,grid(1),grid(2),grid3])                                 ! does not have any average value as boundary condition
+                    [9,cells(1),cells(2),cells3])                              ! does not have any average value as boundary condition
   else
-    do k = 1, grid3; do j = 1, grid(2); do i = 1, grid(1)
+    do k = 1, cells3; do j = 1, cells(2); do i = 1, cells(1)
       F_lambda33 = reshape(F_tau(1:9,i,j,k)-F(1:9,i,j,k),[3,3])
       F_lambda33 = math_I3 &
                  + math_mul3333xx33(S_scale,0.5_pReal*matmul(F_lambda33, &
@@ -597,7 +597,7 @@ subroutine formResidual(in, FandF_tau, &
 !--------------------------------------------------------------------------------------------------
 !
   tensorField_real = 0.0_pReal
-  do k = 1, grid3; do j = 1, grid(2); do i = 1, grid(1)
+  do k = 1, cells3; do j = 1, cells(2); do i = 1, cells(1)
     tensorField_real(1:3,1:3,i,j,k) = &
       num%beta*math_mul3333xx33(C_scale,F(1:3,1:3,i,j,k) - math_I3) -&
       num%alpha*matmul(F(1:3,1:3,i,j,k), &
@@ -612,7 +612,7 @@ subroutine formResidual(in, FandF_tau, &
 !--------------------------------------------------------------------------------------------------
 ! constructing residual
-  r_F_tau = num%beta*F - tensorField_real(1:3,1:3,1:grid(1),1:grid(2),1:grid3)
+  r_F_tau = num%beta*F - tensorField_real(1:3,1:3,1:cells(1),1:cells(2),1:cells3)

 !--------------------------------------------------------------------------------------------------
 ! evaluate constitutive response
@@ -629,14 +629,14 @@ subroutine formResidual(in, FandF_tau, &
                                params%stress_mask)))

 ! calculate divergence
   tensorField_real = 0.0_pReal
-  tensorField_real(1:3,1:3,1:grid(1),1:grid(2),1:grid3) = r_F                  !< stress field in disguise
+  tensorField_real(1:3,1:3,1:cells(1),1:cells(2),1:cells3) = r_F               !< stress field in disguise
   call utilities_FFTtensorForward
   err_div = utilities_divergenceRMS()                                          !< root mean squared error in divergence of stress

 !--------------------------------------------------------------------------------------------------
 ! constructing residual
   e = 0
-  do k = 1, grid3; do j = 1, grid(2); do i = 1, grid(1)
+  do k = 1, cells3; do j = 1, cells(2); do i = 1, cells(1)
     e = e + 1
     r_F(1:3,1:3,i,j,k) = &
       math_mul3333xx33(math_invSym3333(homogenization_dPdF(1:3,1:3,1:3,1:3,e) + C_scale), &
@@ -648,7 +648,7 @@ subroutine formResidual(in, FandF_tau, &
 !--------------------------------------------------------------------------------------------------
 ! calculating curl
   tensorField_real = 0.0_pReal
-  tensorField_real(1:3,1:3,1:grid(1),1:grid(2),1:grid3) = F
+  tensorField_real(1:3,1:3,1:cells(1),1:cells(2),1:cells3) = F
   call utilities_FFTtensorForward
   err_curl = utilities_curlRMS()

src/grid/grid_thermal_spectral.f90
@ -101,12 +101,12 @@ subroutine grid_thermal_spectral_init(T_0)
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! init fields ! init fields
allocate(T_current(grid(1),grid(2),grid3), source=T_0) allocate(T_current(cells(1),cells(2),cells3), source=T_0)
allocate(T_lastInc(grid(1),grid(2),grid3), source=T_0) allocate(T_lastInc(cells(1),cells(2),cells3), source=T_0)
allocate(T_stagInc(grid(1),grid(2),grid3), source=T_0) allocate(T_stagInc(cells(1),cells(2),cells3), source=T_0)
ce = 0 ce = 0
do k = 1, grid3; do j = 1, grid(2); do i = 1,grid(1) do k = 1, cells3; do j = 1, cells(2); do i = 1,cells(1)
ce = ce + 1 ce = ce + 1
call homogenization_thermal_setField(T_0,0.0_pReal,ce) call homogenization_thermal_setField(T_0,0.0_pReal,ce)
end do; end do; end do end do; end do; end do
@ -118,23 +118,23 @@ subroutine grid_thermal_spectral_init(T_0)
call SNESSetOptionsPrefix(SNES_thermal,'thermal_',err_PETSc) call SNESSetOptionsPrefix(SNES_thermal,'thermal_',err_PETSc)
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
localK = 0_pPetscInt localK = 0_pPetscInt
localK(worldrank) = int(grid3,pPetscInt) localK(worldrank) = int(cells3,pPetscInt)
call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI) call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call DMDACreate3D(PETSC_COMM_WORLD, & call DMDACreate3D(PETSC_COMM_WORLD, &
DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary
DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point
int(grid(1),pPetscInt),int(grid(2),pPetscInt),int(grid(3),pPetscInt), & ! global grid int(cells(1),pPetscInt),int(cells(2),pPetscInt),int(cells(3),pPetscInt), & ! global cells
1_pPetscInt, 1_pPetscInt, int(worldsize,pPetscInt), & 1_pPetscInt, 1_pPetscInt, int(worldsize,pPetscInt), &
1_pPetscInt, 0_pPetscInt, & ! #dof (T, scalar), ghost boundary width (domain overlap) 1_pPetscInt, 0_pPetscInt, & ! #dof (T, scalar), ghost boundary width (domain overlap)
[int(grid(1),pPetscInt)],[int(grid(2),pPetscInt)],localK, & ! local grid [int(cells(1),pPetscInt)],[int(cells(2),pPetscInt)],localK, & ! local cells
thermal_grid,err_PETSc) ! handle, error thermal_grid,err_PETSc) ! handle, error
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
call DMsetFromOptions(thermal_grid,err_PETSc) call DMsetFromOptions(thermal_grid,err_PETSc)
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
call DMsetUp(thermal_grid,err_PETSc) call DMsetUp(thermal_grid,err_PETSc)
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
call DMCreateGlobalVector(thermal_grid,solution_vec,err_PETSc) ! global solution vector (grid x 1, i.e. every def grad tensor) call DMCreateGlobalVector(thermal_grid,solution_vec,err_PETSc) ! global solution vector (cells x 1, i.e. every def grad tensor)
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
call DMDASNESSetFunctionLocal(thermal_grid,INSERT_VALUES,formResidual,PETSC_NULL_SNES,err_PETSc) ! residual vector of same shape as solution vector call DMDASNESSetFunctionLocal(thermal_grid,INSERT_VALUES,formResidual,PETSC_NULL_SNES,err_PETSc) ! residual vector of same shape as solution vector
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
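localK assembles the z-layer count of every MPI rank: each rank writes its own cells3 into its slot of an otherwise zeroed vector, and the in-place MPI_Allreduce with MPI_SUM merges these one-hot vectors into the complete table. For example, assuming cells(3) = 13 split over 4 ranks as 4+3+3+3, the reduction yields localK = [4,3,3,3], which DMDACreate3D consumes as the per-process layer sizes of its 1 x 1 x worldsize process grid.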
@ -198,7 +198,7 @@ function grid_thermal_spectral_solution(Delta_t) result(solution)
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! updating thermal state ! updating thermal state
ce = 0 ce = 0
do k = 1, grid3; do j = 1, grid(2); do i = 1,grid(1) do k = 1, cells3; do j = 1, cells(2); do i = 1,cells(1)
ce = ce + 1 ce = ce + 1
call homogenization_thermal_setField(T_current(i,j,k),(T_current(i,j,k)-T_lastInc(i,j,k))/params%Delta_t,ce) call homogenization_thermal_setField(T_current(i,j,k),(T_current(i,j,k)-T_lastInc(i,j,k))/params%Delta_t,ce)
end do; end do; end do end do; end do; end do
@ -241,7 +241,7 @@ subroutine grid_thermal_spectral_forward(cutBack)
call DMDAVecRestoreArrayF90(dm_local,solution_vec,T_PETSc,err_PETSc) call DMDAVecRestoreArrayF90(dm_local,solution_vec,T_PETSc,err_PETSc)
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
ce = 0 ce = 0
do k = 1, grid3; do j = 1, grid(2); do i = 1,grid(1) do k = 1, cells3; do j = 1, cells(2); do i = 1,cells(1)
ce = ce + 1 ce = ce + 1
call homogenization_thermal_setField(T_current(i,j,k),(T_current(i,j,k)-T_lastInc(i,j,k))/params%Delta_t,ce) call homogenization_thermal_setField(T_current(i,j,k),(T_current(i,j,k)-T_lastInc(i,j,k))/params%Delta_t,ce)
end do; end do; end do end do; end do; end do
@ -274,12 +274,12 @@ subroutine formResidual(in,x_scal,r,dummy,err_PETSc)
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! evaluate polarization field ! evaluate polarization field
scalarField_real = 0.0_pReal scalarField_real = 0.0_pReal
scalarField_real(1:grid(1),1:grid(2),1:grid3) = T_current scalarField_real(1:cells(1),1:cells(2),1:cells3) = T_current
call utilities_FFTscalarForward call utilities_FFTscalarForward
call utilities_fourierScalarGradient !< calculate gradient of temperature field call utilities_fourierScalarGradient !< calculate gradient of temperature field
call utilities_FFTvectorBackward call utilities_FFTvectorBackward
ce = 0 ce = 0
do k = 1, grid3; do j = 1, grid(2); do i = 1,grid(1) do k = 1, cells3; do j = 1, cells(2); do i = 1,cells(1)
ce = ce + 1 ce = ce + 1
vectorField_real(1:3,i,j,k) = matmul(homogenization_K_T(ce) - K_ref, vectorField_real(1:3,i,j,k)) vectorField_real(1:3,i,j,k) = matmul(homogenization_K_T(ce) - K_ref, vectorField_real(1:3,i,j,k))
end do; end do; end do end do; end do; end do
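At this point vectorField_real holds the temperature gradient in real space; scaling it by the deviation K(x) - K_ref of the local conductivity from the homogeneous reference turns it into the polarization term of the spectral scheme, so only the heterogeneous part of the heat flux is treated iteratively while the reference problem is inverted exactly in Fourier space.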
@ -287,7 +287,7 @@ subroutine formResidual(in,x_scal,r,dummy,err_PETSc)
call utilities_fourierVectorDivergence !< calculate temperature divergence in fourier field call utilities_fourierVectorDivergence !< calculate temperature divergence in fourier field
call utilities_FFTscalarBackward call utilities_FFTscalarBackward
ce = 0 ce = 0
do k = 1, grid3; do j = 1, grid(2); do i = 1,grid(1) do k = 1, cells3; do j = 1, cells(2); do i = 1,cells(1)
ce = ce + 1 ce = ce + 1
scalarField_real(i,j,k) = params%Delta_t*(scalarField_real(i,j,k) + homogenization_f_T(ce)) & scalarField_real(i,j,k) = params%Delta_t*(scalarField_real(i,j,k) + homogenization_f_T(ce)) &
+ homogenization_mu_T(ce) * (T_lastInc(i,j,k) - T_current(i,j,k)) & + homogenization_mu_T(ce) * (T_lastInc(i,j,k) - T_current(i,j,k)) &
@ -302,7 +302,7 @@ subroutine formResidual(in,x_scal,r,dummy,err_PETSc)
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! constructing residual ! constructing residual
r = T_current - scalarField_real(1:grid(1),1:grid(2),1:grid3) r = T_current - scalarField_real(1:cells(1),1:cells(2),1:cells3)
err_PETSc = 0 err_PETSc = 0
end subroutine formResidual end subroutine formResidual
@ -319,7 +319,7 @@ subroutine updateReference()
K_ref = 0.0_pReal K_ref = 0.0_pReal
mu_ref = 0.0_pReal mu_ref = 0.0_pReal
do ce = 1, product(grid(1:2))*grid3 do ce = 1, product(cells(1:2))*cells3
K_ref = K_ref + homogenization_K_T(ce) K_ref = K_ref + homogenization_K_T(ce)
mu_ref = mu_ref + homogenization_mu_T(ce) mu_ref = mu_ref + homogenization_mu_T(ce)
end do end do
@ -29,9 +29,9 @@ module spectral_utilities
include 'fftw3-mpi.f03' include 'fftw3-mpi.f03'
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! grid related information information ! grid related information
real(pReal), protected, public :: wgt !< weighting factor 1/Nelems real(pReal), protected, public :: wgt !< weighting factor 1/Nelems
integer, protected, public :: grid1Red !< grid(1)/2 integer, protected, public :: grid1Red !< cells(1)/2
real(pReal), protected, public, dimension(3) :: scaledGeomSize !< scaled geometry size for calculation of divergence real(pReal), protected, public, dimension(3) :: scaledGeomSize !< scaled geometry size for calculation of divergence
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
@ -201,8 +201,8 @@ subroutine spectral_utilities_init
num_grid%get_asString('PETSc_options',defaultVal=''),err_PETSc) num_grid%get_asString('PETSc_options',defaultVal=''),err_PETSc)
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
grid1Red = grid(1)/2 + 1 grid1Red = cells(1)/2 + 1
wgt = 1.0/real(product(grid),pReal) wgt = 1.0/real(product(cells),pReal)
num%memory_efficient = num_grid%get_asInt('memory_efficient', defaultVal=1) > 0 ! ToDo: should be logical in YAML file num%memory_efficient = num_grid%get_asInt('memory_efficient', defaultVal=1) > 0 ! ToDo: should be logical in YAML file
num%divergence_correction = num_grid%get_asInt('divergence_correction', defaultVal=2) num%divergence_correction = num_grid%get_asInt('divergence_correction', defaultVal=2)
@ -231,9 +231,9 @@ subroutine spectral_utilities_init
enddo enddo
elseif (num%divergence_correction == 2) then elseif (num%divergence_correction == 2) then
do j = 1, 3 do j = 1, 3
if ( j /= int(minloc(geomSize/real(grid,pReal),1)) & if ( j /= int(minloc(geomSize/real(cells,pReal),1)) &
.and. j /= int(maxloc(geomSize/real(grid,pReal),1))) & .and. j /= int(maxloc(geomSize/real(cells,pReal),1))) &
scaledGeomSize = geomSize/geomSize(j)*real(grid(j),pReal) scaledGeomSize = geomSize/geomSize(j)*real(cells(j),pReal)
enddo enddo
else else
scaledGeomSize = geomSize scaledGeomSize = geomSize
@ -262,11 +262,11 @@ subroutine spectral_utilities_init
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! MPI allocation ! MPI allocation
gridFFTW = int(grid,C_INTPTR_T) gridFFTW = int(cells,C_INTPTR_T)
alloc_local = fftw_mpi_local_size_3d(gridFFTW(3), gridFFTW(2), gridFFTW(1)/2 +1, & alloc_local = fftw_mpi_local_size_3d(gridFFTW(3), gridFFTW(2), gridFFTW(1)/2 +1, &
PETSC_COMM_WORLD, local_K, local_K_offset) PETSC_COMM_WORLD, local_K, local_K_offset)
allocate (xi1st (3,grid1Red,grid(2),grid3),source = cmplx(0.0_pReal,0.0_pReal,pReal)) ! frequencies for first derivatives, only half the size for first dimension allocate (xi1st (3,grid1Red,cells(2),cells3),source = cmplx(0.0_pReal,0.0_pReal,pReal)) ! frequencies for first derivatives, only half the size for first dimension
allocate (xi2nd (3,grid1Red,grid(2),grid3),source = cmplx(0.0_pReal,0.0_pReal,pReal)) ! frequencies for second derivatives, only half the size for first dimension allocate (xi2nd (3,grid1Red,cells(2),cells3),source = cmplx(0.0_pReal,0.0_pReal,pReal)) ! frequencies for second derivatives, only half the size for first dimension
tensorField = fftw_alloc_complex(tensorSize*alloc_local) tensorField = fftw_alloc_complex(tensorSize*alloc_local)
call c_f_pointer(tensorField, tensorField_real, [3_C_INTPTR_T,3_C_INTPTR_T, & call c_f_pointer(tensorField, tensorField_real, [3_C_INTPTR_T,3_C_INTPTR_T, &
@ -327,27 +327,27 @@ subroutine spectral_utilities_init
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! calculation of discrete angular frequencies, ordered as in FFTW (wrap around) ! calculation of discrete angular frequencies, ordered as in FFTW (wrap around)
do k = grid3Offset+1, grid3Offset+grid3 do k = cells3Offset+1, cells3Offset+cells3
k_s(3) = k - 1 k_s(3) = k - 1
if (k > grid(3)/2 + 1) k_s(3) = k_s(3) - grid(3) ! running from 0,1,...,N/2,N/2+1,-N/2,-N/2+1,...,-1 if (k > cells(3)/2 + 1) k_s(3) = k_s(3) - cells(3) ! running from 0,1,...,N/2,N/2+1,-N/2,-N/2+1,...,-1
do j = 1, grid(2) do j = 1, cells(2)
k_s(2) = j - 1 k_s(2) = j - 1
if (j > grid(2)/2 + 1) k_s(2) = k_s(2) - grid(2) ! running from 0,1,...,N/2,N/2+1,-N/2,-N/2+1,...,-1 if (j > cells(2)/2 + 1) k_s(2) = k_s(2) - cells(2) ! running from 0,1,...,N/2,N/2+1,-N/2,-N/2+1,...,-1
do i = 1, grid1Red do i = 1, grid1Red
k_s(1) = i - 1 ! symmetry, just running from 0,1,...,N/2,N/2+1 k_s(1) = i - 1 ! symmetry, just running from 0,1,...,N/2,N/2+1
xi2nd(1:3,i,j,k-grid3Offset) = utilities_getFreqDerivative(k_s) xi2nd(1:3,i,j,k-cells3Offset) = utilities_getFreqDerivative(k_s)
where(mod(grid,2)==0 .and. [i,j,k] == grid/2+1 .and. & where(mod(cells,2)==0 .and. [i,j,k] == cells/2+1 .and. &
spectral_derivative_ID == DERIVATIVE_CONTINUOUS_ID) ! for even grids, set the Nyquist Freq component to 0.0 spectral_derivative_ID == DERIVATIVE_CONTINUOUS_ID) ! for even grids, set the Nyquist Freq component to 0.0
xi1st(1:3,i,j,k-grid3Offset) = cmplx(0.0_pReal,0.0_pReal,pReal) xi1st(1:3,i,j,k-cells3Offset) = cmplx(0.0_pReal,0.0_pReal,pReal)
elsewhere elsewhere
xi1st(1:3,i,j,k-grid3Offset) = xi2nd(1:3,i,j,k-grid3Offset) xi1st(1:3,i,j,k-cells3Offset) = xi2nd(1:3,i,j,k-cells3Offset)
endwhere endwhere
enddo; enddo; enddo enddo; enddo; enddo
if (num%memory_efficient) then ! allocate just a single fourth-order tensor if (num%memory_efficient) then ! allocate just a single fourth-order tensor
allocate (gamma_hat(3,3,3,3,1,1,1), source = cmplx(0.0_pReal,0.0_pReal,pReal)) allocate (gamma_hat(3,3,3,3,1,1,1), source = cmplx(0.0_pReal,0.0_pReal,pReal))
else ! precalculation of gamma_hat field else ! precalculation of gamma_hat field
allocate (gamma_hat(3,3,3,3,grid1Red,grid(2),grid3), source = cmplx(0.0_pReal,0.0_pReal,pReal)) allocate (gamma_hat(3,3,3,3,grid1Red,cells(2),cells3), source = cmplx(0.0_pReal,0.0_pReal,pReal))
endif endif
end subroutine spectral_utilities_init end subroutine spectral_utilities_init
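The wrap-around loop above stores the discrete frequencies in FFTW order. A minimal stand-alone sketch for a single axis of assumed length N = 6:

    program fftw_freq_order
      implicit none
      integer, parameter :: N = 6       ! assumed axis length
      integer :: j, k_s

      do j = 1, N
        k_s = j - 1
        if (j > N/2 + 1) k_s = k_s - N  ! wrap around
        print '(i3)', k_s               ! prints 0, 1, 2, 3, -2, -1
      end do
    end program fftw_freq_order

For even N the single entry k_s = N/2 (here 3) is the Nyquist frequency; that is the component the code above zeroes in xi1st when the continuous derivative is used.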
@ -373,10 +373,10 @@ subroutine utilities_updateGamma(C)
if (.not. num%memory_efficient) then if (.not. num%memory_efficient) then
gamma_hat = cmplx(0.0_pReal,0.0_pReal,pReal) ! for the singular point and any non invertible A gamma_hat = cmplx(0.0_pReal,0.0_pReal,pReal) ! for the singular point and any non invertible A
do k = grid3Offset+1, grid3Offset+grid3; do j = 1, grid(2); do i = 1, grid1Red do k = cells3Offset+1, cells3Offset+cells3; do j = 1, cells(2); do i = 1, grid1Red
if (any([i,j,k] /= 1)) then ! singular point at xi=(0.0,0.0,0.0) i.e. i=j=k=1 if (any([i,j,k] /= 1)) then ! singular point at xi=(0.0,0.0,0.0) i.e. i=j=k=1
do concurrent (l = 1:3, m = 1:3) do concurrent (l = 1:3, m = 1:3)
xiDyad_cmplx(l,m) = conjg(-xi1st(l,i,j,k-grid3Offset))*xi1st(m,i,j,k-grid3Offset) xiDyad_cmplx(l,m) = conjg(-xi1st(l,i,j,k-cells3Offset))*xi1st(m,i,j,k-cells3Offset)
end do end do
do concurrent(l = 1:3, m = 1:3) do concurrent(l = 1:3, m = 1:3)
temp33_complex(l,m) = sum(cmplx(C_ref(l,1:3,m,1:3),0.0_pReal)*xiDyad_cmplx) temp33_complex(l,m) = sum(cmplx(C_ref(l,1:3,m,1:3),0.0_pReal)*xiDyad_cmplx)
@ -387,8 +387,8 @@ subroutine utilities_updateGamma(C)
call math_invert(A_inv, err, A) call math_invert(A_inv, err, A)
temp33_complex = cmplx(A_inv(1:3,1:3),A_inv(1:3,4:6),pReal) temp33_complex = cmplx(A_inv(1:3,1:3),A_inv(1:3,4:6),pReal)
do concurrent(l=1:3, m=1:3, n=1:3, o=1:3) do concurrent(l=1:3, m=1:3, n=1:3, o=1:3)
gamma_hat(l,m,n,o,i,j,k-grid3Offset) = temp33_complex(l,n)* & gamma_hat(l,m,n,o,i,j,k-cells3Offset) = temp33_complex(l,n)* &
conjg(-xi1st(o,i,j,k-grid3Offset))*xi1st(m,i,j,k-grid3Offset) conjg(-xi1st(o,i,j,k-cells3Offset))*xi1st(m,i,j,k-cells3Offset)
end do end do
end if end if
end if end if
@ -405,7 +405,7 @@ end subroutine utilities_updateGamma
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
subroutine utilities_FFTtensorForward subroutine utilities_FFTtensorForward
tensorField_real(1:3,1:3,grid(1)+1:grid1Red*2,:,:) = 0.0_pReal tensorField_real(1:3,1:3,cells(1)+1:grid1Red*2,:,:) = 0.0_pReal
call fftw_mpi_execute_dft_r2c(planTensorForth,tensorField_real,tensorField_fourier) call fftw_mpi_execute_dft_r2c(planTensorForth,tensorField_real,tensorField_fourier)
end subroutine utilities_FFTtensorForward end subroutine utilities_FFTtensorForward
@ -429,7 +429,7 @@ end subroutine utilities_FFTtensorBackward
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
subroutine utilities_FFTscalarForward subroutine utilities_FFTscalarForward
scalarField_real(grid(1)+1:grid1Red*2,:,:) = 0.0_pReal scalarField_real(cells(1)+1:grid1Red*2,:,:) = 0.0_pReal
call fftw_mpi_execute_dft_r2c(planScalarForth,scalarField_real,scalarField_fourier) call fftw_mpi_execute_dft_r2c(planScalarForth,scalarField_real,scalarField_fourier)
end subroutine utilities_FFTscalarForward end subroutine utilities_FFTscalarForward
@ -454,7 +454,7 @@ end subroutine utilities_FFTscalarBackward
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
subroutine utilities_FFTvectorForward subroutine utilities_FFTvectorForward
vectorField_real(1:3,grid(1)+1:grid1Red*2,:,:) = 0.0_pReal vectorField_real(1:3,cells(1)+1:grid1Red*2,:,:) = 0.0_pReal
call fftw_mpi_execute_dft_r2c(planVectorForth,vectorField_real,vectorField_fourier) call fftw_mpi_execute_dft_r2c(planVectorForth,vectorField_real,vectorField_fourier)
end subroutine utilities_FFTvectorForward end subroutine utilities_FFTvectorForward
@ -493,8 +493,8 @@ subroutine utilities_fourierGammaConvolution(fieldAim)
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! do the actual spectral method calculation (mechanical equilibrium) ! do the actual spectral method calculation (mechanical equilibrium)
memoryEfficient: if (num%memory_efficient) then memoryEfficient: if (num%memory_efficient) then
do k = 1, grid3; do j = 1, grid(2); do i = 1, grid1Red do k = 1, cells3; do j = 1, cells(2); do i = 1, grid1Red
if (any([i,j,k+grid3Offset] /= 1)) then ! singular point at xi=(0.0,0.0,0.0) i.e. i=j=k=1 if (any([i,j,k+cells3Offset] /= 1)) then ! singular point at xi=(0.0,0.0,0.0) i.e. i=j=k=1
do concurrent(l = 1:3, m = 1:3) do concurrent(l = 1:3, m = 1:3)
xiDyad_cmplx(l,m) = conjg(-xi1st(l,i,j,k))*xi1st(m,i,j,k) xiDyad_cmplx(l,m) = conjg(-xi1st(l,i,j,k))*xi1st(m,i,j,k)
end do end do
@ -519,7 +519,7 @@ subroutine utilities_fourierGammaConvolution(fieldAim)
end if end if
end do; end do; end do end do; end do; end do
else memoryEfficient else memoryEfficient
do k = 1, grid3; do j = 1, grid(2); do i = 1,grid1Red do k = 1, cells3; do j = 1, cells(2); do i = 1,grid1Red
do concurrent(l = 1:3, m = 1:3) do concurrent(l = 1:3, m = 1:3)
temp33_Complex(l,m) = sum(gamma_hat(l,m,1:3,1:3,i,j,k) * tensorField_fourier(1:3,1:3,i,j,k)) temp33_Complex(l,m) = sum(gamma_hat(l,m,1:3,1:3,i,j,k) * tensorField_fourier(1:3,1:3,i,j,k))
end do end do
@ -527,7 +527,7 @@ subroutine utilities_fourierGammaConvolution(fieldAim)
end do; end do; end do end do; end do; end do
end if memoryEfficient end if memoryEfficient
if (grid3Offset == 0) tensorField_fourier(1:3,1:3,1,1,1) = cmplx(fieldAim/wgt,0.0_pReal,pReal) if (cells3Offset == 0) tensorField_fourier(1:3,1:3,1,1,1) = cmplx(fieldAim/wgt,0.0_pReal,pReal)
end subroutine utilities_fourierGammaConvolution end subroutine utilities_fourierGammaConvolution
@ -544,7 +544,7 @@ subroutine utilities_fourierGreenConvolution(D_ref, mu_ref, Delta_t)
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! do the actual spectral method calculation ! do the actual spectral method calculation
do k = 1, grid3; do j = 1, grid(2) ;do i = 1, grid1Red do k = 1, cells3; do j = 1, cells(2) ;do i = 1, grid1Red
GreenOp_hat = cmplx(1.0_pReal,0.0_pReal,pReal) & GreenOp_hat = cmplx(1.0_pReal,0.0_pReal,pReal) &
/ (cmplx(mu_ref,0.0_pReal,pReal) + cmplx(Delta_t,0.0_pReal) & / (cmplx(mu_ref,0.0_pReal,pReal) + cmplx(Delta_t,0.0_pReal) &
* sum(conjg(xi1st(1:3,i,j,k))* matmul(cmplx(D_ref,0.0_pReal),xi1st(1:3,i,j,k)))) * sum(conjg(xi1st(1:3,i,j,k))* matmul(cmplx(D_ref,0.0_pReal),xi1st(1:3,i,j,k))))
@ -571,7 +571,7 @@ real(pReal) function utilities_divergenceRMS()
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! calculating RMS divergence criterion in Fourier space ! calculating RMS divergence criterion in Fourier space
utilities_divergenceRMS = 0.0_pReal utilities_divergenceRMS = 0.0_pReal
do k = 1, grid3; do j = 1, grid(2) do k = 1, cells3; do j = 1, cells(2)
do i = 2, grid1Red -1 ! has a conjugate complex counterpart elsewhere; therefore count it twice do i = 2, grid1Red -1 ! has a conjugate complex counterpart elsewhere; therefore count it twice
utilities_divergenceRMS = utilities_divergenceRMS & utilities_divergenceRMS = utilities_divergenceRMS &
+ 2.0_pReal*(sum (real(matmul(tensorField_fourier(1:3,1:3,i,j,k), & ! (sqrt(real(a)**2 + aimag(a)**2))**2 = real(a)**2 + aimag(a)**2, i.e. do not take square root and square again + 2.0_pReal*(sum (real(matmul(tensorField_fourier(1:3,1:3,i,j,k), & ! (sqrt(real(a)**2 + aimag(a)**2))**2 = real(a)**2 + aimag(a)**2, i.e. do not take square root and square again
@ -579,7 +579,7 @@ real(pReal) function utilities_divergenceRMS()
+sum(aimag(matmul(tensorField_fourier(1:3,1:3,i,j,k),& +sum(aimag(matmul(tensorField_fourier(1:3,1:3,i,j,k),&
conjg(-xi1st(1:3,i,j,k))*rescaledGeom))**2)) conjg(-xi1st(1:3,i,j,k))*rescaledGeom))**2))
enddo enddo
utilities_divergenceRMS = utilities_divergenceRMS & ! these two layers (DC and Nyquist) do not have a conjugate complex counterpart (if grid(1) /= 1) utilities_divergenceRMS = utilities_divergenceRMS & ! these two layers (DC and Nyquist) do not have a conjugate complex counterpart (if cells(1) /= 1)
+ sum( real(matmul(tensorField_fourier(1:3,1:3,1 ,j,k), & + sum( real(matmul(tensorField_fourier(1:3,1:3,1 ,j,k), &
conjg(-xi1st(1:3,1,j,k))*rescaledGeom))**2) & conjg(-xi1st(1:3,1,j,k))*rescaledGeom))**2) &
+ sum(aimag(matmul(tensorField_fourier(1:3,1:3,1 ,j,k), & + sum(aimag(matmul(tensorField_fourier(1:3,1:3,1 ,j,k), &
@ -589,7 +589,7 @@ real(pReal) function utilities_divergenceRMS()
+ sum(aimag(matmul(tensorField_fourier(1:3,1:3,grid1Red,j,k), & + sum(aimag(matmul(tensorField_fourier(1:3,1:3,grid1Red,j,k), &
conjg(-xi1st(1:3,grid1Red,j,k))*rescaledGeom))**2) conjg(-xi1st(1:3,grid1Red,j,k))*rescaledGeom))**2)
enddo; enddo enddo; enddo
if (grid(1) == 1) utilities_divergenceRMS = utilities_divergenceRMS * 0.5_pReal ! counted twice in case of grid(1) == 1 if (cells(1) == 1) utilities_divergenceRMS = utilities_divergenceRMS * 0.5_pReal ! counted twice in case of cells(1) == 1
call MPI_Allreduce(MPI_IN_PLACE,utilities_divergenceRMS,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) call MPI_Allreduce(MPI_IN_PLACE,utilities_divergenceRMS,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
utilities_divergenceRMS = sqrt(utilities_divergenceRMS) * wgt ! RMS in real space calculated with Parsevals theorem from Fourier space utilities_divergenceRMS = sqrt(utilities_divergenceRMS) * wgt ! RMS in real space calculated with Parsevals theorem from Fourier space
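The final line follows from Parseval's theorem: with N = cells(1)*cells(2)*cells(3) and FFTW's unnormalized transform, the sum over x of |f(x)|^2 equals (1/N) times the sum over k of |f_hat(k)|^2, so the real-space RMS is sqrt(sum_k |f_hat(k)|^2) / N, i.e. sqrt(...) * wgt. The factor-2 bookkeeping reflects the r2c storage: interior i-layers stand for a complex-conjugate pair and are counted twice, while the DC and Nyquist layers stand only for themselves; for cells(1) == 1 the reduced dimension grid1Red is 1, the DC and Nyquist blocks address the same layer, and the 0.5 correction removes the double count.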
@ -616,7 +616,7 @@ real(pReal) function utilities_curlRMS()
! calculating max curl criterion in Fourier space ! calculating max curl criterion in Fourier space
utilities_curlRMS = 0.0_pReal utilities_curlRMS = 0.0_pReal
do k = 1, grid3; do j = 1, grid(2); do k = 1, cells3; do j = 1, cells(2);
do i = 2, grid1Red - 1 do i = 2, grid1Red - 1
do l = 1, 3 do l = 1, 3
curl_fourier(l,1) = (+tensorField_fourier(l,3,i,j,k)*xi1st(2,i,j,k)*rescaledGeom(2) & curl_fourier(l,1) = (+tensorField_fourier(l,3,i,j,k)*xi1st(2,i,j,k)*rescaledGeom(2) &
@ -638,7 +638,7 @@ real(pReal) function utilities_curlRMS()
-tensorField_fourier(l,1,1,j,k)*xi1st(2,1,j,k)*rescaledGeom(2)) -tensorField_fourier(l,1,1,j,k)*xi1st(2,1,j,k)*rescaledGeom(2))
enddo enddo
utilities_curlRMS = utilities_curlRMS & utilities_curlRMS = utilities_curlRMS &
+ sum(curl_fourier%re**2 + curl_fourier%im**2) ! this layer (DC) does not have a conjugate complex counterpart (if grid(1) /= 1) + sum(curl_fourier%re**2 + curl_fourier%im**2) ! this layer (DC) does not have a conjugate complex counterpart (if cells(1) /= 1)
do l = 1, 3 do l = 1, 3
curl_fourier = (+tensorField_fourier(l,3,grid1Red,j,k)*xi1st(2,grid1Red,j,k)*rescaledGeom(2) & curl_fourier = (+tensorField_fourier(l,3,grid1Red,j,k)*xi1st(2,grid1Red,j,k)*rescaledGeom(2) &
-tensorField_fourier(l,2,grid1Red,j,k)*xi1st(3,grid1Red,j,k)*rescaledGeom(3)) -tensorField_fourier(l,2,grid1Red,j,k)*xi1st(3,grid1Red,j,k)*rescaledGeom(3))
@ -648,13 +648,13 @@ real(pReal) function utilities_curlRMS()
-tensorField_fourier(l,1,grid1Red,j,k)*xi1st(2,grid1Red,j,k)*rescaledGeom(2)) -tensorField_fourier(l,1,grid1Red,j,k)*xi1st(2,grid1Red,j,k)*rescaledGeom(2))
enddo enddo
utilities_curlRMS = utilities_curlRMS & utilities_curlRMS = utilities_curlRMS &
+ sum(curl_fourier%re**2 + curl_fourier%im**2) ! this layer (Nyquist) does not have a conjugate complex counterpart (if grid(1) /= 1) + sum(curl_fourier%re**2 + curl_fourier%im**2) ! this layer (Nyquist) does not have a conjugate complex counterpart (if cells(1) /= 1)
enddo; enddo enddo; enddo
call MPI_Allreduce(MPI_IN_PLACE,utilities_curlRMS,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) call MPI_Allreduce(MPI_IN_PLACE,utilities_curlRMS,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
utilities_curlRMS = sqrt(utilities_curlRMS) * wgt utilities_curlRMS = sqrt(utilities_curlRMS) * wgt
if (grid(1) == 1) utilities_curlRMS = utilities_curlRMS * 0.5_pReal ! counted twice in case of grid(1) == 1 if (cells(1) == 1) utilities_curlRMS = utilities_curlRMS * 0.5_pReal ! counted twice in case of cells(1) == 1
end function utilities_curlRMS end function utilities_curlRMS
@ -736,7 +736,7 @@ subroutine utilities_fourierScalarGradient()
integer :: i, j, k integer :: i, j, k
do k = 1, grid3; do j = 1, grid(2); do i = 1,grid1Red do k = 1, cells3; do j = 1, cells(2); do i = 1,grid1Red
vectorField_fourier(1:3,i,j,k) = scalarField_fourier(i,j,k)*xi1st(1:3,i,j,k) ! ToDo: no -conjg? vectorField_fourier(1:3,i,j,k) = scalarField_fourier(i,j,k)*xi1st(1:3,i,j,k) ! ToDo: no -conjg?
enddo; enddo; enddo enddo; enddo; enddo
@ -750,7 +750,7 @@ subroutine utilities_fourierVectorDivergence()
integer :: i, j, k integer :: i, j, k
do k = 1, grid3; do j = 1, grid(2); do i = 1,grid1Red do k = 1, cells3; do j = 1, cells(2); do i = 1,grid1Red
scalarField_fourier(i,j,k) = sum(vectorField_fourier(1:3,i,j,k)*conjg(-xi1st(1:3,i,j,k))) scalarField_fourier(i,j,k) = sum(vectorField_fourier(1:3,i,j,k)*conjg(-xi1st(1:3,i,j,k)))
enddo; enddo; enddo enddo; enddo; enddo
@ -764,7 +764,7 @@ subroutine utilities_fourierVectorGradient()
integer :: i, j, k, m, n integer :: i, j, k, m, n
do k = 1, grid3; do j = 1, grid(2); do i = 1,grid1Red do k = 1, cells3; do j = 1, cells(2); do i = 1,grid1Red
do m = 1, 3; do n = 1, 3 do m = 1, 3; do n = 1, 3
tensorField_fourier(m,n,i,j,k) = vectorField_fourier(m,i,j,k)*xi1st(n,i,j,k) tensorField_fourier(m,n,i,j,k) = vectorField_fourier(m,i,j,k)*xi1st(n,i,j,k)
enddo; enddo enddo; enddo
@ -780,7 +780,7 @@ subroutine utilities_fourierTensorDivergence()
integer :: i, j, k integer :: i, j, k
do k = 1, grid3; do j = 1, grid(2); do i = 1,grid1Red do k = 1, cells3; do j = 1, cells(2); do i = 1,grid1Red
vectorField_fourier(:,i,j,k) = matmul(tensorField_fourier(:,:,i,j,k),conjg(-xi1st(:,i,j,k))) vectorField_fourier(:,i,j,k) = matmul(tensorField_fourier(:,:,i,j,k),conjg(-xi1st(:,i,j,k)))
enddo; enddo; enddo enddo; enddo; enddo
@ -795,8 +795,8 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,&
real(pReal), intent(out), dimension(3,3,3,3) :: C_volAvg, C_minmaxAvg !< average stiffness real(pReal), intent(out), dimension(3,3,3,3) :: C_volAvg, C_minmaxAvg !< average stiffness
real(pReal), intent(out), dimension(3,3) :: P_av !< average PK stress real(pReal), intent(out), dimension(3,3) :: P_av !< average PK stress
real(pReal), intent(out), dimension(3,3,grid(1),grid(2),grid3) :: P !< PK stress real(pReal), intent(out), dimension(3,3,cells(1),cells(2),cells3) :: P !< PK stress
real(pReal), intent(in), dimension(3,3,grid(1),grid(2),grid3) :: F !< deformation gradient target real(pReal), intent(in), dimension(3,3,cells(1),cells(2),cells3) :: F !< deformation gradient target
real(pReal), intent(in) :: Delta_t !< loading time real(pReal), intent(in) :: Delta_t !< loading time
type(rotation), intent(in), optional :: rotation_BC !< rotation of load frame type(rotation), intent(in), optional :: rotation_BC !< rotation of load frame
@ -810,15 +810,15 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,&
print'(/,1x,a)', '... evaluating constitutive response ......................................' print'(/,1x,a)', '... evaluating constitutive response ......................................'
flush(IO_STDOUT) flush(IO_STDOUT)
homogenization_F = reshape(F,[3,3,product(grid(1:2))*grid3]) ! set materialpoint target F to estimated field homogenization_F = reshape(F,[3,3,product(cells(1:2))*cells3]) ! set materialpoint target F to estimated field
call homogenization_mechanical_response(Delta_t,[1,1],[1,product(grid(1:2))*grid3]) ! calculate P field call homogenization_mechanical_response(Delta_t,[1,1],[1,product(cells(1:2))*cells3]) ! calculate P field
if (.not. terminallyIll) & if (.not. terminallyIll) &
call homogenization_thermal_response(Delta_t,[1,1],[1,product(grid(1:2))*grid3]) call homogenization_thermal_response(Delta_t,[1,1],[1,product(cells(1:2))*cells3])
if (.not. terminallyIll) & if (.not. terminallyIll) &
call homogenization_mechanical_response2(Delta_t,[1,1],[1,product(grid(1:2))*grid3]) call homogenization_mechanical_response2(Delta_t,[1,1],[1,product(cells(1:2))*cells3])
P = reshape(homogenization_P, [3,3,grid(1),grid(2),grid3]) P = reshape(homogenization_P, [3,3,cells(1),cells(2),cells3])
P_av = sum(sum(sum(P,dim=5),dim=4),dim=3) * wgt P_av = sum(sum(sum(P,dim=5),dim=4),dim=3) * wgt
call MPI_Allreduce(MPI_IN_PLACE,P_av,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) call MPI_Allreduce(MPI_IN_PLACE,P_av,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
@ -833,7 +833,7 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,&
dPdF_norm_max = 0.0_pReal dPdF_norm_max = 0.0_pReal
dPdF_min = huge(1.0_pReal) dPdF_min = huge(1.0_pReal)
dPdF_norm_min = huge(1.0_pReal) dPdF_norm_min = huge(1.0_pReal)
do i = 1, product(grid(1:2))*grid3 do i = 1, product(cells(1:2))*cells3
if (dPdF_norm_max < sum(homogenization_dPdF(1:3,1:3,1:3,1:3,i)**2)) then if (dPdF_norm_max < sum(homogenization_dPdF(1:3,1:3,1:3,1:3,i)**2)) then
dPdF_max = homogenization_dPdF(1:3,1:3,1:3,1:3,i) dPdF_max = homogenization_dPdF(1:3,1:3,1:3,1:3,i)
dPdF_norm_max = sum(homogenization_dPdF(1:3,1:3,1:3,1:3,i)**2) dPdF_norm_max = sum(homogenization_dPdF(1:3,1:3,1:3,1:3,i)**2)
@ -878,16 +878,16 @@ pure function utilities_calculateRate(heterogeneous,field0,field,dt,avRate)
dt !< Delta_t between field0 and field dt !< Delta_t between field0 and field
logical, intent(in) :: & logical, intent(in) :: &
heterogeneous !< calculate field of rates heterogeneous !< calculate field of rates
real(pReal), intent(in), dimension(3,3,grid(1),grid(2),grid3) :: & real(pReal), intent(in), dimension(3,3,cells(1),cells(2),cells3) :: &
field0, & !< data of previous step field0, & !< data of previous step
field !< data of current step field !< data of current step
real(pReal), dimension(3,3,grid(1),grid(2),grid3) :: & real(pReal), dimension(3,3,cells(1),cells(2),cells3) :: &
utilities_calculateRate utilities_calculateRate
if (heterogeneous) then if (heterogeneous) then
utilities_calculateRate = (field-field0) / dt utilities_calculateRate = (field-field0) / dt
else else
utilities_calculateRate = spread(spread(spread(avRate,3,grid(1)),4,grid(2)),5,grid3) utilities_calculateRate = spread(spread(spread(avRate,3,cells(1)),4,cells(2)),5,cells3)
endif endif
end function utilities_calculateRate end function utilities_calculateRate
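The nested spread calls broadcast the 3x3 average rate tensor to every cell of the local domain, appending one trailing dimension per call. A minimal sketch of the idiom, with assumed sizes:

    program spread_demo
      implicit none
      real :: avRate(3,3)
      real :: field(3,3,2,2,2)   ! assumed shape (3,3,nx,ny,nz)

      avRate = 1.0
      ! each spread appends one dimension: (3,3) -> (3,3,2) -> (3,3,2,2) -> (3,3,2,2,2)
      field = spread(spread(spread(avRate,3,2),4,2),5,2)
      print *, all(field == 1.0)  ! prints T: every field(:,:,i,j,k) equals avRate
    end program spread_demo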
@ -901,12 +901,12 @@ function utilities_forwardField(Delta_t,field_lastInc,rate,aim)
real(pReal), intent(in) :: & real(pReal), intent(in) :: &
Delta_t !< Delta_t of current step Delta_t !< Delta_t of current step
real(pReal), intent(in), dimension(3,3,grid(1),grid(2),grid3) :: & real(pReal), intent(in), dimension(3,3,cells(1),cells(2),cells3) :: &
field_lastInc, & !< initial field field_lastInc, & !< initial field
rate !< rate by which to forward rate !< rate by which to forward
real(pReal), intent(in), optional, dimension(3,3) :: & real(pReal), intent(in), optional, dimension(3,3) :: &
aim !< average field value aim aim !< average field value aim
real(pReal), dimension(3,3,grid(1),grid(2),grid3) :: & real(pReal), dimension(3,3,cells(1),cells(2),cells3) :: &
utilities_forwardField utilities_forwardField
real(pReal), dimension(3,3) :: fieldDiff !< <a + adot*t> - aim real(pReal), dimension(3,3) :: fieldDiff !< <a + adot*t> - aim
integer(MPI_INTEGER_KIND) :: err_MPI integer(MPI_INTEGER_KIND) :: err_MPI
@ -918,7 +918,7 @@ function utilities_forwardField(Delta_t,field_lastInc,rate,aim)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
fieldDiff = fieldDiff - aim fieldDiff = fieldDiff - aim
utilities_forwardField = utilities_forwardField - & utilities_forwardField = utilities_forwardField - &
spread(spread(spread(fieldDiff,3,grid(1)),4,grid(2)),5,grid3) spread(spread(spread(fieldDiff,3,cells(1)),4,cells(2)),5,cells3)
endif endif
end function utilities_forwardField end function utilities_forwardField
@ -939,34 +939,34 @@ pure function utilities_getFreqDerivative(k_s)
utilities_getFreqDerivative = cmplx(0.0_pReal, TAU*real(k_s,pReal)/geomSize,pReal) utilities_getFreqDerivative = cmplx(0.0_pReal, TAU*real(k_s,pReal)/geomSize,pReal)
case (DERIVATIVE_CENTRAL_DIFF_ID) case (DERIVATIVE_CENTRAL_DIFF_ID)
utilities_getFreqDerivative = cmplx(0.0_pReal, sin(TAU*real(k_s,pReal)/real(grid,pReal)), pReal)/ & utilities_getFreqDerivative = cmplx(0.0_pReal, sin(TAU*real(k_s,pReal)/real(cells,pReal)), pReal)/ &
cmplx(2.0_pReal*geomSize/real(grid,pReal), 0.0_pReal, pReal) cmplx(2.0_pReal*geomSize/real(cells,pReal), 0.0_pReal, pReal)
case (DERIVATIVE_FWBW_DIFF_ID) case (DERIVATIVE_FWBW_DIFF_ID)
utilities_getFreqDerivative(1) = & utilities_getFreqDerivative(1) = &
cmplx(cos(TAU*real(k_s(1),pReal)/real(grid(1),pReal)) - 1.0_pReal, & cmplx(cos(TAU*real(k_s(1),pReal)/real(cells(1),pReal)) - 1.0_pReal, &
sin(TAU*real(k_s(1),pReal)/real(grid(1),pReal)), pReal)* & sin(TAU*real(k_s(1),pReal)/real(cells(1),pReal)), pReal)* &
cmplx(cos(TAU*real(k_s(2),pReal)/real(grid(2),pReal)) + 1.0_pReal, & cmplx(cos(TAU*real(k_s(2),pReal)/real(cells(2),pReal)) + 1.0_pReal, &
sin(TAU*real(k_s(2),pReal)/real(grid(2),pReal)), pReal)* & sin(TAU*real(k_s(2),pReal)/real(cells(2),pReal)), pReal)* &
cmplx(cos(TAU*real(k_s(3),pReal)/real(grid(3),pReal)) + 1.0_pReal, & cmplx(cos(TAU*real(k_s(3),pReal)/real(cells(3),pReal)) + 1.0_pReal, &
sin(TAU*real(k_s(3),pReal)/real(grid(3),pReal)), pReal)/ & sin(TAU*real(k_s(3),pReal)/real(cells(3),pReal)), pReal)/ &
cmplx(4.0_pReal*geomSize(1)/real(grid(1),pReal), 0.0_pReal, pReal) cmplx(4.0_pReal*geomSize(1)/real(cells(1),pReal), 0.0_pReal, pReal)
utilities_getFreqDerivative(2) = & utilities_getFreqDerivative(2) = &
cmplx(cos(TAU*real(k_s(1),pReal)/real(grid(1),pReal)) + 1.0_pReal, & cmplx(cos(TAU*real(k_s(1),pReal)/real(cells(1),pReal)) + 1.0_pReal, &
sin(TAU*real(k_s(1),pReal)/real(grid(1),pReal)), pReal)* & sin(TAU*real(k_s(1),pReal)/real(cells(1),pReal)), pReal)* &
cmplx(cos(TAU*real(k_s(2),pReal)/real(grid(2),pReal)) - 1.0_pReal, & cmplx(cos(TAU*real(k_s(2),pReal)/real(cells(2),pReal)) - 1.0_pReal, &
sin(TAU*real(k_s(2),pReal)/real(grid(2),pReal)), pReal)* & sin(TAU*real(k_s(2),pReal)/real(cells(2),pReal)), pReal)* &
cmplx(cos(TAU*real(k_s(3),pReal)/real(grid(3),pReal)) + 1.0_pReal, & cmplx(cos(TAU*real(k_s(3),pReal)/real(cells(3),pReal)) + 1.0_pReal, &
sin(TAU*real(k_s(3),pReal)/real(grid(3),pReal)), pReal)/ & sin(TAU*real(k_s(3),pReal)/real(cells(3),pReal)), pReal)/ &
cmplx(4.0_pReal*geomSize(2)/real(grid(2),pReal), 0.0_pReal, pReal) cmplx(4.0_pReal*geomSize(2)/real(cells(2),pReal), 0.0_pReal, pReal)
utilities_getFreqDerivative(3) = & utilities_getFreqDerivative(3) = &
cmplx(cos(TAU*real(k_s(1),pReal)/real(grid(1),pReal)) + 1.0_pReal, & cmplx(cos(TAU*real(k_s(1),pReal)/real(cells(1),pReal)) + 1.0_pReal, &
sin(TAU*real(k_s(1),pReal)/real(grid(1),pReal)), pReal)* & sin(TAU*real(k_s(1),pReal)/real(cells(1),pReal)), pReal)* &
cmplx(cos(TAU*real(k_s(2),pReal)/real(grid(2),pReal)) + 1.0_pReal, & cmplx(cos(TAU*real(k_s(2),pReal)/real(cells(2),pReal)) + 1.0_pReal, &
sin(TAU*real(k_s(2),pReal)/real(grid(2),pReal)), pReal)* & sin(TAU*real(k_s(2),pReal)/real(cells(2),pReal)), pReal)* &
cmplx(cos(TAU*real(k_s(3),pReal)/real(grid(3),pReal)) - 1.0_pReal, & cmplx(cos(TAU*real(k_s(3),pReal)/real(cells(3),pReal)) - 1.0_pReal, &
sin(TAU*real(k_s(3),pReal)/real(grid(3),pReal)), pReal)/ & sin(TAU*real(k_s(3),pReal)/real(cells(3),pReal)), pReal)/ &
cmplx(4.0_pReal*geomSize(3)/real(grid(3),pReal), 0.0_pReal, pReal) cmplx(4.0_pReal*geomSize(3)/real(cells(3),pReal), 0.0_pReal, pReal)
end select end select
end function utilities_getFreqDerivative end function utilities_getFreqDerivative
@ -979,10 +979,10 @@ end function utilities_getFreqDerivative
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
subroutine utilities_updateCoords(F) subroutine utilities_updateCoords(F)
real(pReal), dimension(3,3,grid(1),grid(2),grid3), intent(in) :: F real(pReal), dimension(3,3,cells(1),cells(2),cells3), intent(in) :: F
real(pReal), dimension(3, grid(1),grid(2),grid3) :: IPcoords real(pReal), dimension(3, cells(1),cells(2),cells3) :: IPcoords
real(pReal), dimension(3, grid(1),grid(2),grid3+2) :: IPfluct_padded ! Fluctuations of cell center displacement (padded along z for MPI) real(pReal), dimension(3, cells(1),cells(2),cells3+2) :: IPfluct_padded ! Fluctuations of cell center displacement (padded along z for MPI)
real(pReal), dimension(3, grid(1)+1,grid(2)+1,grid3+1) :: nodeCoords real(pReal), dimension(3, cells(1)+1,cells(2)+1,cells3+1) :: nodeCoords
integer :: & integer :: &
i,j,k,n, & i,j,k,n, &
c c
@ -1010,14 +1010,14 @@ subroutine utilities_updateCoords(F)
1, 1, 1, & 1, 1, 1, &
0, 1, 1 ], [3,8]) 0, 1, 1 ], [3,8])
step = geomSize/real(grid, pReal) step = geomSize/real(cells, pReal)
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! integration in Fourier space to get fluctuations of cell center displacements ! integration in Fourier space to get fluctuations of cell center displacements
tensorField_real(1:3,1:3,1:grid(1),1:grid(2),1:grid3) = F tensorField_real(1:3,1:3,1:cells(1),1:cells(2),1:cells3) = F
call utilities_FFTtensorForward() call utilities_FFTtensorForward()
do k = 1, grid3; do j = 1, grid(2); do i = 1, grid1Red do k = 1, cells3; do j = 1, cells(2); do i = 1, grid1Red
if (any([i,j,k+grid3Offset] /= 1)) then if (any([i,j,k+cells3Offset] /= 1)) then
vectorField_fourier(1:3,i,j,k) = matmul(tensorField_fourier(1:3,1:3,i,j,k),xi2nd(1:3,i,j,k)) & vectorField_fourier(1:3,i,j,k) = matmul(tensorField_fourier(1:3,1:3,i,j,k),xi2nd(1:3,i,j,k)) &
/ sum(conjg(-xi2nd(1:3,i,j,k))*xi2nd(1:3,i,j,k)) * cmplx(wgt,0.0,pReal) / sum(conjg(-xi2nd(1:3,i,j,k))*xi2nd(1:3,i,j,k)) * cmplx(wgt,0.0,pReal)
else else
@ -1029,13 +1029,13 @@ subroutine utilities_updateCoords(F)
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! average F ! average F
if (grid3Offset == 0) Favg = real(tensorField_fourier(1:3,1:3,1,1,1),pReal)*wgt if (cells3Offset == 0) Favg = real(tensorField_fourier(1:3,1:3,1,1,1),pReal)*wgt
call MPI_Bcast(Favg,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) call MPI_Bcast(Favg,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! pad cell center fluctuations along z-direction (needed when running an MPI simulation) ! pad cell center fluctuations along z-direction (needed when running an MPI simulation)
IPfluct_padded(1:3,1:grid(1),1:grid(2),2:grid3+1) = vectorField_real(1:3,1:grid(1),1:grid(2),1:grid3) IPfluct_padded(1:3,1:cells(1),1:cells(2),2:cells3+1) = vectorField_real(1:3,1:cells(1),1:cells(2),1:cells3)
c = product(shape(IPfluct_padded(:,:,:,1))) !< amount of data to transfer c = product(shape(IPfluct_padded(:,:,:,1))) !< amount of data to transfer
rank_t = modulo(worldrank+1_MPI_INTEGER_KIND,worldsize) rank_t = modulo(worldrank+1_MPI_INTEGER_KIND,worldsize)
rank_b = modulo(worldrank-1_MPI_INTEGER_KIND,worldsize) rank_b = modulo(worldrank-1_MPI_INTEGER_KIND,worldsize)
@ -1043,11 +1043,11 @@ subroutine utilities_updateCoords(F)
! send bottom layer to process below ! send bottom layer to process below
call MPI_Isend(IPfluct_padded(:,:,:,2), c,MPI_DOUBLE,rank_b,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(1),err_MPI) call MPI_Isend(IPfluct_padded(:,:,:,2), c,MPI_DOUBLE,rank_b,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(1),err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call MPI_Irecv(IPfluct_padded(:,:,:,grid3+2),c,MPI_DOUBLE,rank_t,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(2),err_MPI) call MPI_Irecv(IPfluct_padded(:,:,:,cells3+2),c,MPI_DOUBLE,rank_t,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(2),err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
! send top layer to process above ! send top layer to process above
call MPI_Isend(IPfluct_padded(:,:,:,grid3+1),c,MPI_DOUBLE,rank_t,1_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(3),err_MPI) call MPI_Isend(IPfluct_padded(:,:,:,cells3+1),c,MPI_DOUBLE,rank_t,1_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(3),err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call MPI_Irecv(IPfluct_padded(:,:,:,1), c,MPI_DOUBLE,rank_b,1_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(4),err_MPI) call MPI_Irecv(IPfluct_padded(:,:,:,1), c,MPI_DOUBLE,rank_b,1_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(4),err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
@ -1063,24 +1063,24 @@ subroutine utilities_updateCoords(F)
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! calculate nodal displacements ! calculate nodal displacements
nodeCoords = 0.0_pReal nodeCoords = 0.0_pReal
do k = 0,grid3; do j = 0,grid(2); do i = 0,grid(1) do k = 0,cells3; do j = 0,cells(2); do i = 0,cells(1)
nodeCoords(1:3,i+1,j+1,k+1) = matmul(Favg,step*(real([i,j,k+grid3Offset],pReal))) nodeCoords(1:3,i+1,j+1,k+1) = matmul(Favg,step*(real([i,j,k+cells3Offset],pReal)))
averageFluct: do n = 1,8 averageFluct: do n = 1,8
me = [i+neighbor(1,n),j+neighbor(2,n),k+neighbor(3,n)] me = [i+neighbor(1,n),j+neighbor(2,n),k+neighbor(3,n)]
nodeCoords(1:3,i+1,j+1,k+1) = nodeCoords(1:3,i+1,j+1,k+1) & nodeCoords(1:3,i+1,j+1,k+1) = nodeCoords(1:3,i+1,j+1,k+1) &
+ IPfluct_padded(1:3,modulo(me(1)-1,grid(1))+1,modulo(me(2)-1,grid(2))+1,me(3)+1)*0.125_pReal + IPfluct_padded(1:3,modulo(me(1)-1,cells(1))+1,modulo(me(2)-1,cells(2))+1,me(3)+1)*0.125_pReal
enddo averageFluct enddo averageFluct
enddo; enddo; enddo enddo; enddo; enddo
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! calculate cell center displacements ! calculate cell center displacements
do k = 1,grid3; do j = 1,grid(2); do i = 1,grid(1) do k = 1,cells3; do j = 1,cells(2); do i = 1,cells(1)
IPcoords(1:3,i,j,k) = vectorField_real(1:3,i,j,k) & IPcoords(1:3,i,j,k) = vectorField_real(1:3,i,j,k) &
+ matmul(Favg,step*(real([i,j,k+grid3Offset],pReal)-0.5_pReal)) + matmul(Favg,step*(real([i,j,k+cells3Offset],pReal)-0.5_pReal))
enddo; enddo; enddo enddo; enddo; enddo
call discretization_setNodeCoords(reshape(NodeCoords,[3,(grid(1)+1)*(grid(2)+1)*(grid3+1)])) call discretization_setNodeCoords(reshape(NodeCoords,[3,(cells(1)+1)*(cells(2)+1)*(cells3+1)]))
call discretization_setIPcoords (reshape(IPcoords, [3,grid(1)*grid(2)*grid3])) call discretization_setIPcoords (reshape(IPcoords, [3,cells(1)*cells(2)*cells3]))
end subroutine utilities_updateCoords end subroutine utilities_updateCoords
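In summary, the nodal positions are built by trilinear averaging: each of the (cells(1)+1)*(cells(2)+1)*(cells3+1) nodes receives the mean (weight 0.125) of the displacement fluctuations of its eight adjacent cell centers, with the modulo arithmetic providing periodic wrap-around in x and y and the exchanged ghost layers providing the z neighbors across process boundaries; the affine contribution matmul(Favg, step*position) is then added for nodes and cell centers alike.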