From a514d300607889baac65ea9d119170b864235011 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Thu, 26 May 2022 16:12:13 +0200 Subject: [PATCH 1/5] standardized names --- src/grid/discretization_grid.f90 | 8 ++--- src/grid/spectral_utilities.f90 | 58 ++++++++++++++++---------------- 2 files changed, 32 insertions(+), 34 deletions(-) diff --git a/src/grid/discretization_grid.f90 b/src/grid/discretization_grid.f90 index ddb36a246..901d35a9a 100644 --- a/src/grid/discretization_grid.f90 +++ b/src/grid/discretization_grid.f90 @@ -107,10 +107,8 @@ subroutine discretization_grid_init(restart) if (worldsize>cells(3)) call IO_error(894, ext_msg='number of processes exceeds cells(3)') - call fftw_mpi_init - devNull = fftw_mpi_local_size_3d(int(cells(3),C_INTPTR_T), & - int(cells(2),C_INTPTR_T), & - int(cells(1),C_INTPTR_T)/2+1, & + call fftw_mpi_init() + devNull = fftw_mpi_local_size_3d(int(cells(3),C_INTPTR_T),int(cells(2),C_INTPTR_T),int(cells(1)/2+1,C_INTPTR_T), & PETSC_COMM_WORLD, & z, & ! domain cells size along z z_offset) ! domain cells offset along z @@ -123,7 +121,7 @@ subroutine discretization_grid_init(restart) myGrid = [cells(1:2),cells3] mySize = [geomSize(1:2),size3] - call MPI_Gather(product(cells(1:2))*cells3Offset, 1_MPI_INTEGER_KIND,MPI_INTEGER,displs,& + call MPI_Gather(product(cells(1:2))*cells3Offset,1_MPI_INTEGER_KIND,MPI_INTEGER,displs,& 1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call MPI_Gather(product(myGrid), 1_MPI_INTEGER_KIND,MPI_INTEGER,sendcounts,& diff --git a/src/grid/spectral_utilities.f90 b/src/grid/spectral_utilities.f90 index c8321f83f..7b712c7dd 100644 --- a/src/grid/spectral_utilities.f90 +++ b/src/grid/spectral_utilities.f90 @@ -152,8 +152,8 @@ subroutine spectral_utilities_init() tensorField, & !< field containing data for FFTW in real and fourier space (in place) vectorField, & !< field containing data for FFTW in real space when debugging FFTW (no in place) scalarField !< field containing data for FFTW in real space when debugging FFTW (no in place) - integer(C_INTPTR_T), dimension(3) :: gridFFTW - integer(C_INTPTR_T) :: alloc_local, local_K, local_K_offset + integer(C_INTPTR_T), dimension(3) :: cellsFFTW + integer(C_INTPTR_T) :: alloc_local, z, devNull integer(C_INTPTR_T), parameter :: & scalarSize = 1_C_INTPTR_T, & vectorSize = 3_C_INTPTR_T, & @@ -262,68 +262,68 @@ subroutine spectral_utilities_init() !-------------------------------------------------------------------------------------------------- ! MPI allocation - gridFFTW = int(cells,C_INTPTR_T) - alloc_local = fftw_mpi_local_size_3d(gridFFTW(3), gridFFTW(2), gridFFTW(1)/2 +1, & - PETSC_COMM_WORLD, local_K, local_K_offset) + cellsFFTW = int(cells,C_INTPTR_T) + alloc_local = fftw_mpi_local_size_3d(cellsFFTW(3), cellsFFTW(2), cellsFFTW(1)/2 +1, & + PETSC_COMM_WORLD, z, devNull) allocate (xi1st (3,cells1Red,cells(2),cells3),source = cmplx(0.0_pReal,0.0_pReal,pReal)) ! frequencies for first derivatives, only half the size for first dimension allocate (xi2nd (3,cells1Red,cells(2),cells3),source = cmplx(0.0_pReal,0.0_pReal,pReal)) ! frequencies for second derivatives, only half the size for first dimension tensorField = fftw_alloc_complex(tensorSize*alloc_local) call c_f_pointer(tensorField, tensorField_real, [3_C_INTPTR_T,3_C_INTPTR_T, & - 2_C_INTPTR_T*(gridFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T),gridFFTW(2),local_K]) ! 
place a pointer for a real tensor representation + 2_C_INTPTR_T*(cellsFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T),cellsFFTW(2),z]) ! place a pointer for a real tensor representation call c_f_pointer(tensorField, tensorField_fourier, [3_C_INTPTR_T,3_C_INTPTR_T, & - gridFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T , gridFFTW(2),local_K]) ! place a pointer for a fourier tensor representation + cellsFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T , cellsFFTW(2),z]) ! place a pointer for a fourier tensor representation vectorField = fftw_alloc_complex(vectorSize*alloc_local) call c_f_pointer(vectorField, vectorField_real, [3_C_INTPTR_T,& - 2_C_INTPTR_T*(gridFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T),gridFFTW(2),local_K]) ! place a pointer for a real vector representation + 2_C_INTPTR_T*(cellsFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T),cellsFFTW(2),z]) ! place a pointer for a real vector representation call c_f_pointer(vectorField, vectorField_fourier,[3_C_INTPTR_T,& - gridFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T, gridFFTW(2),local_K]) ! place a pointer for a fourier vector representation + cellsFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T, cellsFFTW(2),z]) ! place a pointer for a fourier vector representation scalarField = fftw_alloc_complex(scalarSize*alloc_local) ! allocate data for real representation (no in place transform) call c_f_pointer(scalarField, scalarField_real, & - [2_C_INTPTR_T*(gridFFTW(1)/2_C_INTPTR_T + 1),gridFFTW(2),local_K]) ! place a pointer for a real scalar representation + [2_C_INTPTR_T*(cellsFFTW(1)/2_C_INTPTR_T + 1),cellsFFTW(2),z]) ! place a pointer for a real scalar representation call c_f_pointer(scalarField, scalarField_fourier, & - [ gridFFTW(1)/2_C_INTPTR_T + 1 ,gridFFTW(2),local_K]) ! place a pointer for a fourier scarlar representation + [ cellsFFTW(1)/2_C_INTPTR_T + 1 ,cellsFFTW(2),z]) ! place a pointer for a fourier scarlar representation !-------------------------------------------------------------------------------------------------- ! tensor MPI fftw plans - planTensorForth = fftw_mpi_plan_many_dft_r2c(3,gridFFTW(3:1:-1),tensorSize, & + planTensorForth = fftw_mpi_plan_many_dft_r2c(3,cellsFFTW(3:1:-1),tensorSize, & FFTW_MPI_DEFAULT_BLOCK,FFTW_MPI_DEFAULT_BLOCK, & tensorField_real,tensorField_fourier, & PETSC_COMM_WORLD,FFTW_planner_flag) - if (.not. c_associated(planTensorForth)) error stop 'FFTW error' - planTensorBack = fftw_mpi_plan_many_dft_c2r(3,gridFFTW(3:1:-1),tensorSize, & - FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, & + if (.not. c_associated(planTensorForth)) error stop 'FFTW error r2c tensor' + planTensorBack = fftw_mpi_plan_many_dft_c2r(3,cellsFFTW(3:1:-1),tensorSize, & + FFTW_MPI_DEFAULT_BLOCK,FFTW_MPI_DEFAULT_BLOCK, & tensorField_fourier,tensorField_real, & - PETSC_COMM_WORLD, FFTW_planner_flag) - if (.not. c_associated(planTensorBack)) error stop 'FFTW error' + PETSC_COMM_WORLD,FFTW_planner_flag) + if (.not. c_associated(planTensorBack)) error stop 'FFTW error c2r tensor' !-------------------------------------------------------------------------------------------------- ! vector MPI fftw plans - planVectorForth = fftw_mpi_plan_many_dft_r2c(3,gridFFTW(3:1:-1),vectorSize, & + planVectorForth = fftw_mpi_plan_many_dft_r2c(3,cellsFFTW(3:1:-1),vectorSize, & FFTW_MPI_DEFAULT_BLOCK,FFTW_MPI_DEFAULT_BLOCK, & vectorField_real,vectorField_fourier, & PETSC_COMM_WORLD,FFTW_planner_flag) - if (.not. c_associated(planVectorForth)) error stop 'FFTW error' - planVectorBack = fftw_mpi_plan_many_dft_c2r(3,gridFFTW(3:1:-1),vectorSize, & - FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, & + if (.not. 
c_associated(planVectorForth)) error stop 'FFTW error r2c vector'
+  planVectorBack = fftw_mpi_plan_many_dft_c2r(3,cellsFFTW(3:1:-1),vectorSize, &
+                                              FFTW_MPI_DEFAULT_BLOCK,FFTW_MPI_DEFAULT_BLOCK, &
                                               vectorField_fourier,vectorField_real, &
-                                              PETSC_COMM_WORLD, FFTW_planner_flag)
+                                              PETSC_COMM_WORLD,FFTW_planner_flag)
-  if (.not. c_associated(planVectorBack)) error stop 'FFTW error'
+  if (.not. c_associated(planVectorBack)) error stop 'FFTW error c2r vector'

 !--------------------------------------------------------------------------------------------------
 ! scalar MPI fftw plans
-  planScalarForth = fftw_mpi_plan_many_dft_r2c(3,gridFFTW(3:1:-1),scalarSize, &
+  planScalarForth = fftw_mpi_plan_many_dft_r2c(3,cellsFFTW(3:1:-1),scalarSize, &
                                                FFTW_MPI_DEFAULT_BLOCK,FFTW_MPI_DEFAULT_BLOCK, &
                                                scalarField_real,scalarField_fourier, &
                                                PETSC_COMM_WORLD,FFTW_planner_flag)
-  if (.not. c_associated(planScalarForth)) error stop 'FFTW error'
-  planScalarBack = fftw_mpi_plan_many_dft_c2r(3,gridFFTW(3:1:-1),scalarSize, &
-                                              FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, &
+  if (.not. c_associated(planScalarForth)) error stop 'FFTW error r2c scalar'
+  planScalarBack = fftw_mpi_plan_many_dft_c2r(3,cellsFFTW(3:1:-1),scalarSize, &
+                                              FFTW_MPI_DEFAULT_BLOCK,FFTW_MPI_DEFAULT_BLOCK, &
                                               scalarField_fourier,scalarField_real, &
-                                              PETSC_COMM_WORLD, FFTW_planner_flag)
-  if (.not. c_associated(planScalarBack)) error stop 'FFTW error'
+                                              PETSC_COMM_WORLD,FFTW_planner_flag)
+  if (.not. c_associated(planScalarBack)) error stop 'FFTW error c2r scalar'

 !--------------------------------------------------------------------------------------------------
 ! calculation of discrete angular frequencies, ordered as in FFTW (wrap around)

From 82cad00f8199b6fa19f76d9beb13b3c1c486b928 Mon Sep 17 00:00:00 2001
From: Martin Diehl
Date: Thu, 26 May 2022 19:07:23 +0200
Subject: [PATCH 2/5] simplified interface

---
 src/grid/spectral_utilities.f90 | 20 +++++++++-----------
 1 file changed, 9 insertions(+), 11 deletions(-)

diff --git a/src/grid/spectral_utilities.f90 b/src/grid/spectral_utilities.f90
index 7b712c7dd..5c8936b14 100644
--- a/src/grid/spectral_utilities.f90
+++ b/src/grid/spectral_utilities.f90
@@ -297,7 +297,7 @@ subroutine spectral_utilities_init()
                                               FFTW_MPI_DEFAULT_BLOCK,FFTW_MPI_DEFAULT_BLOCK, &
                                               tensorField_fourier,tensorField_real, &
                                               PETSC_COMM_WORLD,FFTW_planner_flag)
-  if (.not. c_associated(planTensorBack)) error stop 'FFTW error c2r tensor'
+  if (.not. c_associated(planTensorBack)) error stop 'FFTW error c2r tensor'

 !--------------------------------------------------------------------------------------------------
 ! vector MPI fftw plans
@@ -310,20 +310,18 @@ subroutine spectral_utilities_init()
                                               FFTW_MPI_DEFAULT_BLOCK,FFTW_MPI_DEFAULT_BLOCK, &
                                               vectorField_fourier,vectorField_real, &
                                               PETSC_COMM_WORLD,FFTW_planner_flag)
-  if (.not. c_associated(planVectorBack)) error stop 'FFTW error c2r vector'
+  if (.not. c_associated(planVectorBack)) error stop 'FFTW error c2r vector'

 !--------------------------------------------------------------------------------------------------
 ! scalar MPI fftw plans
-  planScalarForth = fftw_mpi_plan_many_dft_r2c(3,cellsFFTW(3:1:-1),scalarSize, &
-                                               FFTW_MPI_DEFAULT_BLOCK,FFTW_MPI_DEFAULT_BLOCK, &
-                                               scalarField_real,scalarField_fourier, &
-                                               PETSC_COMM_WORLD,FFTW_planner_flag)
+  planScalarForth = fftw_mpi_plan_dft_r2c_3d(cellsFFTW(3),cellsFFTW(2),cellsFFTW(1), &
+                                             scalarField_real,scalarField_fourier, &
+                                             PETSC_COMM_WORLD,FFTW_planner_flag)
   if (.not. 
c_associated(planScalarForth)) error stop 'FFTW error r2c scalar' - planScalarBack = fftw_mpi_plan_many_dft_c2r(3,cellsFFTW(3:1:-1),scalarSize, & - FFTW_MPI_DEFAULT_BLOCK,FFTW_MPI_DEFAULT_BLOCK, & - scalarField_fourier,scalarField_real, & - PETSC_COMM_WORLD,FFTW_planner_flag) - if (.not. c_associated(planScalarBack)) error stop 'FFTW error c2r scalar' + planScalarBack = fftw_mpi_plan_dft_c2r_3d(cellsFFTW(3),cellsFFTW(2),cellsFFTW(1), & + scalarField_fourier,scalarField_real, & + PETSC_COMM_WORLD,FFTW_planner_flag) + if (.not. c_associated(planScalarBack)) error stop 'FFTW error c2r scalar' !-------------------------------------------------------------------------------------------------- ! calculation of discrete angular frequencies, ordered as in FFTW (wrap around) From 19b78cb647fc10a30d470d2e3f5e31981c208453 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Thu, 26 May 2022 19:15:25 +0200 Subject: [PATCH 3/5] easier to read --- src/grid/spectral_utilities.f90 | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/src/grid/spectral_utilities.f90 b/src/grid/spectral_utilities.f90 index 5c8936b14..9f3062d89 100644 --- a/src/grid/spectral_utilities.f90 +++ b/src/grid/spectral_utilities.f90 @@ -269,22 +269,22 @@ subroutine spectral_utilities_init() allocate (xi2nd (3,cells1Red,cells(2),cells3),source = cmplx(0.0_pReal,0.0_pReal,pReal)) ! frequencies for second derivatives, only half the size for first dimension tensorField = fftw_alloc_complex(tensorSize*alloc_local) - call c_f_pointer(tensorField, tensorField_real, [3_C_INTPTR_T,3_C_INTPTR_T, & - 2_C_INTPTR_T*(cellsFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T),cellsFFTW(2),z]) ! place a pointer for a real tensor representation - call c_f_pointer(tensorField, tensorField_fourier, [3_C_INTPTR_T,3_C_INTPTR_T, & - cellsFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T , cellsFFTW(2),z]) ! place a pointer for a fourier tensor representation + call c_f_pointer(tensorField,tensorField_real, & + [3_C_INTPTR_T,3_C_INTPTR_T,2_C_INTPTR_T*(cellsFFTW(1)/2_C_INTPTR_T+1_C_INTPTR_T),cellsFFTW(2),z]) + call c_f_pointer(tensorField,tensorField_fourier, & + [3_C_INTPTR_T,3_C_INTPTR_T, cellsFFTW(1)/2_C_INTPTR_T+1_C_INTPTR_T, cellsFFTW(2),z]) vectorField = fftw_alloc_complex(vectorSize*alloc_local) - call c_f_pointer(vectorField, vectorField_real, [3_C_INTPTR_T,& - 2_C_INTPTR_T*(cellsFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T),cellsFFTW(2),z]) ! place a pointer for a real vector representation - call c_f_pointer(vectorField, vectorField_fourier,[3_C_INTPTR_T,& - cellsFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T, cellsFFTW(2),z]) ! place a pointer for a fourier vector representation + call c_f_pointer(vectorField,vectorField_real, & + [3_C_INTPTR_T,2_C_INTPTR_T*(cellsFFTW(1)/2_C_INTPTR_T+1_C_INTPTR_T),cellsFFTW(2),z]) + call c_f_pointer(vectorField,vectorField_fourier, & + [3_C_INTPTR_T, cellsFFTW(1)/2_C_INTPTR_T+1_C_INTPTR_T, cellsFFTW(2),z]) - scalarField = fftw_alloc_complex(scalarSize*alloc_local) ! allocate data for real representation (no in place transform) - call c_f_pointer(scalarField, scalarField_real, & - [2_C_INTPTR_T*(cellsFFTW(1)/2_C_INTPTR_T + 1),cellsFFTW(2),z]) ! place a pointer for a real scalar representation - call c_f_pointer(scalarField, scalarField_fourier, & - [ cellsFFTW(1)/2_C_INTPTR_T + 1 ,cellsFFTW(2),z]) ! 
place a pointer for a fourier scarlar representation
+  scalarField = fftw_alloc_complex(scalarSize*alloc_local)
+  call c_f_pointer(scalarField,scalarField_real, &
+                   [2_C_INTPTR_T*(cellsFFTW(1)/2_C_INTPTR_T+1_C_INTPTR_T),cellsFFTW(2),z])
+  call c_f_pointer(scalarField,scalarField_fourier, &
+                   [ cellsFFTW(1)/2_C_INTPTR_T+1_C_INTPTR_T, cellsFFTW(2),z])

 !--------------------------------------------------------------------------------------------------
 ! tensor MPI fftw plans

From 064b9c5cc4294842acd27684c1716e673c2ebba3 Mon Sep 17 00:00:00 2001
From: Martin Diehl
Date: Fri, 27 May 2022 00:14:00 +0200
Subject: [PATCH 4/5] bugfix: prevent segmentation fault

need to rely on FFTW's calculation of the amount of storage needed
---
 src/grid/spectral_utilities.f90 | 25 ++++++++++++++++---------
 src/parallelization.f90         |  2 +-
 2 files changed, 17 insertions(+), 10 deletions(-)

diff --git a/src/grid/spectral_utilities.f90 b/src/grid/spectral_utilities.f90
index 9f3062d89..4e7bc2583 100644
--- a/src/grid/spectral_utilities.f90
+++ b/src/grid/spectral_utilities.f90
@@ -153,9 +153,8 @@ subroutine spectral_utilities_init()
     vectorField, &                                              !< field containing data for FFTW in real space when debugging FFTW (no in place)
     scalarField                                                 !< field containing data for FFTW in real space when debugging FFTW (no in place)
   integer(C_INTPTR_T), dimension(3) :: cellsFFTW
-  integer(C_INTPTR_T) :: alloc_local, z, devNull
+  integer(C_INTPTR_T) :: N, z, devNull
   integer(C_INTPTR_T), parameter :: &
-    scalarSize = 1_C_INTPTR_T, &
     vectorSize = 3_C_INTPTR_T, &
     tensorSize = 9_C_INTPTR_T
   character(len=*), parameter :: &
@@ -261,26 +260,34 @@ subroutine spectral_utilities_init()
   print'(/,1x,a)', 'FFTW initialized'; flush(IO_STDOUT)

 !--------------------------------------------------------------------------------------------------
-! MPI allocation
-  cellsFFTW = int(cells,C_INTPTR_T)
-  alloc_local = fftw_mpi_local_size_3d(cellsFFTW(3), cellsFFTW(2), cellsFFTW(1)/2 +1, &
-                                       PETSC_COMM_WORLD, z, devNull)
+! allocation
   allocate (xi1st (3,cells1Red,cells(2),cells3),source = cmplx(0.0_pReal,0.0_pReal,pReal))      ! frequencies for first derivatives, only half the size for first dimension
   allocate (xi2nd (3,cells1Red,cells(2),cells3),source = cmplx(0.0_pReal,0.0_pReal,pReal))      ! 
frequencies for second derivatives, only half the size for first dimension
-  tensorField = fftw_alloc_complex(tensorSize*alloc_local)
+  cellsFFTW = int(cells,C_INTPTR_T)
+
+  N = fftw_mpi_local_size_many(3,[cellsFFTW(3),cellsFFTW(2),cellsFFTW(1)/2_C_INTPTR_T+1_C_INTPTR_T],&
+                               tensorSize,FFTW_MPI_DEFAULT_BLOCK,PETSC_COMM_WORLD,z,devNull)
+  if (z /= cells3) error stop 'domain decomposition mismatch (tensor)'
+  tensorField = fftw_alloc_complex(N)
   call c_f_pointer(tensorField,tensorField_real, &
                    [3_C_INTPTR_T,3_C_INTPTR_T,2_C_INTPTR_T*(cellsFFTW(1)/2_C_INTPTR_T+1_C_INTPTR_T),cellsFFTW(2),z])
   call c_f_pointer(tensorField,tensorField_fourier, &
                    [3_C_INTPTR_T,3_C_INTPTR_T, cellsFFTW(1)/2_C_INTPTR_T+1_C_INTPTR_T, cellsFFTW(2),z])
-  vectorField = fftw_alloc_complex(vectorSize*alloc_local)
+  N = fftw_mpi_local_size_many(3,[cellsFFTW(3),cellsFFTW(2),cellsFFTW(1)/2_C_INTPTR_T+1_C_INTPTR_T],&
+                               vectorSize,FFTW_MPI_DEFAULT_BLOCK,PETSC_COMM_WORLD,z,devNull)
+  if (z /= cells3) error stop 'domain decomposition mismatch (vector)'
+  vectorField = fftw_alloc_complex(N)
   call c_f_pointer(vectorField,vectorField_real, &
                    [3_C_INTPTR_T,2_C_INTPTR_T*(cellsFFTW(1)/2_C_INTPTR_T+1_C_INTPTR_T),cellsFFTW(2),z])
   call c_f_pointer(vectorField,vectorField_fourier, &
                    [3_C_INTPTR_T, cellsFFTW(1)/2_C_INTPTR_T+1_C_INTPTR_T, cellsFFTW(2),z])
-  scalarField = fftw_alloc_complex(scalarSize*alloc_local)
+  N = fftw_mpi_local_size_3d(cellsFFTW(3),cellsFFTW(2),cellsFFTW(1)/2_C_INTPTR_T+1_C_INTPTR_T,&
+                             PETSC_COMM_WORLD,z,devNull)
+  if (z /= cells3) error stop 'domain decomposition mismatch (scalar)'
+  scalarField = fftw_alloc_complex(N)
   call c_f_pointer(scalarField,scalarField_real, &
                    [2_C_INTPTR_T*(cellsFFTW(1)/2_C_INTPTR_T+1_C_INTPTR_T),cellsFFTW(2),z])
   call c_f_pointer(scalarField,scalarField_fourier, &
diff --git a/src/parallelization.f90 b/src/parallelization.f90
index 8cc6d3abb..44e5c86ae 100644
--- a/src/parallelization.f90
+++ b/src/parallelization.f90
@@ -90,7 +90,7 @@ subroutine parallelization_init
 #ifdef LOGFILE
   write(rank_str,'(i4.4)') worldrank
   open(OUTPUT_UNIT,file='out.'//rank_str,status='replace',encoding='UTF-8')
-  open(ERROR_UNIT,file='error.'//rank_str,status='replace',encoding='UTF-8')
+  open(ERROR_UNIT,file='err.'//rank_str,status='replace',encoding='UTF-8')
 #else
   if (worldrank /= 0) then
     close(OUTPUT_UNIT)                                          ! disable output

From dc59beb59c30c225804d2f0b49ff1b2951cec8af Mon Sep 17 00:00:00 2001
From: Martin Diehl
Date: Fri, 27 May 2022 07:03:06 +0200
Subject: [PATCH 5/5] following advice from FFTW3 manual

silences a lot of gfortran warnings about unused variables
---
 src/grid/FFTW.f90                | 13 +++++++++++++
 src/grid/discretization_grid.f90 |  2 +-
 src/grid/spectral_utilities.f90  |  5 +----
 3 files changed, 15 insertions(+), 5 deletions(-)
 create mode 100644 src/grid/FFTW.f90

diff --git a/src/grid/FFTW.f90 b/src/grid/FFTW.f90
new file mode 100644
index 000000000..e552e78e0
--- /dev/null
+++ b/src/grid/FFTW.f90
@@ -0,0 +1,13 @@
+!--------------------------------------------------------------------------------------------------
+!> @author Martin Diehl, KU Leuven
+!> @brief Wrap FFTW3 into a module. 
+!-------------------------------------------------------------------------------------------------- +module FFTW3 + use, intrinsic :: ISO_C_binding + + implicit none + public + + include 'fftw3-mpi.f03' + +end module FFTW3 diff --git a/src/grid/discretization_grid.f90 b/src/grid/discretization_grid.f90 index 901d35a9a..916cf6e69 100644 --- a/src/grid/discretization_grid.f90 +++ b/src/grid/discretization_grid.f90 @@ -10,6 +10,7 @@ module discretization_grid #if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY) use MPI_f08 #endif + use FFTW3 use prec use parallelization @@ -50,7 +51,6 @@ subroutine discretization_grid_init(restart) logical, intent(in) :: restart - include 'fftw3-mpi.f03' real(pReal), dimension(3) :: & mySize, & !< domain size of this process origin !< (global) distance to origin diff --git a/src/grid/spectral_utilities.f90 b/src/grid/spectral_utilities.f90 index 4e7bc2583..c047d8669 100644 --- a/src/grid/spectral_utilities.f90 +++ b/src/grid/spectral_utilities.f90 @@ -4,13 +4,12 @@ !> @brief Utilities used by the different spectral solver variants !-------------------------------------------------------------------------------------------------- module spectral_utilities - use, intrinsic :: iso_c_binding - #include use PETScSys #if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY) use MPI_f08 #endif + use FFTW3 use prec use CLI @@ -26,8 +25,6 @@ module spectral_utilities implicit none private - include 'fftw3-mpi.f03' - !-------------------------------------------------------------------------------------------------- ! grid related information real(pReal), protected, public :: wgt !< weighting factor 1/Nelems
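
Note on PATCH 2/5: for a single transform (howmany = 1), the generic advanced
planner and the 3-d convenience planner describe the same transform, which is
what makes the simplified interface a drop-in replacement. A minimal,
self-contained sketch of the two calls -- grid size, communicator, planner flag,
and all names below are illustrative assumptions, not taken from the repository:

  ! hedged sketch: equivalent scalar r2c plans via the generic and the 3-d interface
  program planner_demo
    use, intrinsic :: ISO_C_binding
    use MPI
    implicit none
    include 'fftw3-mpi.f03'

    integer(C_INTPTR_T), parameter :: n = 8_C_INTPTR_T                                     ! assumed 8x8x8 grid
    integer(C_INTPTR_T) :: alloc_local, z, z_offset
    type(C_PTR) :: field, plan_many, plan_3d
    real(C_DOUBLE), pointer :: field_real(:,:,:)
    complex(C_DOUBLE_COMPLEX), pointer :: field_fourier(:,:,:)
    integer :: err

    call MPI_Init(err)
    call fftw_mpi_init()

    alloc_local = fftw_mpi_local_size_3d(n,n,n/2_C_INTPTR_T+1_C_INTPTR_T,MPI_COMM_WORLD,z,z_offset)
    field = fftw_alloc_complex(alloc_local)
    call c_f_pointer(field,field_real,   [2_C_INTPTR_T*(n/2_C_INTPTR_T+1_C_INTPTR_T),n,z]) ! padded real view (in place)
    call c_f_pointer(field,field_fourier,[n/2_C_INTPTR_T+1_C_INTPTR_T,n,z])                ! complex view of same buffer

    plan_many = fftw_mpi_plan_many_dft_r2c(3,[n,n,n],1_C_INTPTR_T, &
                                           FFTW_MPI_DEFAULT_BLOCK,FFTW_MPI_DEFAULT_BLOCK, &
                                           field_real,field_fourier,MPI_COMM_WORLD,FFTW_ESTIMATE)
    plan_3d = fftw_mpi_plan_dft_r2c_3d(n,n,n,field_real,field_fourier,MPI_COMM_WORLD,FFTW_ESTIMATE)
    if (.not. (c_associated(plan_many) .and. c_associated(plan_3d))) error stop 'FFTW error'

    call fftw_destroy_plan(plan_many)
    call fftw_destroy_plan(plan_3d)
    call fftw_free(field)
    call MPI_Finalize(err)
  end program planner_demo

(Built with, e.g., mpifort planner_demo.f90 -lfftw3_mpi -lfftw3, assuming a
standard FFTW MPI installation.)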
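
Note on PATCH 4/5: multiplying the storage reported by fftw_mpi_local_size_3d
by the number of vector/tensor components can under-allocate, because FFTW may
need extra per-rank padding for interleaved (howmany > 1) transforms -- hence
the segmentation fault and the switch to fftw_mpi_local_size_many. A hedged
sketch under the same illustrative assumptions as above:

  ! hedged sketch: ask FFTW for the per-rank storage of a 9-component transform
  ! instead of scaling the single-transform result by 9
  program local_size_demo
    use, intrinsic :: ISO_C_binding
    use MPI
    implicit none
    include 'fftw3-mpi.f03'

    integer(C_INTPTR_T), parameter :: n = 8_C_INTPTR_T                                     ! assumed 8x8x8 grid
    integer(C_INTPTR_T) :: sizeScalar, sizeTensor, z, z_offset
    integer :: err

    call MPI_Init(err)
    call fftw_mpi_init()

    ! complex elements this rank needs for a single r2c transform
    sizeScalar = fftw_mpi_local_size_3d(n,n,n/2_C_INTPTR_T+1_C_INTPTR_T,MPI_COMM_WORLD,z,z_offset)
    ! complex elements this rank needs for nine interleaved transforms;
    ! not guaranteed to equal 9*sizeScalar, hence the separate query per field in the patch
    sizeTensor = fftw_mpi_local_size_many(3,[n,n,n/2_C_INTPTR_T+1_C_INTPTR_T],9_C_INTPTR_T, &
                                          FFTW_MPI_DEFAULT_BLOCK,MPI_COMM_WORLD,z,z_offset)

    if (sizeTensor /= 9_C_INTPTR_T*sizeScalar) &
      print *, 'scaling the scalar size by 9 would not match the FFTW requirement'

    call MPI_Finalize(err)
  end program local_size_demo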