for some reason, ifort was complaining about mixed integer types.

also deleted the #ifdef PETSc guards, since we do not support the solver without PETSc
Martin Diehl 2015-03-30 20:49:17 +00:00
parent 8b1b397be6
commit 63491bf268
2 changed files with 23 additions and 17 deletions


@@ -175,8 +175,11 @@ subroutine utilities_init()
     scalarFieldMPI, &                                   !< field cotaining data for FFTW in real space when debugging FFTW (no in place)
     div, &                                              !< field cotaining data for FFTW in real and fourier space when debugging divergence (in place)
     coordsMPI
-  integer(C_INTPTR_T) :: gridFFTW(3), alloc_local, local_K, local_K_offset, &
-                         dimSize = 3, scalarSize = 1, vecSize = 3, tensorSize = 9
+  integer(C_INTPTR_T) :: gridFFTW(3), alloc_local, local_K, local_K_offset
+  integer(C_INTPTR_T), parameter :: &
+    scalarSize = 1_C_INTPTR_T, &
+    vecSize = 3_C_INTPTR_T, &
+    tensorSize = 9_C_INTPTR_T
 
   mainProcess: if (worldrank == 0) then
     write(6,'(/,a)') ' <<<+- DAMASK_spectral_utilities init -+>>>'
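
The complaint ifort raises here is easiest to reproduce with an array constructor, like the shape arguments later passed to c_f_pointer: Fortran forbids mixing integer kinds inside one constructor, so combining default-kind values with C_INTPTR_T ones is rejected, and giving every element an explicit kind suffix (as this commit does) is the fix. A minimal standalone sketch of the pattern, with illustrative names not taken from the commit:

  program mixed_kinds
    use, intrinsic :: iso_c_binding, only: C_INTPTR_T
    implicit none
    integer(C_INTPTR_T) :: n = 16_C_INTPTR_T
    integer(C_INTPTR_T) :: shp(2)
  ! shp = [3, n/2 + 1]                                   ! mixed kinds in one constructor: rejected
    shp = [3_C_INTPTR_T, n/2_C_INTPTR_T + 1_C_INTPTR_T]  ! uniform C_INTPTR_T kind: accepted
    write(6,'(2(i0,1x))') shp
  end program mixed_kinds
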
@@ -192,7 +195,7 @@ subroutine utilities_init()
   debugFFTW = iand(debug_level(debug_SPECTRAL),debug_SPECTRALFFTW) /= 0
   debugRotation = iand(debug_level(debug_SPECTRAL),debug_SPECTRALROTATION) /= 0
   debugPETSc = iand(debug_level(debug_SPECTRAL),debug_SPECTRALPETSC) /= 0
-#ifdef PETSc
   if(debugPETSc .and. worldrank == 0_pInt) write(6,'(3(/,a),/)') &
     ' Initializing PETSc with debug options: ', &
     trim(PETScDebug), &
@@ -201,9 +204,6 @@ subroutine utilities_init()
   call PetscOptionsClear(ierr); CHKERRQ(ierr)
   if(debugPETSc) call PetscOptionsInsertString(trim(PETSCDEBUG),ierr); CHKERRQ(ierr)
   call PetscOptionsInsertString(trim(petsc_options),ierr); CHKERRQ(ierr)
-#else
-  if(debugPETSc) call IO_warning(41_pInt, ext_msg='debug PETSc')
-#endif
 
   grid1Red = gridLocal(1)/2_pInt + 1_pInt
   wgt = 1.0/real(product(gridGlobal),pReal)
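
grid1Red is the reduced size of the first grid dimension after the real-to-complex transform: for real input the DFT satisfies X(N-k) = conj(X(k)), so only the first N/2 + 1 complex bins are independent. A quick standalone check with a naive DFT, all names local to this sketch:

  program hermitian
    implicit none
    integer, parameter :: N = 8
    real,    parameter :: pi = 3.1415927
    real    :: x(0:N-1)
    complex :: Xk, XNk
    integer :: j, k
    call random_number(x)
    k = 3
    Xk  = (0.0,0.0)
    XNk = (0.0,0.0)
    do j = 0, N-1                                        ! accumulate bins k and N-k directly
      Xk  = Xk  + x(j)*exp(cmplx(0.0,-2.0*pi*real(j*k)/real(N)))
      XNk = XNk + x(j)*exp(cmplx(0.0,-2.0*pi*real(j*(N-k))/real(N)))
    end do
    write(6,*) Xk, conjg(XNk)                            ! agree up to round-off
  end program hermitian
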
@@ -233,17 +233,21 @@ subroutine utilities_init()
 !--------------------------------------------------------------------------------------------------
 ! MPI allocation
-  gridFFTW = gridGlobal
+  gridFFTW = int(gridGlobal,C_INTPTR_T)
   alloc_local = fftw_mpi_local_size_3d(gridFFTW(3), gridFFTW(2), gridFFTW(1)/2 +1, &
                                        MPI_COMM_WORLD, local_K, local_K_offset)
   tensorFieldMPI = fftw_alloc_complex(9*alloc_local)
-  call c_f_pointer(tensorFieldMPI, field_realMPI, [dimSize,dimSize,2*(gridFFTW(1)/2 + 1),gridFFTW(2),local_K])
-  call c_f_pointer(tensorFieldMPI, field_fourierMPI,[dimSize,dimSize, gridFFTW(1)/2 + 1 ,gridFFTW(2),local_K])
+  call c_f_pointer(tensorFieldMPI, field_realMPI, [3_C_INTPTR_T,3_C_INTPTR_T, &
+                   2_C_INTPTR_T*(gridFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T),gridFFTW(2),local_K])
+  call c_f_pointer(tensorFieldMPI, field_fourierMPI, [3_C_INTPTR_T,3_C_INTPTR_T, &
+                   gridFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T , gridFFTW(2),local_K])
   allocate (xi(3,grid1Red,gridLocal(2),gridLocal(3)),source = 0.0_pReal)         ! frequencies, only half the size for first dimension
   coordsMPI = fftw_alloc_complex(3*alloc_local)
-  call c_f_pointer(coordsMPI, coords_realMPI, [dimSize,2*(gridFFTW(1)/2 + 1),gridFFTW(2),local_K])   ! place a pointer for a real representation on coords_fftw
-  call c_f_pointer(coordsMPI, coords_fourierMPI, [dimSize, gridFFTW(1)/2 + 1 ,gridFFTW(2),local_K])  ! place a pointer for a real representation on coords_fftw
+  call c_f_pointer(coordsMPI, coords_realMPI, [3_C_INTPTR_T,&
+                   2_C_INTPTR_T*(gridFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T),gridFFTW(2),local_K])      ! place a pointer for a real representation on coords_fftw
+  call c_f_pointer(coordsMPI, coords_fourierMPI,[3_C_INTPTR_T,&
+                   gridFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T, gridFFTW(2),local_K])                    ! place a pointer for a real representation on coords_fftw
 
 !--------------------------------------------------------------------------------------------------
 ! MPI fftw plans
@@ -267,8 +271,10 @@ subroutine utilities_init()
 ! depending on debug options, allocate more memory and create additional plans
   if (debugDivergence) then
     div = fftw_alloc_complex(3*alloc_local)
-    call c_f_pointer(div,divRealMPI, [dimSize,2*(gridFFTW(1)/2 + 1),gridFFTW(2),local_K])
-    call c_f_pointer(div,divFourierMPI,[dimSize, gridFFTW(1)/2 + 1 ,gridFFTW(2),local_K])
+    call c_f_pointer(div,divRealMPI, [3_C_INTPTR_T,&
+                     2_C_INTPTR_T*(gridFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T),gridFFTW(2),local_K])
+    call c_f_pointer(div,divFourierMPI,[3_C_INTPTR_T,&
+                     gridFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T, gridFFTW(2),local_K])
     planDivMPI = fftw_mpi_plan_many_dft_c2r(3, [gridFFTW(3),gridFFTW(2) ,gridFFTW(1)],vecSize, &
                                             FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, &
                                             divFourierMPI, divRealMPI, &