clean exit with stack trace

Martin Diehl 2020-11-11 14:06:21 +01:00
parent 8e89452791
commit 3dd5eaf1c1
2 changed files with 20 additions and 23 deletions
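
The commit replaces calls to IO_error (the FFTW-related IDs 809/810, whose entries are dropped from the error table in IO_error, and the MPI-related ID 894) with the Fortran 2008 statement error stop, which terminates all images/ranks with a (typically nonzero) exit code and, with some compilers (e.g. via gfortran's -fbacktrace), also produces the stack trace the commit title refers to. A minimal sketch of the pattern, independent of FFTW and MPI (the ok flag merely stands in for a failed library call):

program demo_error_stop
  implicit none
  logical :: ok

  ok = .false.                          ! stands in for a failed FFTW plan creation or MPI call
  if (.not. ok) error stop 'FFTW error' ! stops every image/rank and signals failure to the caller
end program demo_error_stop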

@@ -499,10 +499,6 @@ subroutine IO_error(error_ID,el,ip,g,instance,ext_msg)
!-------------------------------------------------------------------------------------------------
! errors related to the grid solver
- case (809)
- msg = 'initializing FFTW'
- case (810)
- msg = 'FFTW plan creation'
case (831)
msg = 'mask consistency violated in grid load case'
case (832)

@@ -313,12 +313,12 @@ subroutine spectral_utilities_init
tensorSize, FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, &! no. of transforms, default iblock and oblock
tensorField_real, tensorField_fourier, & ! input data, output data
PETSC_COMM_WORLD, FFTW_planner_flag) ! use all processors, planer precision
- if (.not. C_ASSOCIATED(planTensorForth)) call IO_error(810, ext_msg='planTensorForth')
+ if (.not. C_ASSOCIATED(planTensorForth)) error stop 'FFTW error'
planTensorBack = fftw_mpi_plan_many_dft_c2r(3, [gridFFTW(3),gridFFTW(2),gridFFTW(1)], & ! dimension, logical length in each dimension in reversed order
tensorSize, FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, &! no. of transforms, default iblock and oblock
tensorField_fourier,tensorField_real, & ! input data, output data
PETSC_COMM_WORLD, FFTW_planner_flag) ! all processors, planer precision
- if (.not. C_ASSOCIATED(planTensorBack)) call IO_error(810, ext_msg='planTensorBack')
+ if (.not. C_ASSOCIATED(planTensorBack)) error stop 'FFTW error'
!--------------------------------------------------------------------------------------------------
! vector MPI fftw plans
@@ -326,12 +326,12 @@ subroutine spectral_utilities_init
vecSize, FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK,&! no. of transforms, default iblock and oblock
vectorField_real, vectorField_fourier, & ! input data, output data
PETSC_COMM_WORLD, FFTW_planner_flag) ! use all processors, planer precision
- if (.not. C_ASSOCIATED(planVectorForth)) call IO_error(810, ext_msg='planVectorForth')
+ if (.not. C_ASSOCIATED(planVectorForth)) error stop 'FFTW error'
planVectorBack = fftw_mpi_plan_many_dft_c2r(3, [gridFFTW(3),gridFFTW(2),gridFFTW(1)], & ! dimension, logical length in each dimension in reversed order
vecSize, FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, & ! no. of transforms, default iblock and oblock
vectorField_fourier,vectorField_real, & ! input data, output data
PETSC_COMM_WORLD, FFTW_planner_flag) ! all processors, planer precision
- if (.not. C_ASSOCIATED(planVectorBack)) call IO_error(810, ext_msg='planVectorBack')
+ if (.not. C_ASSOCIATED(planVectorBack)) error stop 'FFTW error'
!--------------------------------------------------------------------------------------------------
! scalar MPI fftw plans
@@ -339,12 +339,12 @@ subroutine spectral_utilities_init
scalarSize, FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, &! no. of transforms, default iblock and oblock
scalarField_real, scalarField_fourier, & ! input data, output data
PETSC_COMM_WORLD, FFTW_planner_flag) ! use all processors, planer precision
- if (.not. C_ASSOCIATED(planScalarForth)) call IO_error(810, ext_msg='planScalarForth')
+ if (.not. C_ASSOCIATED(planScalarForth)) error stop 'FFTW error'
planScalarBack = fftw_mpi_plan_many_dft_c2r(3, [gridFFTW(3),gridFFTW(2),gridFFTW(1)], & ! dimension, logical length in each dimension in reversed order, no. of transforms
scalarSize, FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, &! no. of transforms, default iblock and oblock
scalarField_fourier,scalarField_real, & ! input data, output data
PETSC_COMM_WORLD, FFTW_planner_flag) ! use all processors, planer precision
- if (.not. C_ASSOCIATED(planScalarBack)) call IO_error(810, ext_msg='planScalarBack')
+ if (.not. C_ASSOCIATED(planScalarBack)) error stop 'FFTW error'
!--------------------------------------------------------------------------------------------------
! calculation of discrete angular frequencies, ordered as in FFTW (wrap around)
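
All of the FFTW planner calls above return a C pointer that is null when plan creation fails; the new checks turn an unassociated pointer directly into an error stop instead of routing it through IO_error(810). A self-contained sketch of that check, with a null c_ptr standing in for a failed planner call (no FFTW needed):

program demo_plan_check
  use, intrinsic :: iso_c_binding, only: c_ptr, c_null_ptr, c_associated
  implicit none
  type(c_ptr) :: plan

  plan = c_null_ptr                                      ! stand-in for a planner call that returned NULL
  if (.not. c_associated(plan)) error stop 'FFTW error'  ! same check and exit as in spectral_utilities_init
end program demo_plan_check
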
@@ -603,7 +603,7 @@ real(pReal) function utilities_divergenceRMS()
enddo; enddo
if(grid(1) == 1) utilities_divergenceRMS = utilities_divergenceRMS * 0.5_pReal ! counted twice in case of grid(1) == 1
call MPI_Allreduce(MPI_IN_PLACE,utilities_divergenceRMS,1,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
- if(ierr /=0) call IO_error(894, ext_msg='utilities_divergenceRMS')
+ if(ierr /=0) error stop 'MPI error'
utilities_divergenceRMS = sqrt(utilities_divergenceRMS) * wgt ! RMS in real space calculated with Parsevals theorem from Fourier space
@@ -664,7 +664,7 @@ real(pReal) function utilities_curlRMS()
enddo; enddo
call MPI_Allreduce(MPI_IN_PLACE,utilities_curlRMS,1,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
- if(ierr /=0) call IO_error(894, ext_msg='utilities_curlRMS')
+ if(ierr /=0) error stop 'MPI error'
utilities_curlRMS = sqrt(utilities_curlRMS) * wgt
if(grid(1) == 1) utilities_curlRMS = utilities_curlRMS * 0.5_pReal ! counted twice in case of grid(1) == 1
@@ -857,20 +857,21 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,&
valueAndRank = [dPdF_norm_max,real(worldrank,pReal)]
call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MAXLOC, PETSC_COMM_WORLD, ierr)
- if (ierr /= 0) call IO_error(894, ext_msg='MPI_Allreduce max')
+ if (ierr /= 0) error stop 'MPI error'
call MPI_Bcast(dPdF_max,81,MPI_DOUBLE,int(valueAndRank(2)),PETSC_COMM_WORLD, ierr)
- if (ierr /= 0) call IO_error(894, ext_msg='MPI_Bcast max')
+ if (ierr /= 0) error stop 'MPI error'
valueAndRank = [dPdF_norm_min,real(worldrank,pReal)]
call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MINLOC, PETSC_COMM_WORLD, ierr)
- if (ierr /= 0) call IO_error(894, ext_msg='MPI_Allreduce min')
+ if (ierr /= 0) error stop 'MPI error'
call MPI_Bcast(dPdF_min,81,MPI_DOUBLE,int(valueAndRank(2)),PETSC_COMM_WORLD, ierr)
- if (ierr /= 0) call IO_error(894, ext_msg='MPI_Bcast min')
+ if (ierr /= 0) error stop 'MPI error'
C_minmaxAvg = 0.5_pReal*(dPdF_max + dPdF_min)
C_volAvg = sum(sum(homogenization_dPdF,dim=6),dim=5)
call MPI_Allreduce(MPI_IN_PLACE,C_volAvg,81,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+ if (ierr /= 0) error stop 'MPI error'
C_volAvg = C_volAvg * wgt
@@ -1035,7 +1036,7 @@ subroutine utilities_updateCoords(F)
! average F
if (grid3Offset == 0) Favg = real(tensorField_fourier(1:3,1:3,1,1,1),pReal)*wgt
call MPI_Bcast(Favg,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
- if(ierr /=0) call IO_error(894, ext_msg='update_IPcoords/MPI_Bcast')
+ if(ierr /=0) error stop 'MPI error'
!--------------------------------------------------------------------------------------------------
! pad cell center fluctuations along z-direction (needed when running MPI simulation)
@@ -1046,19 +1047,19 @@ subroutine utilities_updateCoords(F)
! send bottom layer to process below
call MPI_Isend(IPfluct_padded(:,:,:,2), c,MPI_DOUBLE,rank_b,0,PETSC_COMM_WORLD,r,ierr)
- if(ierr /=0) call IO_error(894, ext_msg='update_IPcoords/MPI_Isend')
+ if(ierr /=0) error stop 'MPI error'
call MPI_Irecv(IPfluct_padded(:,:,:,grid3+2),c,MPI_DOUBLE,rank_t,0,PETSC_COMM_WORLD,r,ierr)
- if(ierr /=0) call IO_error(894, ext_msg='update_IPcoords/MPI_Irecv')
+ if(ierr /=0) error stop 'MPI error'
call MPI_Wait(r,s,ierr)
- if(ierr /=0) call IO_error(894, ext_msg='update_IPcoords/MPI_Wait')
+ if(ierr /=0) error stop 'MPI error'
! send top layer to process above
call MPI_Isend(IPfluct_padded(:,:,:,grid3+1),c,MPI_DOUBLE,rank_t,0,PETSC_COMM_WORLD,r,ierr)
- if(ierr /=0) call IO_error(894, ext_msg='update_IPcoords/MPI_Isend')
+ if(ierr /=0) error stop 'MPI error'
call MPI_Irecv(IPfluct_padded(:,:,:,1), c,MPI_DOUBLE,rank_b,0,PETSC_COMM_WORLD,r,ierr)
- if(ierr /=0) call IO_error(894, ext_msg='update_IPcoords/MPI_Irecv')
+ if(ierr /=0) error stop 'MPI error'
call MPI_Wait(r,s,ierr)
- if(ierr /=0) call IO_error(894, ext_msg='update_IPcoords/MPI_Wait')
+ if(ierr /=0) error stop 'MPI error'
!--------------------------------------------------------------------------------------------------
! calculate nodal displacements
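
The MPI changes all follow the same idiom: check the integer return code of each MPI call and terminate via error stop 'MPI error' instead of calling IO_error(894). A minimal, runnable sketch of that idiom outside the PETSc/DAMASK context (using the mpi module, MPI_COMM_WORLD, and MPI_DOUBLE_PRECISION rather than PETSC_COMM_WORLD and MPI_DOUBLE):

program demo_mpi_check
  use mpi
  implicit none
  integer :: ierr, rank
  double precision :: val

  call MPI_Init(ierr)
  if (ierr /= 0) error stop 'MPI error'

  call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
  if (ierr /= 0) error stop 'MPI error'

  val = dble(rank)                                       ! each rank contributes its own rank number
  call MPI_Allreduce(MPI_IN_PLACE, val, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ierr)
  if (ierr /= 0) error stop 'MPI error'

  call MPI_Finalize(ierr)
end program demo_mpi_check

Built and run with e.g. mpif90 demo_mpi_check.f90 && mpirun -np 2 ./a.out. Note that under the default MPI error handler (MPI_ERRORS_ARE_FATAL) the library aborts before returning a nonzero code, so these checks mainly act as a safety net when the error handler has been changed, e.g. to MPI_ERRORS_RETURN.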