Merge remote-tracking branch 'origin/development' into no-global-dot-state

commit 2110690ae4
Author: Martin Diehl
Date:   2022-02-09 22:53:04 +01:00

21 changed files with 102 additions and 89 deletions

View File

@@ -36,21 +36,21 @@ variables:
   # Names of module files to load
   # ===============================================================================================
   # ++++++++++++ Compiler +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
   COMPILER_GNU: "Compiler/GNU/10"
   COMPILER_INTELLLVM: "Compiler/oneAPI/2022.0.1 Libraries/IMKL/2022.0.1"
   COMPILER_INTEL: "Compiler/Intel/2022.0.1 Libraries/IMKL/2022.0.1"
   # ++++++++++++ MPI ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-  MPI_GNU: "MPI/GNU/10/OpenMPI/4.1.1"
+  MPI_GNU: "MPI/GNU/10/OpenMPI/4.1.2"
   MPI_INTELLLVM: "MPI/oneAPI/2022.0.1/IntelMPI/2021.5.0"
   MPI_INTEL: "MPI/Intel/2022.0.1/IntelMPI/2021.5.0"
   # ++++++++++++ PETSc ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-  PETSC_GNU: "Libraries/PETSc/3.16.1/GNU-10-OpenMPI-4.1.1"
+  PETSC_GNU: "Libraries/PETSc/3.16.4/GNU-10-OpenMPI-4.1.2"
   PETSC_INTELLLVM: "Libraries/PETSc/3.16.3/oneAPI-2022.0.1-IntelMPI-2021.5.0"
-  PETSC_INTEL: "Libraries/PETSc/3.16.3/Intel-2022.0.1-IntelMPI-2021.5.0"
+  PETSC_INTEL: "Libraries/PETSc/3.16.4/Intel-2022.0.1-IntelMPI-2021.5.0"
   # ++++++++++++ MSC Marc +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
   MSC: "FEM/MSC/2021.3.1"
   IntelMarc: "Compiler/Intel/19.1.2 Libraries/IMKL/2020"
   HDF5Marc: "HDF5/1.12.1/Intel-19.1.2"
 ###################################################################################################

View File

@@ -42,7 +42,7 @@ string(TOUPPER "${CMAKE_BUILD_TYPE}" CMAKE_BUILD_TYPE)
 if(CMAKE_BUILD_TYPE STREQUAL "DEBUG" OR CMAKE_BUILD_TYPE STREQUAL "SYNTAXONLY")
   set(DEBUG_FLAGS "${DEBUG_FLAGS} -DDEBUG")
   set(PARALLEL "OFF")
-  set(OPTI "OFF")
+  set(OPTI "DEBUG")
 elseif(CMAKE_BUILD_TYPE STREQUAL "RELEASE")
   set(PARALLEL "ON")
   set(OPTI "DEFENSIVE")

View File

@@ -9,26 +9,24 @@ if (OPENMP)
   set (OPENMP_FLAGS "-fopenmp")
 endif ()
-if (OPTIMIZATION STREQUAL "OFF")
+if (OPTIMIZATION STREQUAL "DEBUG")
+  set (OPTIMIZATION_FLAGS "-Og")
+elseif (OPTIMIZATION STREQUAL "OFF")
   set (OPTIMIZATION_FLAGS "-O0")
 elseif (OPTIMIZATION STREQUAL "DEFENSIVE")
-  set (OPTIMIZATION_FLAGS "-O2 -mtune=generic -flto")
+  set (OPTIMIZATION_FLAGS "-O2 -mtune=native")
 elseif (OPTIMIZATION STREQUAL "AGGRESSIVE")
-  set (OPTIMIZATION_FLAGS "-O3 -march=native -mtune=native -ffast-math -funroll-loops -ftree-vectorize -flto")
+  set (OPTIMIZATION_FLAGS "-O3 -march=native -funroll-loops -ftree-vectorize -flto")
 endif ()
 set (STANDARD_CHECK "-std=f2018 -pedantic-errors" )
-set (LINKER_FLAGS "${LINKER_FLAGS} -Wl")
-# options parsed directly to the linker
-set (LINKER_FLAGS "${LINKER_FLAGS},-undefined,dynamic_lookup" )
-# ensure to link against dynamic libraries
 #------------------------------------------------------------------------------------------------
 # Fine tuning compilation options
 set (COMPILE_FLAGS "${COMPILE_FLAGS} -cpp")
 # preprocessor
-set (COMPILE_FLAGS "${COMPILE_FLAGS} -fPIC -fPIE")
+set (COMPILE_FLAGS "${COMPILE_FLAGS} -fPIE")
 # position independent code
 set (COMPILE_FLAGS "${COMPILE_FLAGS} -ffree-line-length-132")
@@ -123,6 +121,9 @@ set (DEBUG_FLAGS "${DEBUG_FLAGS} -ffpe-trap=invalid,zero,overflow")
 set (DEBUG_FLAGS "${DEBUG_FLAGS} -g")
 # Generate symbolic debugging information in the object file
+set (DEBUG_FLAGS "${DEBUG_FLAGS} -Og")
+# Optimize debugging experience
 set (DEBUG_FLAGS "${DEBUG_FLAGS} -fbacktrace")
 set (DEBUG_FLAGS "${DEBUG_FLAGS} -fdump-core")
 set (DEBUG_FLAGS "${DEBUG_FLAGS} -fcheck=all")

View File

@@ -9,12 +9,12 @@ if (OPENMP)
   set (OPENMP_FLAGS "-qopenmp -parallel")
 endif ()
-if (OPTIMIZATION STREQUAL "OFF")
+if (OPTIMIZATION STREQUAL "OFF" OR OPTIMIZATION STREQUAL "DEBUG")
   set (OPTIMIZATION_FLAGS "-O0 -no-ip")
 elseif (OPTIMIZATION STREQUAL "DEFENSIVE")
   set (OPTIMIZATION_FLAGS "-O2")
 elseif (OPTIMIZATION STREQUAL "AGGRESSIVE")
-  set (OPTIMIZATION_FLAGS "-ipo -O3 -no-prec-div -fp-model fast=2 -xHost")
+  set (OPTIMIZATION_FLAGS "-ipo -O3 -fp-model fast=2 -xHost")
   # -fast = -ipo, -O3, -no-prec-div, -static, -fp-model fast=2, and -xHost"
 endif ()
@@ -110,6 +110,9 @@ set (DEBUG_FLAGS "${DEBUG_FLAGS} -fpe-all=0")
 # generate debug information for parameters
+# Disabled due to ICE when compiling phase_damage.f90 (not understandable, there is no parameter in there)
+set (DEBUG_FLAGS "${DEBUG_FLAGS} -debug all")
+# generate complete debugging information
 # Additional options
 # -heap-arrays: Should not be done for OpenMP, but set "ulimit -s unlimited" on shell. Probably it helps also to unlimit other limits
 # -check: Checks at runtime, where

View File

@@ -9,7 +9,7 @@ if (OPENMP)
   set (OPENMP_FLAGS "-qopenmp")
 endif ()
-if (OPTIMIZATION STREQUAL "OFF")
+if (OPTIMIZATION STREQUAL "OFF" OR OPTIMIZATION STREQUAL "DEBUG")
   set (OPTIMIZATION_FLAGS "-O0")
 elseif (OPTIMIZATION STREQUAL "DEFENSIVE")
   set (OPTIMIZATION_FLAGS "-O2")
@@ -109,6 +109,9 @@ set (DEBUG_FLAGS "${DEBUG_FLAGS} -fpe-all=0")
 set (DEBUG_FLAGS "${DEBUG_FLAGS} -debug-parameters all")
 # generate debug information for parameters
+set (DEBUG_FLAGS "${DEBUG_FLAGS} -debug all")
+# generate complete debugging information
 # Additional options
 # -heap-arrays: Should not be done for OpenMP, but set "ulimit -s unlimited" on shell. Probably it helps also to unlimit other limits
 # -check: Checks at runtime, where

View File

@@ -1 +1 @@
-v3.0.0-alpha5-556-g97f849c09
+v3.0.0-alpha5-603-ge0ed668ce

View File

@@ -90,7 +90,6 @@ subroutine CPFEM_initAll
   call material_init(.false.)
   call phase_init
   call homogenization_init
-  call crystallite_init
   call CPFEM_init
   call config_deallocate

View File

@@ -68,7 +68,6 @@ subroutine CPFEM_initAll
   call material_init(restart=interface_restartInc>0)
   call phase_init
   call homogenization_init
-  call crystallite_init
   call CPFEM_init
   call config_deallocate

View File

@@ -10,7 +10,8 @@ module HDF5_utilities
 #include <petsc/finclude/petscsys.h>
   use PETScSys
 #if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
-  use MPI
+  use MPI_f08
+  use MPI, only: MPI_INFO_NULL_F90 => MPI_INFO_NULL
 #endif
 #endif
@@ -162,9 +163,9 @@ integer(HID_T) function HDF5_openFile(fileName,mode,parallel)
   character, intent(in), optional :: mode
   logical, intent(in), optional :: parallel
   character :: m
   integer(HID_T) :: plist_id
   integer :: hdferr
   if (present(mode)) then
@@ -178,9 +179,15 @@ integer(HID_T) function HDF5_openFile(fileName,mode,parallel)
 #ifdef PETSC
   if (present(parallel)) then
+#if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
+    if (parallel) call H5Pset_fapl_mpio_f(plist_id, PETSC_COMM_WORLD, MPI_INFO_NULL_F90, hdferr)
+  else
+    call H5Pset_fapl_mpio_f(plist_id, PETSC_COMM_WORLD, MPI_INFO_NULL_F90, hdferr)
+#else
     if (parallel) call H5Pset_fapl_mpio_f(plist_id, PETSC_COMM_WORLD, MPI_INFO_NULL, hdferr)
   else
     call H5Pset_fapl_mpio_f(plist_id, PETSC_COMM_WORLD, MPI_INFO_NULL, hdferr)
+#endif
   end if
   if(hdferr < 0) error stop 'HDF5 error'
 #endif
@@ -1860,7 +1867,7 @@ subroutine initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_
     globalShape !< shape of the dataset (all processes)
   integer(HID_T), intent(out) :: dset_id, filespace_id, memspace_id, plist_id, aplist_id
-  integer, dimension(worldsize) :: &
+  integer(MPI_INTEGER_KIND), dimension(worldsize) :: &
     readSize !< contribution of all processes
   integer :: hdferr
   integer(MPI_INTEGER_KIND) :: err_MPI
@@ -1871,13 +1878,13 @@ subroutine initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_
   if(hdferr < 0) error stop 'HDF5 error'
 !--------------------------------------------------------------------------------------------------
-  readSize = 0
-  readSize(worldrank+1) = int(localShape(ubound(localShape,1)))
+  readSize = 0_MPI_INTEGER_KIND
+  readSize(worldrank+1) = int(localShape(ubound(localShape,1)),MPI_INTEGER_KIND)
 #ifdef PETSC
   if (parallel) then
     call H5Pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr)
     if(hdferr < 0) error stop 'HDF5 error'
-    call MPI_allreduce(MPI_IN_PLACE,readSize,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,err_MPI) ! get total output size over each process
+    call MPI_Allreduce(MPI_IN_PLACE,readSize,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI) ! get total output size over each process
     if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
   end if
 #endif
@@ -1954,8 +1961,8 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
     totalShape !< shape of the dataset (all processes)
   integer(HID_T), intent(out) :: dset_id, filespace_id, memspace_id, plist_id
-  integer, dimension(worldsize) :: writeSize !< contribution of all processes
+  integer(MPI_INTEGER_KIND), dimension(worldsize) :: writeSize !< contribution of all processes
   integer(HID_T) :: dcpl
   integer :: hdferr
   integer(MPI_INTEGER_KIND) :: err_MPI
   integer(HSIZE_T), parameter :: chunkSize = 1024_HSIZE_T**2/8_HSIZE_T
@@ -1974,11 +1981,11 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
 !--------------------------------------------------------------------------------------------------
 ! determine the global data layout among all processes
-  writeSize = 0
-  writeSize(worldrank+1) = int(myShape(ubound(myShape,1)))
+  writeSize = 0_MPI_INTEGER_KIND
+  writeSize(worldrank+1) = int(myShape(ubound(myShape,1)),MPI_INTEGER_KIND)
 #ifdef PETSC
   if (parallel) then
-    call MPI_allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,err_MPI) ! get total output size over each process
+    call MPI_Allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI) ! get total output size over each process
     if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
   end if
 #endif
@@ -2009,7 +2016,7 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
       if (hdferr < 0) error stop 'HDF5 error'
     end if
   end if
 !--------------------------------------------------------------------------------------------------
 ! create dataspace in memory (local shape) and in file (global shape)
   call H5Screate_simple_f(size(myShape), myShape, memspace_id, hdferr, myShape)
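Note on the use MPI_f08 switch in the first hunk: the f08 binding wraps every MPI handle in a derived type, so its MPI_INFO_NULL no longer matches the plain-integer info argument expected by HDF5's H5Pset_fapl_mpio_f; the rename-on-import keeps the legacy integer constant available as MPI_INFO_NULL_F90, which the preprocessor then selects in HDF5_openFile. A minimal sketch of the two flavours, assuming an MPI library that ships both modules:

  program info_kinds
    use MPI_f08                                         ! MPI_INFO_NULL is type(MPI_Info) here
    use MPI, only: MPI_INFO_NULL_F90 => MPI_INFO_NULL   ! legacy plain-integer handle, renamed
    implicit none

    print '(a,i0)', 'legacy integer handle: ', MPI_INFO_NULL_F90
    print '(a,i0)', 'f08 handle value:      ', MPI_INFO_NULL%MPI_VAL
  end program info_kinds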

View File

@@ -584,8 +584,6 @@ subroutine IO_warning(warning_ID,el,ip,g,ext_msg)
   character(len=pStringLen) :: formatString
   select case (warning_ID)
-    case (42)
-      msg = 'parameter has no effect'
     case (47)
       msg = 'no valid parameter for FFTW, using FFTW_PATIENT'
     case (207)
@@ -594,8 +592,6 @@ subroutine IO_warning(warning_ID,el,ip,g,ext_msg)
       msg = 'crystallite responds elastically'
     case (601)
       msg = 'stiffness close to zero'
-    case (700)
-      msg = 'unknown crystal symmetry'
     case (709)
       msg = 'read only the first document'
     case default

View File

@@ -32,7 +32,7 @@ program DAMASK_grid
   implicit none
   type :: tLoadCase
     type(tRotation) :: rot !< rotation of BC
     type(tBoundaryCondition) :: stress, & !< stress BC
                                 deformation !< deformation BC (dot_F, F, or L)
     real(pReal) :: t, & !< length of increment

View File

@@ -339,7 +339,7 @@ subroutine grid_mechanical_FEM_forward(cutBack,guess,Delta_t,Delta_t_old,t_remai
   type(tBoundaryCondition), intent(in) :: &
     stress_BC, &
     deformation_BC
   type(tRotation), intent(in) :: &
     rotation_BC
   PetscErrorCode :: err_PETSc
   PetscScalar, pointer, dimension(:,:,:,:) :: &

View File

@@ -79,6 +79,12 @@ module grid_mechanical_spectral_basic
     err_BC, & !< deviation from stress BC
     err_div !< RMS of div of P
+#if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
+  type(MPI_Status) :: status
+#else
+  integer, dimension(MPI_STATUS_SIZE) :: status
+#endif
   integer :: &
     totalIter = 0 !< total iteration in current increment
@@ -244,7 +250,7 @@ subroutine grid_mechanical_spectral_basic_init
   call MPI_File_open(MPI_COMM_WORLD, trim(getSolverJobName())//'.C_ref', &
                      MPI_MODE_RDONLY,MPI_INFO_NULL,fileUnit,err_MPI)
   if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
-  call MPI_File_read(fileUnit,C_minMaxAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_STATUS_IGNORE,err_MPI)
+  call MPI_File_read(fileUnit,C_minMaxAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,status,err_MPI)
   if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
   call MPI_File_close(fileUnit,err_MPI)
   if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
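The explicit status variable replaces MPI_STATUS_IGNORE because the status argument's type differs between bindings: type(MPI_Status) under mpi_f08, an integer array of length MPI_STATUS_SIZE under the legacy mpi module, hence the preprocessor switch in the declaration above. A self-contained sketch of the f08 variant (file name and buffer size are illustrative, not taken from the sources):

  program file_read_status
    use mpi_f08
    implicit none
    type(MPI_File)   :: fh
    type(MPI_Status) :: status   ! with 'use mpi': integer, dimension(MPI_STATUS_SIZE) :: status
    real(8)          :: C(81)
    integer          :: err

    call MPI_Init(err)
    call MPI_File_open(MPI_COMM_WORLD, 'C_ref.bin', MPI_MODE_RDONLY, MPI_INFO_NULL, fh, err)
    call MPI_File_read(fh, C, size(C), MPI_DOUBLE_PRECISION, status, err)
    call MPI_File_close(fh, err)
    call MPI_Finalize(err)
  end program file_read_status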

View File

@@ -90,6 +90,12 @@ module grid_mechanical_spectral_polarisation
     err_curl, & !< RMS of curl of F
     err_div !< RMS of div of P
+#if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
+  type(MPI_Status) :: status
+#else
+  integer, dimension(MPI_STATUS_SIZE) :: status
+#endif
   integer :: &
     totalIter = 0 !< total iteration in current increment
@@ -270,7 +276,7 @@ subroutine grid_mechanical_spectral_polarisation_init
   call MPI_File_open(MPI_COMM_WORLD, trim(getSolverJobName())//'.C_ref', &
                      MPI_MODE_RDONLY,MPI_INFO_NULL,fileUnit,err_MPI)
   if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
-  call MPI_File_read(fileUnit,C_minMaxAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_STATUS_IGNORE,err_MPI)
+  call MPI_File_read(fileUnit,C_minMaxAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,status,err_MPI)
   if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
   call MPI_File_close(fileUnit,err_MPI)
   if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'

View File

@@ -898,7 +898,7 @@ end function math_33toVoigt6_stress
 !--------------------------------------------------------------------------------------------------
-!> @brief Convert 3x3 stress tensor into 6 Voigt vector.
+!> @brief Convert 3x3 strain tensor into 6 Voigt vector.
 !--------------------------------------------------------------------------------------------------
 pure function math_33toVoigt6_strain(epsilon) result(epsilon_tilde)

View File

@@ -307,9 +307,11 @@ program DAMASK_mesh
         guess = .true. ! start guessing after first converged (sub)inc
         timeIncOld = timeinc
       end if
-      if (.not. cutBack .and. worldrank == 0) &
+      if (.not. cutBack .and. worldrank == 0) then
         write(statUnit,*) totalIncsCounter, time, cutBackLevel, &
                           solres%converged, solres%iterationsNeeded ! write statistics about accepted solution
+        flush(statUnit)
+      endif
     end do subStepLooping
     cutBackLevel = max(0, cutBackLevel - 1) ! try half number of subincs next inc

View File

@@ -52,13 +52,13 @@ contains
 !--------------------------------------------------------------------------------------------------
 subroutine parallelization_init
-  integer(MPI_INTEGER_KIND) :: err_MPI, typeSize
+  integer(MPI_INTEGER_KIND) :: err_MPI, typeSize, version, subversion, devNull
   character(len=4) :: rank_str
+  character(len=MPI_MAX_LIBRARY_VERSION_STRING) :: MPI_library_version
 !$ integer :: got_env, threadLevel
 !$ integer(pI32) :: OMP_NUM_THREADS
 !$ character(len=6) NumThreadsString
   PetscErrorCode :: err_PETSc
 #ifdef _OPENMP
   ! If openMP is enabled, check if the MPI libary supports it and initialize accordingly.
@@ -86,12 +86,22 @@ subroutine parallelization_init
   if (err_MPI /= 0_MPI_INTEGER_KIND) &
     error stop 'Could not determine worldrank'
-  if (worldrank == 0) print'(/,1x,a)', '<<<+- parallelization init -+>>>'
+  if (worldrank == 0) then
+    print'(/,1x,a)', '<<<+- parallelization init -+>>>'
+    call MPI_Get_library_version(MPI_library_version,devNull,err_MPI)
+    print'(/,1x,a)', trim(MPI_library_version)
+    call MPI_Get_version(version,subversion,err_MPI)
+    print'(1x,a,i0,a,i0)', 'MPI standard: ',version,'.',subversion
+#ifdef _OPENMP
+    print'(1x,a,i0)', 'OpenMP version: ',openmp_version
+#endif
+  end if
   call MPI_Comm_size(MPI_COMM_WORLD,worldsize,err_MPI)
   if (err_MPI /= 0_MPI_INTEGER_KIND) &
     error stop 'Could not determine worldsize'
-  if (worldrank == 0) print'(/,1x,a,i3)', 'MPI processes: ',worldsize
+  if (worldrank == 0) print'(/,1x,a,i0)', 'MPI processes: ',worldsize
   call MPI_Type_size(MPI_INTEGER,typeSize,err_MPI)
   if (err_MPI /= 0_MPI_INTEGER_KIND) &
@@ -128,7 +138,7 @@ subroutine parallelization_init
 !$   OMP_NUM_THREADS = 4_pI32
 !$  endif
 !$ endif
-!$ print'(1x,a,1x,i2)', 'OMP_NUM_THREADS:',OMP_NUM_THREADS
+!$ print'(1x,a,i0)', 'OMP_NUM_THREADS: ',OMP_NUM_THREADS
 !$ call omp_set_num_threads(OMP_NUM_THREADS)
 end subroutine parallelization_init
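The enlarged rank-0 block now reports, at startup, which MPI library and standard level the executable actually runs against: MPI_Get_library_version fills a vendor banner of at most MPI_MAX_LIBRARY_VERSION_STRING characters plus its used length (received into devNull above, as it is not needed), and MPI_Get_version returns the standard as a major/minor pair. A minimal standalone sketch, assuming any MPI-3 library:

  program mpi_version_demo
    use mpi_f08
    implicit none
    character(len=MPI_MAX_LIBRARY_VERSION_STRING) :: banner
    integer :: length, version, subversion, err

    call MPI_Init(err)
    call MPI_Get_library_version(banner, length, err)   ! e.g. 'Open MPI v4.1.2, ...'
    call MPI_Get_version(version, subversion, err)      ! e.g. 3 and 1
    print '(a)', trim(banner)
    print '(a,i0,a,i0)', 'MPI standard: ', version, '.', subversion
    call MPI_Finalize(err)
  end program mpi_version_demo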

View File

@@ -323,7 +323,6 @@ module phase
     phase_restore, &
     plastic_nonlocal_updateCompatibility, &
     converged, &
-    crystallite_init, &
     phase_mechanical_constitutive, &
     phase_thermal_constitutive, &
     phase_damage_constitutive, &
@@ -401,6 +400,8 @@ subroutine phase_init
   call damage_init
   call thermal_init(phases)
+  call crystallite_init()
 end subroutine phase_init
@@ -502,22 +503,13 @@ subroutine crystallite_init()
     ce, &
     co, & !< counter in integration point component loop
     ip, & !< counter in integration point loop
-    el, & !< counter in element loop
-    cMax, & !< maximum number of integration point components
-    iMax, & !< maximum number of integration points
-    eMax !< maximum number of elements
+    el !< counter in element loop
   class(tNode), pointer :: &
     num_crystallite, &
     phases
-  print'(/,1x,a)', '<<<+- crystallite init -+>>>'
-  cMax = homogenization_maxNconstituents
-  iMax = discretization_nIPs
-  eMax = discretization_Nelems
   num_crystallite => config_numerics%get('crystallite',defaultVal=emptyDict)
   num%subStepMinCryst = num_crystallite%get_asFloat ('subStepMin', defaultVal=1.0e-3_pReal)
@@ -551,15 +543,9 @@ subroutine crystallite_init()
   phases => config_material%get('phase')
-  print'(/,a42,1x,i10)', ' # of elements: ', eMax
-  print'( a42,1x,i10)', ' # of integration points/element: ', iMax
-  print'( a42,1x,i10)', 'max # of constituents/integration point: ', cMax
-  flush(IO_STDOUT)
   !$OMP PARALLEL DO PRIVATE(ce)
-  do el = 1, eMax
-    do ip = 1, iMax
+  do el = 1, discretization_Nelems
+    do ip = 1, discretization_nIPs
       ce = (el-1)*discretization_nIPs + ip
       do co = 1,homogenization_Nconstituents(material_homogenizationID(ce))
         call crystallite_orientations(co,ip,el)

View File

@@ -879,10 +879,10 @@ function integrateStateRK(F_0,F,subFp0,subFi0,subState0,Delta_t,co,ip,el,A,B,C,D
     plasticState(ph)%state(1:sizeDotState,en) = subState0 &
                                               + dotState * Delta_t
-    broken = integrateStress(F_0 + (F - F_0) * Delta_t * C(stage),subFp0,subFi0,Delta_t * C(stage),co,ip,el)
+    broken = integrateStress(F_0 + (F-F_0) * Delta_t*C(stage),subFp0,subFi0,Delta_t*C(stage),co,ip,el)
     if(broken) exit
-    dotState = plastic_dotState(Delta_t, co,ip,el,ph,en)
+    dotState = plastic_dotState(Delta_t*C(stage), co,ip,el,ph,en)
     if (any(IEEE_is_NaN(dotState))) exit
   enddo
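The second change in this hunk is the substantive fix: within an explicit Runge-Kutta scheme, the rate for the next stage must be evaluated at the stage time rather than at the full increment, consistent with the fractional step already passed to integrateStress on the line above. In Butcher-tableau notation, with c_i corresponding to C(stage),

  \dot{S}_i = f\left(t_0 + c_i\,\Delta t,\; S_0 + \Delta t \sum_{j<i} a_{ij}\,\dot{S}_j\right)

so the elapsed time handed to plastic_dotState is c_i * Delta_t, i.e. Delta_t*C(stage) instead of the full Delta_t.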

View File

@@ -27,7 +27,7 @@ module function thermalexpansion_init(kinematics_length) result(myKinematics)
   integer, intent(in) :: kinematics_length
   logical, dimension(:,:), allocatable :: myKinematics
-  integer :: Ninstances,p,i,k
+  integer :: Ninstances, p, k
   class(tNode), pointer :: &
     phases, &
     phase, &

View File

@@ -24,7 +24,7 @@ module system_routines
   function setCWD_C(cwd) bind(C)
     use, intrinsic :: ISO_C_Binding, only: C_INT, C_CHAR
     integer(C_INT) :: setCWD_C
     character(kind=C_CHAR), dimension(*), intent(in) :: cwd
   end function setCWD_C
@@ -150,14 +150,14 @@ function getUserName()
     getUserName = c_f_string(getUserName_Cstring)
   else
     getUserName = 'n/a (Error!)'
-  endif
+  end if
 end function getUserName
 !--------------------------------------------------------------------------------------------------
-!> @brief convert C string to Fortran string
-!> @details: C string is NULL terminated and, hence, longer by one than the Fortran string
+!> @brief Convert C string to Fortran string.
+!> @details: C string is NULL terminated and, hence, longer by one than the Fortran string.
 !--------------------------------------------------------------------------------------------------
 pure function c_f_string(c_string) result(f_string)
@@ -174,28 +174,23 @@ pure function c_f_string(c_string) result(f_string)
     else
       f_string = f_string(:i-1)
       exit
-    endif
-  enddo arrayToString
+    end if
+  end do arrayToString
 end function c_f_string
 !--------------------------------------------------------------------------------------------------
-!> @brief convert Fortran string to C string
-!> @details: C string is NULL terminated and, hence, longer by one than the Fortran string
+!> @brief Convert Fortran string to C string.
+!> @details: C string is NULL terminated and, hence, longer by one than the Fortran string.
 !--------------------------------------------------------------------------------------------------
 pure function f_c_string(f_string) result(c_string)
   character(len=*), intent(in) :: f_string
   character(kind=C_CHAR), dimension(len_trim(f_string)+1) :: c_string
-  integer :: i
-  do i=1,len_trim(f_string)
-    c_string(i)=f_string(i:i)
-  enddo
-  c_string(len_trim(f_string)+1) = C_NULL_CHAR
+  c_string = transfer(trim(f_string)//C_NULL_CHAR,c_string,size=size(c_string))
 end function f_c_string
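The rewritten f_c_string replaces the element-wise copy loop with a single TRANSFER that reinterprets the NUL-terminated Fortran string as an array of C characters, so the loop index disappears entirely. A standalone sketch of the same idiom:

  program transfer_demo
    use, intrinsic :: ISO_C_Binding, only: C_CHAR, C_NULL_CHAR
    implicit none
    character(len=*), parameter :: f_string = 'damask'
    character(kind=C_CHAR), dimension(len_trim(f_string)+1) :: c_string

    ! one-shot conversion; the trailing NUL is part of the transferred source
    c_string = transfer(trim(f_string)//C_NULL_CHAR, c_string, size=size(c_string))
    print '(a,i0)', 'chars incl. trailing NUL: ', size(c_string)
  end program transfer_demo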