Merge remote-tracking branch 'origin/development' into no-global-dot-state

Martin Diehl 2022-02-09 22:53:04 +01:00
commit 2110690ae4
21 changed files with 102 additions and 89 deletions

View File

@@ -36,21 +36,21 @@ variables:
# Names of module files to load
# ===============================================================================================
# ++++++++++++ Compiler +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
COMPILER_GNU: "Compiler/GNU/10"
COMPILER_GNU: "Compiler/GNU/10"
COMPILER_INTELLLVM: "Compiler/oneAPI/2022.0.1 Libraries/IMKL/2022.0.1"
COMPILER_INTEL: "Compiler/Intel/2022.0.1 Libraries/IMKL/2022.0.1"
# ++++++++++++ MPI ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
MPI_GNU: "MPI/GNU/10/OpenMPI/4.1.1"
MPI_INTELLLVM: "MPI/oneAPI/2022.0.1/IntelMPI/2021.5.0"
MPI_INTEL: "MPI/Intel/2022.0.1/IntelMPI/2021.5.0"
MPI_GNU: "MPI/GNU/10/OpenMPI/4.1.2"
MPI_INTELLLVM: "MPI/oneAPI/2022.0.1/IntelMPI/2021.5.0"
MPI_INTEL: "MPI/Intel/2022.0.1/IntelMPI/2021.5.0"
# ++++++++++++ PETSc ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
PETSC_GNU: "Libraries/PETSc/3.16.1/GNU-10-OpenMPI-4.1.1"
PETSC_GNU: "Libraries/PETSc/3.16.4/GNU-10-OpenMPI-4.1.2"
PETSC_INTELLLVM: "Libraries/PETSc/3.16.3/oneAPI-2022.0.1-IntelMPI-2021.5.0"
PETSC_INTEL: "Libraries/PETSc/3.16.3/Intel-2022.0.1-IntelMPI-2021.5.0"
PETSC_INTEL: "Libraries/PETSc/3.16.4/Intel-2022.0.1-IntelMPI-2021.5.0"
# ++++++++++++ MSC Marc +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
MSC: "FEM/MSC/2021.3.1"
IntelMarc: "Compiler/Intel/19.1.2 Libraries/IMKL/2020"
HDF5Marc: "HDF5/1.12.1/Intel-19.1.2"
MSC: "FEM/MSC/2021.3.1"
IntelMarc: "Compiler/Intel/19.1.2 Libraries/IMKL/2020"
HDF5Marc: "HDF5/1.12.1/Intel-19.1.2"
###################################################################################################

View File

@@ -42,7 +42,7 @@ string(TOUPPER "${CMAKE_BUILD_TYPE}" CMAKE_BUILD_TYPE)
if(CMAKE_BUILD_TYPE STREQUAL "DEBUG" OR CMAKE_BUILD_TYPE STREQUAL "SYNTAXONLY")
set(DEBUG_FLAGS "${DEBUG_FLAGS} -DDEBUG")
set(PARALLEL "OFF")
set(OPTI "OFF")
set(OPTI "DEBUG")
elseif(CMAKE_BUILD_TYPE STREQUAL "RELEASE")
set(PARALLEL "ON")
set(OPTI "DEFENSIVE")

View File

@@ -9,26 +9,24 @@ if (OPENMP)
set (OPENMP_FLAGS "-fopenmp")
endif ()
if (OPTIMIZATION STREQUAL "OFF")
if (OPTIMIZATION STREQUAL "DEBUG")
set (OPTIMIZATION_FLAGS "-Og")
elseif (OPTIMIZATION STREQUAL "OFF")
set (OPTIMIZATION_FLAGS "-O0")
elseif (OPTIMIZATION STREQUAL "DEFENSIVE")
set (OPTIMIZATION_FLAGS "-O2 -mtune=generic -flto")
set (OPTIMIZATION_FLAGS "-O2 -mtune=native")
elseif (OPTIMIZATION STREQUAL "AGGRESSIVE")
set (OPTIMIZATION_FLAGS "-O3 -march=native -mtune=native -ffast-math -funroll-loops -ftree-vectorize -flto")
set (OPTIMIZATION_FLAGS "-O3 -march=native -funroll-loops -ftree-vectorize -flto")
endif ()
set (STANDARD_CHECK "-std=f2018 -pedantic-errors" )
set (LINKER_FLAGS "${LINKER_FLAGS} -Wl")
# options passed directly to the linker
set (LINKER_FLAGS "${LINKER_FLAGS},-undefined,dynamic_lookup" )
# ensure to link against dynamic libraries
#------------------------------------------------------------------------------------------------
# Fine tuning compilation options
set (COMPILE_FLAGS "${COMPILE_FLAGS} -cpp")
# preprocessor
set (COMPILE_FLAGS "${COMPILE_FLAGS} -fPIC -fPIE")
set (COMPILE_FLAGS "${COMPILE_FLAGS} -fPIE")
# position independent code
set (COMPILE_FLAGS "${COMPILE_FLAGS} -ffree-line-length-132")
@@ -123,6 +121,9 @@ set (DEBUG_FLAGS "${DEBUG_FLAGS} -ffpe-trap=invalid,zero,overflow")
set (DEBUG_FLAGS "${DEBUG_FLAGS} -g")
# Generate symbolic debugging information in the object file
set (DEBUG_FLAGS "${DEBUG_FLAGS} -Og")
# Optimize debugging experience
set (DEBUG_FLAGS "${DEBUG_FLAGS} -fbacktrace")
set (DEBUG_FLAGS "${DEBUG_FLAGS} -fdump-core")
set (DEBUG_FLAGS "${DEBUG_FLAGS} -fcheck=all")
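The added GNU debug options complement each other: -Og optimizes without hampering debugging, -ffpe-trap=invalid,zero,overflow turns the listed floating-point exceptions into aborts, -g and -fbacktrace make the abort traceable, and -fcheck=all adds run-time checks such as array-bounds checking. A minimal, hypothetical test program (not part of this commit) showing the kind of error these flags surface:

! fpe_demo.f90 - illustrative only; compile with
!   gfortran -Og -g -fbacktrace -ffpe-trap=invalid,zero,overflow -fcheck=all fpe_demo.f90
program fpe_demo
  implicit none
  real :: x, y
  real :: v(3) = [1.0, 2.0, 3.0]
  integer :: i

  x = real(command_argument_count())   ! 0.0 when run without arguments, defeats constant folding
  y = 1.0/x                            ! -ffpe-trap=zero aborts here, -fbacktrace prints the call chain
  print *, y

  i = command_argument_count() + 4
  print *, v(i)                        ! -fcheck=all reports this out-of-bounds access (if the run got this far)
end program fpe_demo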

View File

@@ -9,12 +9,12 @@ if (OPENMP)
set (OPENMP_FLAGS "-qopenmp -parallel")
endif ()
if (OPTIMIZATION STREQUAL "OFF")
if (OPTIMIZATION STREQUAL "OFF" OR OPTIMIZATION STREQUAL "DEBUG")
set (OPTIMIZATION_FLAGS "-O0 -no-ip")
elseif (OPTIMIZATION STREQUAL "DEFENSIVE")
set (OPTIMIZATION_FLAGS "-O2")
elseif (OPTIMIZATION STREQUAL "AGGRESSIVE")
set (OPTIMIZATION_FLAGS "-ipo -O3 -no-prec-div -fp-model fast=2 -xHost")
set (OPTIMIZATION_FLAGS "-ipo -O3 -fp-model fast=2 -xHost")
# -fast = -ipo, -O3, -no-prec-div, -static, -fp-model fast=2, and -xHost
endif ()
@@ -110,6 +110,9 @@ set (DEBUG_FLAGS "${DEBUG_FLAGS} -fpe-all=0")
# generate debug information for parameters
# Disabled due to ICE when compiling phase_damage.f90 (not understandable, there is no parameter in there)
set (DEBUG_FLAGS "${DEBUG_FLAGS} -debug all")
# generate complete debugging information
# Additional options
# -heap-arrays: Should not be done for OpenMP, but set "ulimit -s unlimited" on shell. Probably it helps also to unlimit other limits
# -check: Checks at runtime, where

View File

@@ -9,7 +9,7 @@ if (OPENMP)
set (OPENMP_FLAGS "-qopenmp")
endif ()
if (OPTIMIZATION STREQUAL "OFF")
if (OPTIMIZATION STREQUAL "OFF" OR OPTIMIZATION STREQUAL "DEBUG")
set (OPTIMIZATION_FLAGS "-O0")
elseif (OPTIMIZATION STREQUAL "DEFENSIVE")
set (OPTIMIZATION_FLAGS "-O2")
@@ -109,6 +109,9 @@ set (DEBUG_FLAGS "${DEBUG_FLAGS} -fpe-all=0")
set (DEBUG_FLAGS "${DEBUG_FLAGS} -debug-parameters all")
# generate debug information for parameters
set (DEBUG_FLAGS "${DEBUG_FLAGS} -debug all")
# generate complete debugging information
# Additional options
# -heap-arrays: Should not be done for OpenMP, but set "ulimit -s unlimited" on shell. Probably it helps also to unlimit other limits
# -check: Checks at runtime, where

View File

@@ -1 +1 @@
v3.0.0-alpha5-556-g97f849c09
v3.0.0-alpha5-603-ge0ed668ce

View File

@@ -90,7 +90,6 @@ subroutine CPFEM_initAll
call material_init(.false.)
call phase_init
call homogenization_init
call crystallite_init
call CPFEM_init
call config_deallocate

View File

@@ -68,7 +68,6 @@ subroutine CPFEM_initAll
call material_init(restart=interface_restartInc>0)
call phase_init
call homogenization_init
call crystallite_init
call CPFEM_init
call config_deallocate

View File

@@ -10,7 +10,8 @@ module HDF5_utilities
#include <petsc/finclude/petscsys.h>
use PETScSys
#if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
use MPI
use MPI_f08
use MPI, only: MPI_INFO_NULL_F90 => MPI_INFO_NULL
#endif
#endif
@@ -162,9 +163,9 @@ integer(HID_T) function HDF5_openFile(fileName,mode,parallel)
character, intent(in), optional :: mode
logical, intent(in), optional :: parallel
character :: m
integer(HID_T) :: plist_id
integer :: hdferr
character :: m
integer(HID_T) :: plist_id
integer :: hdferr
if (present(mode)) then
@@ -178,9 +179,15 @@ integer(HID_T) function HDF5_openFile(fileName,mode,parallel)
#ifdef PETSC
if (present(parallel)) then
#if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
if (parallel) call H5Pset_fapl_mpio_f(plist_id, PETSC_COMM_WORLD, MPI_INFO_NULL_F90, hdferr)
else
call H5Pset_fapl_mpio_f(plist_id, PETSC_COMM_WORLD, MPI_INFO_NULL_F90, hdferr)
#else
if (parallel) call H5Pset_fapl_mpio_f(plist_id, PETSC_COMM_WORLD, MPI_INFO_NULL, hdferr)
else
call H5Pset_fapl_mpio_f(plist_id, PETSC_COMM_WORLD, MPI_INFO_NULL, hdferr)
#endif
end if
if(hdferr < 0) error stop 'HDF5 error'
#endif
@@ -1860,7 +1867,7 @@ subroutine initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_
globalShape !< shape of the dataset (all processes)
integer(HID_T), intent(out) :: dset_id, filespace_id, memspace_id, plist_id, aplist_id
integer, dimension(worldsize) :: &
integer(MPI_INTEGER_KIND), dimension(worldsize) :: &
readSize !< contribution of all processes
integer :: hdferr
integer(MPI_INTEGER_KIND) :: err_MPI
@@ -1871,13 +1878,13 @@ subroutine initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_
if(hdferr < 0) error stop 'HDF5 error'
!--------------------------------------------------------------------------------------------------
readSize = 0
readSize(worldrank+1) = int(localShape(ubound(localShape,1)))
readSize = 0_MPI_INTEGER_KIND
readSize(worldrank+1) = int(localShape(ubound(localShape,1)),MPI_INTEGER_KIND)
#ifdef PETSC
if (parallel) then
call H5Pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr)
if(hdferr < 0) error stop 'HDF5 error'
call MPI_allreduce(MPI_IN_PLACE,readSize,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,err_MPI) ! get total output size over each process
call MPI_Allreduce(MPI_IN_PLACE,readSize,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI) ! get total output size over each process
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
end if
#endif
@@ -1954,8 +1961,8 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
totalShape !< shape of the dataset (all processes)
integer(HID_T), intent(out) :: dset_id, filespace_id, memspace_id, plist_id
integer, dimension(worldsize) :: writeSize !< contribution of all processes
integer(HID_T) :: dcpl
integer(MPI_INTEGER_KIND), dimension(worldsize) :: writeSize !< contribution of all processes
integer(HID_T) :: dcpl
integer :: hdferr
integer(MPI_INTEGER_KIND) :: err_MPI
integer(HSIZE_T), parameter :: chunkSize = 1024_HSIZE_T**2/8_HSIZE_T
@@ -1974,11 +1981,11 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
!--------------------------------------------------------------------------------------------------
! determine the global data layout among all processes
writeSize = 0
writeSize(worldrank+1) = int(myShape(ubound(myShape,1)))
writeSize = 0_MPI_INTEGER_KIND
writeSize(worldrank+1) = int(myShape(ubound(myShape,1)),MPI_INTEGER_KIND)
#ifdef PETSC
if (parallel) then
call MPI_allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,err_MPI) ! get total output size over each process
call MPI_Allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI) ! get total output size over each process
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
end if
#endif
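Context for the hunks above (an illustrative sketch, not part of the commit): the per-process size arrays now use integer(MPI_INTEGER_KIND) and the reduction runs over MPI_COMM_WORLD. The underlying pattern is that every rank writes its own contribution into one slot of a zeroed array, and an in-place MPI_Allreduce with MPI_SUM then gives every rank the complete layout. A self-contained version of that pattern, using the mpi_f08 module directly instead of the DAMASK wrappers:

! layout_demo.f90 - illustrative only; build with mpifort and run with several ranks
program layout_demo
  use mpi_f08
  implicit none
  integer :: worldrank, worldsize, ierr
  integer, allocatable :: readSize(:)

  call MPI_Init(ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, worldrank, ierr)
  call MPI_Comm_size(MPI_COMM_WORLD, worldsize, ierr)

  allocate(readSize(worldsize))
  readSize = 0
  readSize(worldrank+1) = 10*(worldrank+1)   ! stand-in for the local dataset extent

  ! in-place sum: afterwards every rank holds the contributions of all ranks
  call MPI_Allreduce(MPI_IN_PLACE, readSize, worldsize, MPI_INTEGER, MPI_SUM, MPI_COMM_WORLD, ierr)

  if (worldrank == 0) print '(a,*(1x,i0))', 'global layout:', readSize
  call MPI_Finalize(ierr)
end program layout_demo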

View File

@@ -584,8 +584,6 @@ subroutine IO_warning(warning_ID,el,ip,g,ext_msg)
character(len=pStringLen) :: formatString
select case (warning_ID)
case (42)
msg = 'parameter has no effect'
case (47)
msg = 'no valid parameter for FFTW, using FFTW_PATIENT'
case (207)
@@ -594,8 +592,6 @@ subroutine IO_warning(warning_ID,el,ip,g,ext_msg)
msg = 'crystallite responds elastically'
case (601)
msg = 'stiffness close to zero'
case (700)
msg = 'unknown crystal symmetry'
case (709)
msg = 'read only the first document'
case default

View File

@@ -32,7 +32,7 @@ program DAMASK_grid
implicit none
type :: tLoadCase
type(tRotation) :: rot !< rotation of BC
type(tRotation) :: rot !< rotation of BC
type(tBoundaryCondition) :: stress, & !< stress BC
deformation !< deformation BC (dot_F, F, or L)
real(pReal) :: t, & !< length of increment

View File

@@ -339,7 +339,7 @@ subroutine grid_mechanical_FEM_forward(cutBack,guess,Delta_t,Delta_t_old,t_remai
type(tBoundaryCondition), intent(in) :: &
stress_BC, &
deformation_BC
type(tRotation), intent(in) :: &
type(tRotation), intent(in) :: &
rotation_BC
PetscErrorCode :: err_PETSc
PetscScalar, pointer, dimension(:,:,:,:) :: &

View File

@@ -79,6 +79,12 @@ module grid_mechanical_spectral_basic
err_BC, & !< deviation from stress BC
err_div !< RMS of div of P
#if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
type(MPI_Status) :: status
#else
integer, dimension(MPI_STATUS_SIZE) :: status
#endif
integer :: &
totalIter = 0 !< total iteration in current increment
@@ -244,7 +250,7 @@ subroutine grid_mechanical_spectral_basic_init
call MPI_File_open(MPI_COMM_WORLD, trim(getSolverJobName())//'.C_ref', &
MPI_MODE_RDONLY,MPI_INFO_NULL,fileUnit,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call MPI_File_read(fileUnit,C_minMaxAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_STATUS_IGNORE,err_MPI)
call MPI_File_read(fileUnit,C_minMaxAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,status,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call MPI_File_close(fileUnit,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
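Context for the new status declaration (and the identical one in the polarisation module below): with the mpi_f08 bindings the status argument of MPI_File_read is a derived type rather than an integer array, and passing a real status instead of MPI_STATUS_IGNORE also makes the actual transfer size available. A hypothetical standalone sketch of that pattern (file name and element count are made up, not the DAMASK code path):

! read_status_demo.f90 - illustrative only; build with mpifort
program read_status_demo
  use mpi_f08
  implicit none
  type(MPI_File)   :: fileUnit
  type(MPI_Status) :: status                   ! mpi_f08: a derived type, not integer(MPI_STATUS_SIZE)
  double precision :: C_ref(81)
  integer :: nRead, ierr

  call MPI_Init(ierr)
  call MPI_File_open(MPI_COMM_WORLD, 'C_ref.bin', MPI_MODE_RDONLY, MPI_INFO_NULL, fileUnit, ierr)
  call MPI_File_read(fileUnit, C_ref, 81, MPI_DOUBLE_PRECISION, status, ierr)

  ! the status records how much was actually read, which MPI_STATUS_IGNORE would discard
  call MPI_Get_count(status, MPI_DOUBLE_PRECISION, nRead, ierr)
  if (nRead /= 81) error stop 'short read'

  call MPI_File_close(fileUnit, ierr)
  call MPI_Finalize(ierr)
end program read_status_demo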

View File

@@ -90,6 +90,12 @@ module grid_mechanical_spectral_polarisation
err_curl, & !< RMS of curl of F
err_div !< RMS of div of P
#if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
type(MPI_Status) :: status
#else
integer, dimension(MPI_STATUS_SIZE) :: status
#endif
integer :: &
totalIter = 0 !< total iteration in current increment
@@ -270,7 +276,7 @@ subroutine grid_mechanical_spectral_polarisation_init
call MPI_File_open(MPI_COMM_WORLD, trim(getSolverJobName())//'.C_ref', &
MPI_MODE_RDONLY,MPI_INFO_NULL,fileUnit,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call MPI_File_read(fileUnit,C_minMaxAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_STATUS_IGNORE,err_MPI)
call MPI_File_read(fileUnit,C_minMaxAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,status,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call MPI_File_close(fileUnit,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'

View File

@@ -898,7 +898,7 @@ end function math_33toVoigt6_stress
!--------------------------------------------------------------------------------------------------
!> @brief Convert 3x3 stress tensor into 6 Voigt vector.
!> @brief Convert 3x3 strain tensor into 6 Voigt vector.
!--------------------------------------------------------------------------------------------------
pure function math_33toVoigt6_strain(epsilon) result(epsilon_tilde)
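The corrected comment distinguishes the strain conversion from the stress conversion above it. Assuming the conventional Voigt notation (the component ordering is not shown in this hunk), the difference is that the shear components of a strain tensor carry a factor 2 (engineering shear strain) while stress components do not. A small illustrative sketch, not taken from the DAMASK sources:

! voigt_demo.f90 - illustrative only; assumes the ordering (11,22,33,23,13,12)
program voigt_demo
  implicit none
  real :: eps(3,3), eps_tilde(6)

  eps = reshape([1.0e-3, 2.0e-4, 3.0e-4, &
                 2.0e-4, 2.0e-3, 4.0e-4, &
                 3.0e-4, 4.0e-4, 3.0e-3], [3,3])

  ! normal components map one to one, shear strains pick up the factor 2
  eps_tilde = [eps(1,1), eps(2,2), eps(3,3), &
               2.0*eps(2,3), 2.0*eps(1,3), 2.0*eps(1,2)]

  print '(6(1x,es10.3))', eps_tilde
end program voigt_demo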

View File

@@ -307,9 +307,11 @@ program DAMASK_mesh
guess = .true. ! start guessing after first converged (sub)inc
timeIncOld = timeinc
end if
if (.not. cutBack .and. worldrank == 0) &
if (.not. cutBack .and. worldrank == 0) then
write(statUnit,*) totalIncsCounter, time, cutBackLevel, &
solres%converged, solres%iterationsNeeded ! write statistics about accepted solution
flush(statUnit)
endif
end do subStepLooping
cutBackLevel = max(0, cutBackLevel - 1) ! try half number of subincs next inc
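The reworked block wraps the statistics output in a proper if construct and flushes the unit after each record, so the statistics file stays usable even if the run is cut back or aborted later. A tiny illustrative sketch of the idiom (file name and payload are made up):

! stat_flush_demo.f90 - illustrative only
program stat_flush_demo
  implicit none
  integer :: statUnit, inc

  open(newunit=statUnit, file='statistics.txt', status='replace', action='write')
  do inc = 1, 5
    write(statUnit,*) inc, real(inc)**2   ! stand-in for per-increment statistics
    flush(statUnit)                       ! hand the record to the OS so it survives an abort
  end do
  close(statUnit)
end program stat_flush_demo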

View File

@@ -52,13 +52,13 @@ contains
!--------------------------------------------------------------------------------------------------
subroutine parallelization_init
integer(MPI_INTEGER_KIND) :: err_MPI, typeSize
integer(MPI_INTEGER_KIND) :: err_MPI, typeSize, version, subversion, devNull
character(len=4) :: rank_str
character(len=MPI_MAX_LIBRARY_VERSION_STRING) :: MPI_library_version
!$ integer :: got_env, threadLevel
!$ integer(pI32) :: OMP_NUM_THREADS
!$ character(len=6) NumThreadsString
PetscErrorCode :: err_PETSc
#ifdef _OPENMP
! If openMP is enabled, check if the MPI library supports it and initialize accordingly.
@@ -86,12 +86,22 @@ subroutine parallelization_init
if (err_MPI /= 0_MPI_INTEGER_KIND) &
error stop 'Could not determine worldrank'
if (worldrank == 0) print'(/,1x,a)', '<<<+- parallelization init -+>>>'
if (worldrank == 0) then
print'(/,1x,a)', '<<<+- parallelization init -+>>>'
call MPI_Get_library_version(MPI_library_version,devNull,err_MPI)
print'(/,1x,a)', trim(MPI_library_version)
call MPI_Get_version(version,subversion,err_MPI)
print'(1x,a,i0,a,i0)', 'MPI standard: ',version,'.',subversion
#ifdef _OPENMP
print'(1x,a,i0)', 'OpenMP version: ',openmp_version
#endif
end if
call MPI_Comm_size(MPI_COMM_WORLD,worldsize,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) &
error stop 'Could not determine worldsize'
if (worldrank == 0) print'(/,1x,a,i3)', 'MPI processes: ',worldsize
if (worldrank == 0) print'(/,1x,a,i0)', 'MPI processes: ',worldsize
call MPI_Type_size(MPI_INTEGER,typeSize,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) &
@@ -128,7 +138,7 @@ subroutine parallelization_init
!$ OMP_NUM_THREADS = 4_pI32
!$ endif
!$ endif
!$ print'(1x,a,1x,i2)', 'OMP_NUM_THREADS:',OMP_NUM_THREADS
!$ print'(1x,a,i0)', 'OMP_NUM_THREADS: ',OMP_NUM_THREADS
!$ call omp_set_num_threads(OMP_NUM_THREADS)
end subroutine parallelization_init
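The init routine now reports the MPI library string, the MPI standard version, and (when OpenMP is enabled) the OpenMP version on rank 0, and prints the counters with i0 instead of fixed widths. A self-contained sketch of those queries (illustrative only, using mpi_f08 directly rather than the DAMASK module):

! mpi_report_demo.f90 - illustrative only; build with: mpifort -fopenmp mpi_report_demo.f90
program mpi_report_demo
  use mpi_f08
  !$ use OMP_LIB, only: openmp_version
  implicit none
  character(len=MPI_MAX_LIBRARY_VERSION_STRING) :: library_version
  integer :: version, subversion, strLen, worldrank, worldsize, ierr

  call MPI_Init(ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, worldrank, ierr)
  call MPI_Comm_size(MPI_COMM_WORLD, worldsize, ierr)

  if (worldrank == 0) then
    call MPI_Get_library_version(library_version, strLen, ierr)   ! vendor string (Open MPI, Intel MPI, ...)
    print '(1x,a)', trim(library_version)
    call MPI_Get_version(version, subversion, ierr)               ! MPI standard, e.g. 3.1
    print '(1x,a,i0,a,i0)', 'MPI standard: ', version, '.', subversion
    !$ print '(1x,a,i0)', 'OpenMP version: ', openmp_version      ! date code, e.g. 201511
    print '(1x,a,i0)', 'MPI processes: ', worldsize
  end if

  call MPI_Finalize(ierr)
end program mpi_report_demo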

View File

@@ -323,7 +323,6 @@ module phase
phase_restore, &
plastic_nonlocal_updateCompatibility, &
converged, &
crystallite_init, &
phase_mechanical_constitutive, &
phase_thermal_constitutive, &
phase_damage_constitutive, &
@@ -401,6 +400,8 @@ subroutine phase_init
call damage_init
call thermal_init(phases)
call crystallite_init()
end subroutine phase_init
@@ -502,22 +503,13 @@ subroutine crystallite_init()
ce, &
co, & !< counter in integration point component loop
ip, & !< counter in integration point loop
el, & !< counter in element loop
cMax, & !< maximum number of integration point components
iMax, & !< maximum number of integration points
eMax !< maximum number of elements
el !< counter in element loop
class(tNode), pointer :: &
num_crystallite, &
phases
print'(/,1x,a)', '<<<+- crystallite init -+>>>'
cMax = homogenization_maxNconstituents
iMax = discretization_nIPs
eMax = discretization_Nelems
num_crystallite => config_numerics%get('crystallite',defaultVal=emptyDict)
num%subStepMinCryst = num_crystallite%get_asFloat ('subStepMin', defaultVal=1.0e-3_pReal)
@@ -551,15 +543,9 @@ subroutine crystallite_init()
phases => config_material%get('phase')
print'(/,a42,1x,i10)', ' # of elements: ', eMax
print'( a42,1x,i10)', ' # of integration points/element: ', iMax
print'( a42,1x,i10)', 'max # of constituents/integration point: ', cMax
flush(IO_STDOUT)
!$OMP PARALLEL DO PRIVATE(ce)
do el = 1, eMax
do ip = 1, iMax
do el = 1, discretization_Nelems
do ip = 1, discretization_nIPs
ce = (el-1)*discretization_nIPs + ip
do co = 1,homogenization_Nconstituents(material_homogenizationID(ce))
call crystallite_orientations(co,ip,el)
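With crystallite_init now called from phase_init (see the earlier hunk) rather than exported, the loop bounds come directly from discretization_Nelems and discretization_nIPs instead of the removed cached copies eMax/iMax/cMax. The cell index ce simply enumerates integration points element by element; a tiny illustrative sketch of that mapping (mesh sizes are hypothetical):

! cell_index_demo.f90 - illustrative sketch of the (element, integration point) -> cell mapping
program cell_index_demo
  implicit none
  integer, parameter :: Nelems = 3, nIPs = 4      ! hypothetical mesh sizes
  integer :: el, ip, ce

  do el = 1, Nelems
    do ip = 1, nIPs
      ce = (el-1)*nIPs + ip                       ! cells are numbered ip-fastest, element-major
      print '(3(a,i0))', 'el=', el, ' ip=', ip, ' ce=', ce
    end do
  end do
end program cell_index_demo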

View File

@@ -879,10 +879,10 @@ function integrateStateRK(F_0,F,subFp0,subFi0,subState0,Delta_t,co,ip,el,A,B,C,D
plasticState(ph)%state(1:sizeDotState,en) = subState0 &
+ dotState * Delta_t
broken = integrateStress(F_0 + (F - F_0) * Delta_t * C(stage),subFp0,subFi0,Delta_t * C(stage),co,ip,el)
broken = integrateStress(F_0 + (F-F_0) * Delta_t*C(stage),subFp0,subFi0,Delta_t*C(stage),co,ip,el)
if(broken) exit
dotState = plastic_dotState(Delta_t, co,ip,el,ph,en)
dotState = plastic_dotState(Delta_t*C(stage), co,ip,el,ph,en)
if (any(IEEE_is_NaN(dotState))) exit
enddo
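Background for the changed lines (standard explicit Runge-Kutta, not specific to DAMASK): with stage coefficients A, weights B, and nodes C, the stage derivatives and the update read

\[
  k_i = f\!\left(t_0 + C_i\,\Delta t,\; y_0 + \Delta t \sum_{j<i} A_{ij}\,k_j\right),
  \qquad
  y_1 = y_0 + \Delta t \sum_i B_i\,k_i .
\]

The i-th derivative therefore belongs to a sub-interval of length C_i * Delta_t, which is why the rewritten call hands Delta_t*C(stage) to plastic_dotState, matching the integrateStress call on the preceding line; the change to that line itself is whitespace only.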

View File

@@ -27,7 +27,7 @@ module function thermalexpansion_init(kinematics_length) result(myKinematics)
integer, intent(in) :: kinematics_length
logical, dimension(:,:), allocatable :: myKinematics
integer :: Ninstances,p,i,k
integer :: Ninstances, p, k
class(tNode), pointer :: &
phases, &
phase, &

View File

@@ -150,14 +150,14 @@ function getUserName()
getUserName = c_f_string(getUserName_Cstring)
else
getUserName = 'n/a (Error!)'
endif
end if
end function getUserName
!--------------------------------------------------------------------------------------------------
!> @brief convert C string to Fortran string
!> @details: C string is NULL terminated and, hence, longer by one than the Fortran string
!> @brief Convert C string to Fortran string.
!> @details: C string is NULL terminated and, hence, longer by one than the Fortran string.
!--------------------------------------------------------------------------------------------------
pure function c_f_string(c_string) result(f_string)
@@ -174,28 +174,23 @@ pure function c_f_string(c_string) result(f_string)
else
f_string = f_string(:i-1)
exit
endif
enddo arrayToString
end if
end do arrayToString
end function c_f_string
!--------------------------------------------------------------------------------------------------
!> @brief convert Fortran string to C string
!> @details: C string is NULL terminated and, hence, longer by one than the Fortran string
!> @brief Convert Fortran string to C string.
!> @details: C string is NULL terminated and, hence, longer by one than the Fortran string.
!--------------------------------------------------------------------------------------------------
pure function f_c_string(f_string) result(c_string)
character(len=*), intent(in) :: f_string
character(kind=C_CHAR), dimension(len_trim(f_string)+1) :: c_string
integer :: i
do i=1,len_trim(f_string)
c_string(i)=f_string(i:i)
enddo
c_string(len_trim(f_string)+1) = C_NULL_CHAR
c_string = transfer(trim(f_string)//C_NULL_CHAR,c_string,size=size(c_string))
end function f_c_string
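The element-wise copy loop is replaced by a single transfer, which packs the trimmed Fortran string plus a terminating C_NULL_CHAR into the C character array in one statement. A standalone sketch of the same idiom (illustrative only, not the DAMASK module):

! c_string_demo.f90 - illustrative sketch of Fortran to C string conversion via transfer
program c_string_demo
  use, intrinsic :: ISO_C_Binding, only: C_CHAR, C_NULL_CHAR
  implicit none
  character(len=*), parameter :: f_string = 'damask'
  character(kind=C_CHAR), dimension(len_trim(f_string)+1) :: c_string
  integer :: i

  ! pack the trimmed string plus the NUL terminator into the character array in one go
  c_string = transfer(trim(f_string)//C_NULL_CHAR, c_string, size=size(c_string))

  do i = 1, size(c_string)
    if (c_string(i) == C_NULL_CHAR) then
      print '(a,i0)', 'NUL terminator at position ', i
    else
      print '(a)', c_string(i)
    end if
  end do
end program c_string_demo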