diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index f3e74e030..8e3884851 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -36,21 +36,21 @@ variables:
   # Names of module files to load
   # ===============================================================================================
   # ++++++++++++ Compiler +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-  COMPILER_GNU: "Compiler/GNU/10"
+  COMPILER_GNU: "Compiler/GNU/10"
   COMPILER_INTELLLVM: "Compiler/oneAPI/2022.0.1 Libraries/IMKL/2022.0.1"
   COMPILER_INTEL: "Compiler/Intel/2022.0.1 Libraries/IMKL/2022.0.1"
   # ++++++++++++ MPI ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-  MPI_GNU: "MPI/GNU/10/OpenMPI/4.1.1"
-  MPI_INTELLLVM: "MPI/oneAPI/2022.0.1/IntelMPI/2021.5.0"
-  MPI_INTEL: "MPI/Intel/2022.0.1/IntelMPI/2021.5.0"
+  MPI_GNU: "MPI/GNU/10/OpenMPI/4.1.2"
+  MPI_INTELLLVM: "MPI/oneAPI/2022.0.1/IntelMPI/2021.5.0"
+  MPI_INTEL: "MPI/Intel/2022.0.1/IntelMPI/2021.5.0"
   # ++++++++++++ PETSc ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-  PETSC_GNU: "Libraries/PETSc/3.16.1/GNU-10-OpenMPI-4.1.1"
+  PETSC_GNU: "Libraries/PETSc/3.16.4/GNU-10-OpenMPI-4.1.2"
   PETSC_INTELLLVM: "Libraries/PETSc/3.16.3/oneAPI-2022.0.1-IntelMPI-2021.5.0"
-  PETSC_INTEL: "Libraries/PETSc/3.16.3/Intel-2022.0.1-IntelMPI-2021.5.0"
+  PETSC_INTEL: "Libraries/PETSc/3.16.4/Intel-2022.0.1-IntelMPI-2021.5.0"
   # ++++++++++++ MSC Marc +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-  MSC: "FEM/MSC/2021.3.1"
-  IntelMarc: "Compiler/Intel/19.1.2 Libraries/IMKL/2020"
-  HDF5Marc: "HDF5/1.12.1/Intel-19.1.2"
+  MSC: "FEM/MSC/2021.3.1"
+  IntelMarc: "Compiler/Intel/19.1.2 Libraries/IMKL/2020"
+  HDF5Marc: "HDF5/1.12.1/Intel-19.1.2"

 ###################################################################################################
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 8c7b129f0..b4c405319 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -42,7 +42,7 @@ string(TOUPPER "${CMAKE_BUILD_TYPE}" CMAKE_BUILD_TYPE)
 if(CMAKE_BUILD_TYPE STREQUAL "DEBUG" OR CMAKE_BUILD_TYPE STREQUAL "SYNTAXONLY")
   set(DEBUG_FLAGS "${DEBUG_FLAGS} -DDEBUG")
   set(PARALLEL "OFF")
-  set(OPTI "OFF")
+  set(OPTI "DEBUG")
 elseif(CMAKE_BUILD_TYPE STREQUAL "RELEASE")
   set(PARALLEL "ON")
   set(OPTI "DEFENSIVE")
diff --git a/cmake/Compiler-GNU.cmake b/cmake/Compiler-GNU.cmake
index 5e2bf2ce6..c204d1af2 100644
--- a/cmake/Compiler-GNU.cmake
+++ b/cmake/Compiler-GNU.cmake
@@ -9,26 +9,24 @@ if (OPENMP)
   set (OPENMP_FLAGS "-fopenmp")
 endif ()

-if (OPTIMIZATION STREQUAL "OFF")
+if (OPTIMIZATION STREQUAL "DEBUG")
+  set (OPTIMIZATION_FLAGS "-Og")
+elseif (OPTIMIZATION STREQUAL "OFF")
   set (OPTIMIZATION_FLAGS "-O0")
 elseif (OPTIMIZATION STREQUAL "DEFENSIVE")
-  set (OPTIMIZATION_FLAGS "-O2 -mtune=generic -flto")
+  set (OPTIMIZATION_FLAGS "-O2 -mtune=native")
 elseif (OPTIMIZATION STREQUAL "AGGRESSIVE")
-  set (OPTIMIZATION_FLAGS "-O3 -march=native -mtune=native -ffast-math -funroll-loops -ftree-vectorize -flto")
+  set (OPTIMIZATION_FLAGS "-O3 -march=native -funroll-loops -ftree-vectorize -flto")
 endif ()

 set (STANDARD_CHECK "-std=f2018 -pedantic-errors" )
-set (LINKER_FLAGS "${LINKER_FLAGS} -Wl")
-# options parsed directly to the linker
-set (LINKER_FLAGS "${LINKER_FLAGS},-undefined,dynamic_lookup" )
-# ensure to link against dynamic libraries

 #------------------------------------------------------------------------------------------------
 # Fine tuning compilation options
 set (COMPILE_FLAGS "${COMPILE_FLAGS} -cpp")
 # preprocessor
-set (COMPILE_FLAGS "${COMPILE_FLAGS} -fPIC -fPIE")
+set (COMPILE_FLAGS "${COMPILE_FLAGS} -fPIE")
 # position independent code
 set (COMPILE_FLAGS "${COMPILE_FLAGS} -ffree-line-length-132")
@@ -123,6 +121,9 @@ set (DEBUG_FLAGS "${DEBUG_FLAGS} -ffpe-trap=invalid,zero,overflow")
 set (DEBUG_FLAGS "${DEBUG_FLAGS} -g")
 # Generate symbolic debugging information in the object file

+set (DEBUG_FLAGS "${DEBUG_FLAGS} -Og")
+# Optimize debugging experience
+
 set (DEBUG_FLAGS "${DEBUG_FLAGS} -fbacktrace")
 set (DEBUG_FLAGS "${DEBUG_FLAGS} -fdump-core")
 set (DEBUG_FLAGS "${DEBUG_FLAGS} -fcheck=all")
diff --git a/cmake/Compiler-Intel.cmake b/cmake/Compiler-Intel.cmake
index 3afffd2be..7f34e4a13 100644
--- a/cmake/Compiler-Intel.cmake
+++ b/cmake/Compiler-Intel.cmake
@@ -9,12 +9,12 @@ if (OPENMP)
   set (OPENMP_FLAGS "-qopenmp -parallel")
 endif ()

-if (OPTIMIZATION STREQUAL "OFF")
+if (OPTIMIZATION STREQUAL "OFF" OR OPTIMIZATION STREQUAL "DEBUG")
   set (OPTIMIZATION_FLAGS "-O0 -no-ip")
 elseif (OPTIMIZATION STREQUAL "DEFENSIVE")
   set (OPTIMIZATION_FLAGS "-O2")
 elseif (OPTIMIZATION STREQUAL "AGGRESSIVE")
-  set (OPTIMIZATION_FLAGS "-ipo -O3 -no-prec-div -fp-model fast=2 -xHost")
+  set (OPTIMIZATION_FLAGS "-ipo -O3 -fp-model fast=2 -xHost")
   # -fast = -ipo, -O3, -no-prec-div, -static, -fp-model fast=2, and -xHost"
 endif ()
@@ -110,6 +110,9 @@ set (DEBUG_FLAGS "${DEBUG_FLAGS} -fpe-all=0")
 # generate debug information for parameters
 # Disabled due to ICE when compiling phase_damage.f90 (not understandable, there is no parameter in there)

+set (DEBUG_FLAGS "${DEBUG_FLAGS} -debug all")
+# generate complete debugging information
+
 # Additional options
 # -heap-arrays: Should not be done for OpenMP, but set "ulimit -s unlimited" on shell. Probably it helps also to unlimit other limits
 # -check: Checks at runtime, where
diff --git a/cmake/Compiler-IntelLLVM.cmake b/cmake/Compiler-IntelLLVM.cmake
index 326cfe319..883873e1c 100644
--- a/cmake/Compiler-IntelLLVM.cmake
+++ b/cmake/Compiler-IntelLLVM.cmake
@@ -9,7 +9,7 @@ if (OPENMP)
   set (OPENMP_FLAGS "-qopenmp")
 endif ()

-if (OPTIMIZATION STREQUAL "OFF")
+if (OPTIMIZATION STREQUAL "OFF" OR OPTIMIZATION STREQUAL "DEBUG")
   set (OPTIMIZATION_FLAGS "-O0")
 elseif (OPTIMIZATION STREQUAL "DEFENSIVE")
   set (OPTIMIZATION_FLAGS "-O2")
@@ -109,6 +109,9 @@ set (DEBUG_FLAGS "${DEBUG_FLAGS} -fpe-all=0")
 set (DEBUG_FLAGS "${DEBUG_FLAGS} -debug-parameters all")
 # generate debug information for parameters

+set (DEBUG_FLAGS "${DEBUG_FLAGS} -debug all")
+# generate complete debugging information
+
 # Additional options
 # -heap-arrays: Should not be done for OpenMP, but set "ulimit -s unlimited" on shell. Probably it helps also to unlimit other limits
 # -check: Checks at runtime, where
diff --git a/python/damask/VERSION b/python/damask/VERSION
index b4c8c4b3c..69974f6be 100644
--- a/python/damask/VERSION
+++ b/python/damask/VERSION
@@ -1 +1 @@
-v3.0.0-alpha5-556-g97f849c09
+v3.0.0-alpha5-603-ge0ed668ce
diff --git a/src/CPFEM.f90 b/src/CPFEM.f90
index 0f7590782..c8b7e3cca 100644
--- a/src/CPFEM.f90
+++ b/src/CPFEM.f90
@@ -90,7 +90,6 @@ subroutine CPFEM_initAll
   call material_init(.false.)
   call phase_init
   call homogenization_init
-  call crystallite_init
   call CPFEM_init
   call config_deallocate
diff --git a/src/CPFEM2.f90 b/src/CPFEM2.f90
index ed8fb611b..b24ba5480 100644
--- a/src/CPFEM2.f90
+++ b/src/CPFEM2.f90
@@ -68,7 +68,6 @@ subroutine CPFEM_initAll
   call material_init(restart=interface_restartInc>0)
   call phase_init
   call homogenization_init
-  call crystallite_init
   call CPFEM_init
   call config_deallocate
diff --git a/src/HDF5_utilities.f90 b/src/HDF5_utilities.f90
index d2076c2cc..93de4053b 100644
--- a/src/HDF5_utilities.f90
+++ b/src/HDF5_utilities.f90
@@ -10,7 +10,8 @@ module HDF5_utilities
 #include <petsc/finclude/petscsys.h>
   use PETScSys
 #if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
-  use MPI
+  use MPI_f08
+  use MPI, only: MPI_INFO_NULL_F90 => MPI_INFO_NULL
 #endif
 #endif
@@ -162,9 +163,9 @@ integer(HID_T) function HDF5_openFile(fileName,mode,parallel)
   character, intent(in), optional :: mode
   logical, intent(in), optional :: parallel

-  character :: m
-  integer(HID_T) :: plist_id
-  integer :: hdferr
+  character :: m
+  integer(HID_T) :: plist_id
+  integer :: hdferr

   if (present(mode)) then
@@ -178,9 +179,15 @@ integer(HID_T) function HDF5_openFile(fileName,mode,parallel)

 #ifdef PETSC
   if (present(parallel)) then
+#if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
+    if (parallel) call H5Pset_fapl_mpio_f(plist_id, PETSC_COMM_WORLD, MPI_INFO_NULL_F90, hdferr)
+  else
+    call H5Pset_fapl_mpio_f(plist_id, PETSC_COMM_WORLD, MPI_INFO_NULL_F90, hdferr)
+#else
     if (parallel) call H5Pset_fapl_mpio_f(plist_id, PETSC_COMM_WORLD, MPI_INFO_NULL, hdferr)
   else
     call H5Pset_fapl_mpio_f(plist_id, PETSC_COMM_WORLD, MPI_INFO_NULL, hdferr)
+#endif
   end if
   if(hdferr < 0) error stop 'HDF5 error'
 #endif
@@ -1860,7 +1867,7 @@ subroutine initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_
     globalShape                                        !< shape of the dataset (all processes)
   integer(HID_T), intent(out) :: dset_id, filespace_id, memspace_id, plist_id, aplist_id

-  integer, dimension(worldsize) :: &
+  integer(MPI_INTEGER_KIND), dimension(worldsize) :: &
     readSize                                           !< contribution of all processes
   integer :: hdferr
   integer(MPI_INTEGER_KIND) :: err_MPI
@@ -1871,13 +1878,13 @@ subroutine initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_
   if(hdferr < 0) error stop 'HDF5 error'

 !--------------------------------------------------------------------------------------------------
-  readSize = 0
-  readSize(worldrank+1) = int(localShape(ubound(localShape,1)))
+  readSize = 0_MPI_INTEGER_KIND
+  readSize(worldrank+1) = int(localShape(ubound(localShape,1)),MPI_INTEGER_KIND)
 #ifdef PETSC
   if (parallel) then
     call H5Pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr)
     if(hdferr < 0) error stop 'HDF5 error'
-    call MPI_allreduce(MPI_IN_PLACE,readSize,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,err_MPI) ! get total output size over each process
+    call MPI_Allreduce(MPI_IN_PLACE,readSize,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI)   ! get total output size over each process
     if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
   end if
 #endif
@@ -1954,8 +1961,8 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
     totalShape                                         !< shape of the dataset (all processes)
   integer(HID_T), intent(out) :: dset_id, filespace_id, memspace_id, plist_id

-  integer, dimension(worldsize) :: writeSize           !< contribution of all processes
-  integer(HID_T) :: dcpl
+  integer(MPI_INTEGER_KIND), dimension(worldsize) :: writeSize  !< contribution of all processes
+  integer(HID_T) :: dcpl
   integer :: hdferr
   integer(MPI_INTEGER_KIND) :: err_MPI
   integer(HSIZE_T), parameter :: chunkSize = 1024_HSIZE_T**2/8_HSIZE_T
@@ -1974,11 +1981,11 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, &

 !--------------------------------------------------------------------------------------------------
 ! determine the global data layout among all processes
-  writeSize = 0
-  writeSize(worldrank+1) = int(myShape(ubound(myShape,1)))
+  writeSize = 0_MPI_INTEGER_KIND
+  writeSize(worldrank+1) = int(myShape(ubound(myShape,1)),MPI_INTEGER_KIND)
 #ifdef PETSC
   if (parallel) then
-    call MPI_allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,err_MPI) ! get total output size over each process
+    call MPI_Allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI)  ! get total output size over each process
     if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
   end if
 #endif
@@ -2009,7 +2016,7 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
       if (hdferr < 0) error stop 'HDF5 error'
     end if
   end if
-
+
 !--------------------------------------------------------------------------------------------------
 ! create dataspace in memory (local shape) and in file (global shape)
   call H5Screate_simple_f(size(myShape), myShape, memspace_id, hdferr, myShape)
diff --git a/src/IO.f90 b/src/IO.f90
index ec7cf9704..441a0bf3d 100644
--- a/src/IO.f90
+++ b/src/IO.f90
@@ -584,8 +584,6 @@ subroutine IO_warning(warning_ID,el,ip,g,ext_msg)
   character(len=pStringLen) :: formatString

   select case (warning_ID)
-    case (42)
-      msg = 'parameter has no effect'
     case (47)
       msg = 'no valid parameter for FFTW, using FFTW_PATIENT'
     case (207)
@@ -594,8 +592,6 @@ subroutine IO_warning(warning_ID,el,ip,g,ext_msg)
       msg = 'crystallite responds elastically'
     case (601)
       msg = 'stiffness close to zero'
-    case (700)
-      msg = 'unknown crystal symmetry'
     case (709)
       msg = 'read only the first document'
     case default
diff --git a/src/grid/DAMASK_grid.f90 b/src/grid/DAMASK_grid.f90
index f1834d16b..918f477b1 100644
--- a/src/grid/DAMASK_grid.f90
+++ b/src/grid/DAMASK_grid.f90
@@ -32,7 +32,7 @@ program DAMASK_grid
   implicit none

   type :: tLoadCase
-    type(tRotation) :: rot                    !< rotation of BC
+    type(tRotation) :: rot                    !< rotation of BC
     type(tBoundaryCondition) :: stress, &     !< stress BC
                                 deformation   !< deformation BC (dot_F, F, or L)
     real(pReal) :: t, &                       !< length of increment
diff --git a/src/grid/grid_mech_FEM.f90 b/src/grid/grid_mech_FEM.f90
index 6ad035fc2..3c42c8c23 100644
--- a/src/grid/grid_mech_FEM.f90
+++ b/src/grid/grid_mech_FEM.f90
@@ -339,7 +339,7 @@ subroutine grid_mechanical_FEM_forward(cutBack,guess,Delta_t,Delta_t_old,t_remai
   type(tBoundaryCondition), intent(in) :: &
     stress_BC, &
     deformation_BC
-  type(tRotation), intent(in) :: &
+  type(tRotation), intent(in) :: &
     rotation_BC
   PetscErrorCode :: err_PETSc
   PetscScalar, pointer, dimension(:,:,:,:) :: &
diff --git a/src/grid/grid_mech_spectral_basic.f90 b/src/grid/grid_mech_spectral_basic.f90
index fa2e17bd9..2f2b73f01 100644
--- a/src/grid/grid_mech_spectral_basic.f90
+++ b/src/grid/grid_mech_spectral_basic.f90
@@ -79,6 +79,12 @@ module grid_mechanical_spectral_basic
     err_BC, &                !< deviation from stress BC
     err_div                  !< RMS of div of P

+#if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
+  type(MPI_Status) :: status
+#else
+  integer, dimension(MPI_STATUS_SIZE) :: status
+#endif
+
   integer :: &
     totalIter = 0            !< total iteration in current increment
@@ -244,7 +250,7 @@ subroutine grid_mechanical_spectral_basic_init
   call MPI_File_open(MPI_COMM_WORLD, trim(getSolverJobName())//'.C_ref', &
                      MPI_MODE_RDONLY,MPI_INFO_NULL,fileUnit,err_MPI)
   if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
-  call MPI_File_read(fileUnit,C_minMaxAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_STATUS_IGNORE,err_MPI)
+  call MPI_File_read(fileUnit,C_minMaxAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,status,err_MPI)
   if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
   call MPI_File_close(fileUnit,err_MPI)
   if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
diff --git a/src/grid/grid_mech_spectral_polarisation.f90 b/src/grid/grid_mech_spectral_polarisation.f90
index ee87c77a6..b72cc4232 100644
--- a/src/grid/grid_mech_spectral_polarisation.f90
+++ b/src/grid/grid_mech_spectral_polarisation.f90
@@ -90,6 +90,12 @@ module grid_mechanical_spectral_polarisation
     err_curl, &              !< RMS of curl of F
     err_div                  !< RMS of div of P

+#if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
+  type(MPI_Status) :: status
+#else
+  integer, dimension(MPI_STATUS_SIZE) :: status
+#endif
+
   integer :: &
     totalIter = 0            !< total iteration in current increment
@@ -270,7 +276,7 @@ subroutine grid_mechanical_spectral_polarisation_init
   call MPI_File_open(MPI_COMM_WORLD, trim(getSolverJobName())//'.C_ref', &
                      MPI_MODE_RDONLY,MPI_INFO_NULL,fileUnit,err_MPI)
   if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
-  call MPI_File_read(fileUnit,C_minMaxAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_STATUS_IGNORE,err_MPI)
+  call MPI_File_read(fileUnit,C_minMaxAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,status,err_MPI)
   if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
   call MPI_File_close(fileUnit,err_MPI)
   if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
diff --git a/src/math.f90 b/src/math.f90
index 5c6b10fdd..dd4690672 100644
--- a/src/math.f90
+++ b/src/math.f90
@@ -898,7 +898,7 @@ end function math_33toVoigt6_stress


 !--------------------------------------------------------------------------------------------------
-!> @brief Convert 3x3 stress tensor into 6 Voigt vector.
+!> @brief Convert 3x3 strain tensor into 6 Voigt vector.
 !--------------------------------------------------------------------------------------------------
 pure function math_33toVoigt6_strain(epsilon) result(epsilon_tilde)
diff --git a/src/mesh/DAMASK_mesh.f90 b/src/mesh/DAMASK_mesh.f90
index aa156ec2d..065eca1cb 100644
--- a/src/mesh/DAMASK_mesh.f90
+++ b/src/mesh/DAMASK_mesh.f90
@@ -307,9 +307,11 @@ program DAMASK_mesh
         guess = .true.                                  ! start guessing after first converged (sub)inc
         timeIncOld = timeinc
       end if
-      if (.not. cutBack .and. worldrank == 0) &
+      if (.not. cutBack .and. worldrank == 0) then
         write(statUnit,*) totalIncsCounter, time, cutBackLevel, &
                           solres%converged, solres%iterationsNeeded ! write statistics about accepted solution
+        flush(statUnit)
+      endif
     end do subStepLooping

     cutBackLevel = max(0, cutBackLevel - 1)             ! try half number of subincs next inc
diff --git a/src/parallelization.f90 b/src/parallelization.f90
index 29deaf724..28ad70d94 100644
--- a/src/parallelization.f90
+++ b/src/parallelization.f90
@@ -52,13 +52,13 @@ contains
 !--------------------------------------------------------------------------------------------------
 subroutine parallelization_init

-  integer(MPI_INTEGER_KIND) :: err_MPI, typeSize
+  integer(MPI_INTEGER_KIND) :: err_MPI, typeSize, version, subversion, devNull
   character(len=4) :: rank_str
+  character(len=MPI_MAX_LIBRARY_VERSION_STRING) :: MPI_library_version
 !$ integer :: got_env, threadLevel
 !$ integer(pI32) :: OMP_NUM_THREADS
 !$ character(len=6) NumThreadsString
-  PetscErrorCode :: err_PETSc

 #ifdef _OPENMP
   ! If openMP is enabled, check if the MPI libary supports it and initialize accordingly.
@@ -86,12 +86,22 @@ subroutine parallelization_init
   if (err_MPI /= 0_MPI_INTEGER_KIND) &
     error stop 'Could not determine worldrank'

-  if (worldrank == 0) print'(/,1x,a)', '<<<+- parallelization init -+>>>'
+  if (worldrank == 0) then
+    print'(/,1x,a)', '<<<+- parallelization init -+>>>'
+
+    call MPI_Get_library_version(MPI_library_version,devNull,err_MPI)
+    print'(/,1x,a)', trim(MPI_library_version)
+    call MPI_Get_version(version,subversion,err_MPI)
+    print'(1x,a,i0,a,i0)', 'MPI standard: ',version,'.',subversion
+#ifdef _OPENMP
+    print'(1x,a,i0)', 'OpenMP version: ',openmp_version
+#endif
+  end if

   call MPI_Comm_size(MPI_COMM_WORLD,worldsize,err_MPI)
   if (err_MPI /= 0_MPI_INTEGER_KIND) &
     error stop 'Could not determine worldsize'
-  if (worldrank == 0) print'(/,1x,a,i3)', 'MPI processes: ',worldsize
+  if (worldrank == 0) print'(/,1x,a,i0)', 'MPI processes: ',worldsize

   call MPI_Type_size(MPI_INTEGER,typeSize,err_MPI)
   if (err_MPI /= 0_MPI_INTEGER_KIND) &
@@ -128,7 +138,7 @@ subroutine parallelization_init
 !$   OMP_NUM_THREADS = 4_pI32
 !$  endif
 !$ endif
-!$ print'(1x,a,1x,i2)', 'OMP_NUM_THREADS:',OMP_NUM_THREADS
+!$ print'(1x,a,i0)', 'OMP_NUM_THREADS: ',OMP_NUM_THREADS
 !$ call omp_set_num_threads(OMP_NUM_THREADS)

 end subroutine parallelization_init
diff --git a/src/phase.f90 b/src/phase.f90
index 098cdebe8..3de551131 100644
--- a/src/phase.f90
+++ b/src/phase.f90
@@ -323,7 +323,6 @@ module phase
     phase_restore, &
     plastic_nonlocal_updateCompatibility, &
     converged, &
-    crystallite_init, &
     phase_mechanical_constitutive, &
     phase_thermal_constitutive, &
     phase_damage_constitutive, &
@@ -401,6 +400,8 @@ subroutine phase_init
   call damage_init
   call thermal_init(phases)

+  call crystallite_init()
+
 end subroutine phase_init
@@ -502,22 +503,13 @@ subroutine crystallite_init()
     ce, &
     co, &                        !< counter in integration point component loop
     ip, &                        !< counter in integration point loop
-    el, &                        !< counter in element loop
-    cMax, &                      !< maximum number of integration point components
-    iMax, &                      !< maximum number of integration points
-    eMax                         !< maximum number of elements
+    el                           !< counter in element loop

   class(tNode), pointer :: &
     num_crystallite, &
     phases

-  print'(/,1x,a)', '<<<+- crystallite init -+>>>'
-
-  cMax = homogenization_maxNconstituents
-  iMax = discretization_nIPs
-  eMax = discretization_Nelems
-
   num_crystallite => config_numerics%get('crystallite',defaultVal=emptyDict)

   num%subStepMinCryst = num_crystallite%get_asFloat ('subStepMin', defaultVal=1.0e-3_pReal)
@@ -551,15 +543,9 @@ subroutine crystallite_init()

   phases => config_material%get('phase')

-  print'(/,a42,1x,i10)', ' # of elements: ', eMax
-  print'( a42,1x,i10)', ' # of integration points/element: ', iMax
-  print'( a42,1x,i10)', 'max # of constituents/integration point: ', cMax
-  flush(IO_STDOUT)
-
-
   !$OMP PARALLEL DO PRIVATE(ce)
-  do el = 1, eMax
-    do ip = 1, iMax
+  do el = 1, discretization_Nelems
+    do ip = 1, discretization_nIPs
       ce = (el-1)*discretization_nIPs + ip
       do co = 1,homogenization_Nconstituents(material_homogenizationID(ce))
         call crystallite_orientations(co,ip,el)
diff --git a/src/phase_mechanical.f90 b/src/phase_mechanical.f90
index 733219d56..eb758d8ed 100644
--- a/src/phase_mechanical.f90
+++ b/src/phase_mechanical.f90
@@ -879,10 +879,10 @@ function integrateStateRK(F_0,F,subFp0,subFi0,subState0,Delta_t,co,ip,el,A,B,C,D
     plasticState(ph)%state(1:sizeDotState,en) = subState0 &
                                               + dotState * Delta_t

-    broken = integrateStress(F_0 + (F - F_0) * Delta_t * C(stage),subFp0,subFi0,Delta_t * C(stage),co,ip,el)
+    broken = integrateStress(F_0 + (F-F_0) * Delta_t*C(stage),subFp0,subFi0,Delta_t*C(stage),co,ip,el)
     if(broken) exit

-    dotState = plastic_dotState(Delta_t, co,ip,el,ph,en)
+    dotState = plastic_dotState(Delta_t*C(stage), co,ip,el,ph,en)
     if (any(IEEE_is_NaN(dotState))) exit

   enddo
diff --git a/src/phase_mechanical_eigen_thermalexpansion.f90 b/src/phase_mechanical_eigen_thermalexpansion.f90
index a5d9868a8..3c422616b 100644
--- a/src/phase_mechanical_eigen_thermalexpansion.f90
+++ b/src/phase_mechanical_eigen_thermalexpansion.f90
@@ -27,7 +27,7 @@ module function thermalexpansion_init(kinematics_length) result(myKinematics)
   integer, intent(in) :: kinematics_length
   logical, dimension(:,:), allocatable :: myKinematics

-  integer :: Ninstances,p,i,k
+  integer :: Ninstances, p, k
   class(tNode), pointer :: &
     phases, &
     phase, &
diff --git a/src/system_routines.f90 b/src/system_routines.f90
index 7a4e41a57..2eb0b7958 100644
--- a/src/system_routines.f90
+++ b/src/system_routines.f90
@@ -24,7 +24,7 @@ module system_routines
   function setCWD_C(cwd) bind(C)
     use, intrinsic :: ISO_C_Binding, only: C_INT, C_CHAR
-
+
     integer(C_INT) :: setCWD_C
     character(kind=C_CHAR), dimension(*), intent(in) :: cwd
   end function setCWD_C
@@ -150,14 +150,14 @@ function getUserName()
     getUserName = c_f_string(getUserName_Cstring)
   else
     getUserName = 'n/a (Error!)'
-  endif
+  end if

 end function getUserName


 !--------------------------------------------------------------------------------------------------
-!> @brief convert C string to Fortran string
-!> @details: C string is NULL terminated and, hence, longer by one than the Fortran string
+!> @brief Convert C string to Fortran string.
+!> @details: C string is NULL terminated and, hence, longer by one than the Fortran string.
 !--------------------------------------------------------------------------------------------------
 pure function c_f_string(c_string) result(f_string)
@@ -174,28 +174,23 @@ pure function c_f_string(c_string) result(f_string)
     else
       f_string = f_string(:i-1)
       exit
-    endif
-  enddo arrayToString
+    end if
+  end do arrayToString

 end function c_f_string


 !--------------------------------------------------------------------------------------------------
-!> @brief convert Fortran string to C string
-!> @details: C string is NULL terminated and, hence, longer by one than the Fortran string
+!> @brief Convert Fortran string to C string.
+!> @details: C string is NULL terminated and, hence, longer by one than the Fortran string.
 !--------------------------------------------------------------------------------------------------
 pure function f_c_string(f_string) result(c_string)

   character(len=*), intent(in) :: f_string
   character(kind=C_CHAR), dimension(len_trim(f_string)+1) :: c_string

-  integer :: i
-
-  do i=1,len_trim(f_string)
-    c_string(i)=f_string(i:i)
-  enddo
-  c_string(len_trim(f_string)+1) = C_NULL_CHAR
+  c_string = transfer(trim(f_string)//C_NULL_CHAR,c_string,size=size(c_string))

 end function f_c_string
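
Note (illustration only, not part of the patch): the final hunk replaces the element-wise copy loop in f_c_string with a single TRANSFER of the trimmed string plus the terminating NUL. Below is a minimal standalone sketch of the same idiom; the program name and the example string are made up for demonstration and do not appear in DAMASK.

program demo_f_c_string
  use, intrinsic :: ISO_C_binding, only: C_CHAR, C_NULL_CHAR

  implicit none

  character(len=*), parameter :: f_string = 'DAMASK'                   ! example input (assumption)
  character(kind=C_CHAR), dimension(len_trim(f_string)+1) :: c_string  ! room for trailing NUL

  ! pack the trimmed Fortran string plus the terminating NUL into the
  ! C-style character array in one statement instead of an element-wise loop
  c_string = transfer(trim(f_string)//C_NULL_CHAR,c_string,size=size(c_string))

  print '(a,i0)', 'C string length incl. NUL: ', size(c_string)

end program demo_f_c_string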