From 35f9861818bbb3b3e6fdfd1d6f520c5fddc531c6 Mon Sep 17 00:00:00 2001
From: Martin Diehl
Date: Fri, 18 Dec 2020 15:19:04 +0100
Subject: [PATCH] Fortran standard is 2018

will not work for older compilers
---
 CMakeLists.txt                               |  76 +++------
 PRIVATE                                      |   2 +-
 cmake/Compiler-Intel.cmake                   |   2 +-
 python/damask/_config.py                     |   3 +
 python/tests/test_Table.py                   |   2 +-
 src/DAMASK_interface.f90                     |   8 +-
 src/grid/grid_damage_spectral.f90            |   3 +-
 src/grid/grid_mech_FEM.f90                   |   9 +-
 src/grid/grid_mech_spectral_basic.f90        |   9 +-
 src/grid/grid_mech_spectral_polarisation.f90 |   9 +-
 src/grid/grid_thermal_spectral.f90           |   3 +-
 src/grid/spectral_utilities.f90              |  23 ++-
 src/material.f90                             |   2 +-
 src/mesh/mesh_mech_FEM.f90                   |  33 +---
 src/results.f90                              | 169 ++++++++++---------
 15 files changed, 147 insertions(+), 206 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index dd2348fd1..8db6dd0c0 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,6 +1,18 @@
-########################################################################################
-# Compiler options for building DAMASK
-cmake_minimum_required (VERSION 3.10.0 FATAL_ERROR)
+cmake_minimum_required (VERSION 3.10.0)
+include (FindPkgConfig REQUIRED)
+
+# Dummy project to determine compiler names and version
+project (Prerequisites LANGUAGES)
+set(ENV{PKG_CONFIG_PATH} "$ENV{PETSC_DIR}/$ENV{PETSC_ARCH}/lib/pkgconfig")
+pkg_search_module (PETSC REQUIRED PETSc>3.12.0)
+pkg_get_variable (CMAKE_Fortran_COMPILER PETSc fcompiler)
+pkg_get_variable (CMAKE_C_COMPILER PETSc ccompiler)
+
+find_program (CAT_EXECUTABLE NAMES cat)
+execute_process (COMMAND ${CAT_EXECUTABLE} ${PROJECT_SOURCE_DIR}/VERSION
+                 RESULT_VARIABLE DAMASK_VERSION_RETURN
+                 OUTPUT_VARIABLE DAMASK_VERSION
+                 OUTPUT_STRIP_TRAILING_WHITESPACE)
 
 #---------------------------------------------------------------------------------------
 # Find PETSc from system environment
@@ -28,19 +40,10 @@ include ${petsc_conf_rules}
 include ${petsc_conf_variables}
 INCLUDE_DIRS := \${PETSC_FC_INCLUDES}
 LIBRARIES := \${PETSC_WITH_EXTERNAL_LIB}
-COMPILERF := \${FC}
-COMPILERC := \${CC}
-LINKERNAME := \${FLINKER}
 includes:
 \t@echo \${INCLUDE_DIRS}
 extlibs:
 \t@echo \${LIBRARIES}
-compilerf:
-\t@echo \${COMPILERF}
-compilerc:
-\t@echo \${COMPILERC}
-linker:
-\t@echo \${LINKERNAME}
 ")
 
 # CMake will execute each target in the ${petsc_config_makefile}
@@ -52,26 +55,10 @@ execute_process (COMMAND ${MAKE_EXECUTABLE} --no-print-directory -f ${petsc_conf
                  OUTPUT_VARIABLE petsc_includes
                  OUTPUT_STRIP_TRAILING_WHITESPACE)
 # Find the PETSc external linking directory settings
-# required for final linking, must be appended after the executable
 execute_process (COMMAND ${MAKE_EXECUTABLE} --no-print-directory -f ${petsc_config_makefile} "extlibs"
                  RESULT_VARIABLE PETSC_EXTERNAL_LIB_RETURN
                  OUTPUT_VARIABLE petsc_external_lib
                  OUTPUT_STRIP_TRAILING_WHITESPACE)
-# PETSc specified fortran compiler
-execute_process (COMMAND ${MAKE_EXECUTABLE} --no-print-directory -f ${petsc_config_makefile} "compilerf"
-                 RESULT_VARIABLE PETSC_MPIFC_RETURN
-                 OUTPUT_VARIABLE PETSC_MPIFC
-                 OUTPUT_STRIP_TRAILING_WHITESPACE)
-# PETSc specified C compiler
-execute_process (COMMAND ${MAKE_EXECUTABLE} --no-print-directory -f ${petsc_config_makefile} "compilerc"
-                 RESULT_VARIABLE PETSC_MPICC_RETURN
-                 OUTPUT_VARIABLE PETSC_MPICC
-                 OUTPUT_STRIP_TRAILING_WHITESPACE)
-# PETSc specified linker (Fortran compiler + PETSc linking flags)
-execute_process (COMMAND ${MAKE_EXECUTABLE} --no-print-directory -f ${petsc_config_makefile} "linker"
-                 RESULT_VARIABLE PETSC_LINKER_RETURN
-                 OUTPUT_VARIABLE PETSC_LINKER
-                 OUTPUT_STRIP_TRAILING_WHITESPACE)
 
 # Remove temporary makefile, no need to keep it anymore.
 file (REMOVE_RECURSE ${TEMPDIR})
@@ -90,14 +77,6 @@ endforeach (exlib)
 message ("Found PETSC_DIR:\n${PETSC_DIR}\n" )
 message ("Found PETSC_INCLUDES:\n${PETSC_INCLUDES}\n" )
 message ("Found PETSC_EXTERNAL_LIB:\n${PETSC_EXTERNAL_LIB}\n")
-message ("Found PETSC_LINKER:\n${PETSC_LINKER}\n" )
-message ("Found MPI Fortran Compiler:\n${PETSC_MPIFC}\n" )
-message ("Found MPI C Compiler:\n${PETSC_MPICC}\n" )
-
-# set compiler commands to match PETSc (needs to be done before defining the project)
-# https://cmake.org/Wiki/CMake_FAQ#How_do_I_use_a_different_compiler.3F
-set (CMAKE_Fortran_COMPILER "${PETSC_MPIFC}")
-set (CMAKE_C_COMPILER "${PETSC_MPICC}")
 
 #---------------------------------------------------------------------------------------
 # Now start to care about DAMASK
@@ -105,17 +84,18 @@ set (CMAKE_C_COMPILER "${PETSC_MPICC}")
 # DAMASK solver defines project to build
 string(TOLOWER ${DAMASK_SOLVER} DAMASK_SOLVER)
 if (DAMASK_SOLVER STREQUAL "grid")
-  project (damask-grid Fortran C)
+  project (damask-grid HOMEPAGE_URL https://damask.mpie.de LANGUAGES Fortran C)
   add_definitions (-DGrid)
-  message ("Building Grid Solver\n")
 elseif (DAMASK_SOLVER STREQUAL "mesh")
-  project (damask-mesh Fortran C)
+  project (damask-mesh HOMEPAGE_URL https://damask.mpie.de LANGUAGES Fortran C)
   add_definitions (-DMesh)
-  message ("Building Mesh Solver\n")
 else ()
   message (FATAL_ERROR "Build target (DAMASK_SOLVER) is not defined")
 endif ()
-list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake)
+add_definitions (-DDAMASKVERSION="${DAMASK_VERSION}")
+add_definitions (-DPETSc)
+
+message ("\nBuilding ${CMAKE_PROJECT_NAME}\n")
 
 if (CMAKE_BUILD_TYPE STREQUAL "")
   set (CMAKE_BUILD_TYPE "RELEASE")
@@ -153,17 +133,8 @@ if (CMAKE_BUILD_TYPE STREQUAL "SYNTAXONLY")
   set (BUILDCMD_POST "${BUILDCMD_POST} -fsyntax-only")
 endif ()
 
-# Parse DAMASK version from VERSION file
-find_program (CAT_EXECUTABLE NAMES cat)
-execute_process (COMMAND ${CAT_EXECUTABLE} ${PROJECT_SOURCE_DIR}/VERSION
-                 RESULT_VARIABLE DAMASK_VERSION_RETURN
-                 OUTPUT_VARIABLE DAMASK_V
-                 OUTPUT_STRIP_TRAILING_WHITESPACE)
-add_definitions (-DDAMASKVERSION="${DAMASK_V}")
-
-# definition of other macros
-add_definitions (-DPETSc)
+list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake)
 
 if (CMAKE_Fortran_COMPILER_ID STREQUAL "Intel")
   include (Compiler-Intel)
 elseif(CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")
@@ -174,9 +145,8 @@ else ()
   message (FATAL_ERROR "Compiler type (CMAKE_Fortran_COMPILER_ID) not recognized")
 endif ()
 
-
 set (CMAKE_Fortran_FLAGS_${CMAKE_BUILD_TYPE} "${BUILDCMD_PRE} ${OPENMP_FLAGS} ${STANDARD_CHECK} ${OPTIMIZATION_FLAGS} ${COMPILE_FLAGS} ${PRECISION_FLAGS}")
-set (CMAKE_Fortran_LINK_EXECUTABLE "${BUILDCMD_PRE} ${PETSC_LINKER} ${OPENMP_FLAGS} ${OPTIMIZATION_FLAGS} ${LINKER_FLAGS}")
+set (CMAKE_Fortran_LINK_EXECUTABLE "${BUILDCMD_PRE} ${CMAKE_Fortran_COMPILER} ${OPENMP_FLAGS} ${OPTIMIZATION_FLAGS} ${LINKER_FLAGS}")
 
 if (CMAKE_BUILD_TYPE STREQUAL "DEBUG")
   set (CMAKE_Fortran_FLAGS_${CMAKE_BUILD_TYPE} "${CMAKE_Fortran_FLAGS_${CMAKE_BUILD_TYPE}} ${DEBUG_FLAGS}")
diff --git a/PRIVATE b/PRIVATE
index de65e1df5..e1a1048e1 160000
--- a/PRIVATE
+++ b/PRIVATE
@@ -1 +1 @@
-Subproject commit de65e1df5a76362de93667e9820dbf330b56f96d
+Subproject commit e1a1048e1f593683b4b432d41455bd236008c3ad
diff --git a/cmake/Compiler-Intel.cmake b/cmake/Compiler-Intel.cmake
index 719ed885b..5b551069e 100644
--- a/cmake/Compiler-Intel.cmake
+++ b/cmake/Compiler-Intel.cmake
@@ -20,7 +20,7 @@ endif ()
 
 # -assume std_mod_proc_name (included in -standard-semantics) causes problems if other modules
 # (PETSc, HDF5) are not compiled with this option (https://software.intel.com/en-us/forums/intel-fortran-compiler-for-linux-and-mac-os-x/topic/62172)
-set (STANDARD_CHECK "-stand f15 -standard-semantics -assume nostd_mod_proc_name")
+set (STANDARD_CHECK "-stand f18 -standard-semantics -assume nostd_mod_proc_name")
 
 set (LINKER_FLAGS "${LINKER_FLAGS} -shared-intel")
 # Link against shared Intel libraries instead of static ones
diff --git a/python/damask/_config.py b/python/damask/_config.py
index 24245f4bd..76955588f 100644
--- a/python/damask/_config.py
+++ b/python/damask/_config.py
@@ -21,6 +21,9 @@ class NiceDumper(yaml.SafeDumper):
         return self.represent_data(dict(data)) if isinstance(data, dict) and type(data) != dict else \
                super().represent_data(data)
 
+    def ignore_aliases(self, data):
+        """No references."""
+        return True
 
 class Config(dict):
     """YAML-based configuration."""
diff --git a/python/tests/test_Table.py b/python/tests/test_Table.py
index ac5859ecb..8f617aff5 100644
--- a/python/tests/test_Table.py
+++ b/python/tests/test_Table.py
@@ -22,7 +22,7 @@ class TestTable:
 
     @pytest.mark.parametrize('N',[10,40])
     def test_len(self,N):
-        len(Table(np.random.rand(N,3),{'X':3})) == N
+        assert len(Table(np.random.rand(N,3),{'X':3})) == N
 
     def test_get_scalar(self,default):
         d = default.get('s')
diff --git a/src/DAMASK_interface.f90 b/src/DAMASK_interface.f90
index 41f421eb8..d38020225 100644
--- a/src/DAMASK_interface.f90
+++ b/src/DAMASK_interface.f90
@@ -10,7 +10,7 @@
 !> and working directory.
 !--------------------------------------------------------------------------------------------------
 #define PETSC_MAJOR 3
-#define PETSC_MINOR_MIN 10
+#define PETSC_MINOR_MIN 12
 #define PETSC_MINOR_MAX 14
 
 module DAMASK_interface
@@ -392,7 +392,7 @@ end function makeRelativePath
 subroutine catchSIGTERM(signal) bind(C)
 
   integer(C_INT), value :: signal
-  interface_SIGTERM = .true.
+  call interface_setSIGTERM(.true.)
 
   print'(a,i0,a)', ' received signal ',signal, ', set SIGTERM=TRUE'
 
@@ -417,7 +417,7 @@ end subroutine interface_setSIGTERM
 subroutine catchSIGUSR1(signal) bind(C)
 
   integer(C_INT), value :: signal
-  interface_SIGUSR1 = .true.
+  call interface_setSIGUSR1(.true.)
 
   print'(a,i0,a)', ' received signal ',signal, ', set SIGUSR1=TRUE'
 
@@ -442,7 +442,7 @@ end subroutine interface_setSIGUSR1
 subroutine catchSIGUSR2(signal) bind(C)
 
   integer(C_INT), value :: signal
-  interface_SIGUSR2 = .true.
+  call interface_setSIGUSR2(.true.)
 
   print'(a,i0,a)', ' received signal ',signal, ', set SIGUSR2=TRUE'
 
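
Note on the three signal-handler hunks above: the C-bound handlers no longer assign the
module flags directly but route every change through the existing
interface_setSIGTERM/SIGUSR1/SIGUSR2 setters. A minimal self-contained sketch of that
pattern follows; the module name and the 'protected' attribute are illustrative
assumptions, not code from this patch.

  module signals
    use, intrinsic :: ISO_C_binding, only: C_INT
    implicit none
    logical, protected, public :: interface_SIGTERM = .false.  ! readable everywhere, writable only inside this module

  contains

    subroutine catchSIGTERM(signal) bind(C)                     ! registered as the C signal handler
      integer(C_INT), value :: signal

      call interface_setSIGTERM(.true.)                         ! all state changes funnel through one routine
      print'(a,i0,a)', ' received signal ',signal, ', set SIGTERM=TRUE'
    end subroutine catchSIGTERM

    subroutine interface_setSIGTERM(state)                      ! single point to set or clear the flag
      logical, intent(in) :: state

      interface_SIGTERM = state
    end subroutine interface_setSIGTERM

  end module signals
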
diff --git a/src/grid/grid_damage_spectral.f90 b/src/grid/grid_damage_spectral.f90
index 4c014f3c0..79437945b 100644
--- a/src/grid/grid_damage_spectral.f90
+++ b/src/grid/grid_damage_spectral.f90
@@ -203,8 +203,7 @@ function grid_damage_spectral_solution(timeinc) result(solution)
   call VecMax(solution_vec,devNull,phi_max,ierr); CHKERRQ(ierr)
   if (solution%converged) &
     print'(/,a)', ' ... nonlocal damage converged .....................................'
-  write(IO_STDOUT,'(/,a,f8.6,2x,f8.6,2x,e11.4,/)',advance='no') ' Minimum|Maximum|Delta Damage = ',&
-        phi_min, phi_max, stagNorm
+  print'(/,a,f8.6,2x,f8.6,2x,e11.4)', ' Minimum|Maximum|Delta Damage = ', phi_min, phi_max, stagNorm
   print'(/,a)', ' ==========================================================================='
   flush(IO_STDOUT)
 
diff --git a/src/grid/grid_mech_FEM.f90 b/src/grid/grid_mech_FEM.f90
index 146f28567..741ce404a 100644
--- a/src/grid/grid_mech_FEM.f90
+++ b/src/grid/grid_mech_FEM.f90
@@ -509,11 +509,10 @@ subroutine formResidual(da_local,x_local, &
   newIteration: if (totalIter <= PETScIter) then
     totalIter = totalIter + 1
     print'(1x,a,3(a,i0))', trim(incInfo), ' @ Iteration ', num%itmin, '≤',totalIter+1, '≤', num%itmax
-    if (debugRotation) &
-      write(IO_STDOUT,'(/,a,/,3(3(f12.7,1x)/))',advance='no') &
-        ' deformation gradient aim (lab) =', transpose(params%rotation_BC%rotate(F_aim,active=.true.))
-    write(IO_STDOUT,'(/,a,/,3(3(f12.7,1x)/))',advance='no') &
-      ' deformation gradient aim =', transpose(F_aim)
+    if (debugRotation) print'(/,a,/,2(3(f12.7,1x)/),3(f12.7,1x))', &
+      ' deformation gradient aim (lab) =', transpose(params%rotation_BC%rotate(F_aim,active=.true.))
+    print'(/,a,/,2(3(f12.7,1x)/),3(f12.7,1x))', &
+      ' deformation gradient aim =', transpose(F_aim)
     flush(IO_STDOUT)
   endif newIteration
 
diff --git a/src/grid/grid_mech_spectral_basic.f90 b/src/grid/grid_mech_spectral_basic.f90
index 4f5ceff61..d87a22fb7 100644
--- a/src/grid/grid_mech_spectral_basic.f90
+++ b/src/grid/grid_mech_spectral_basic.f90
@@ -471,11 +471,10 @@ subroutine formResidual(in, F, &
   newIteration: if (totalIter <= PETScIter) then
     totalIter = totalIter + 1
     print'(1x,a,3(a,i0))', trim(incInfo), ' @ Iteration ', num%itmin, '≤',totalIter, '≤', num%itmax
-    if(debugRotation) &
-      write(IO_STDOUT,'(/,a,/,3(3(f12.7,1x)/))',advance='no') &
-        ' deformation gradient aim (lab) =', transpose(params%rotation_BC%rotate(F_aim,active=.true.))
-    write(IO_STDOUT,'(/,a,/,3(3(f12.7,1x)/))',advance='no') &
-      ' deformation gradient aim =', transpose(F_aim)
+    if (debugRotation) print'(/,a,/,2(3(f12.7,1x)/),3(f12.7,1x))', &
+      ' deformation gradient aim (lab) =', transpose(params%rotation_BC%rotate(F_aim,active=.true.))
+    print'(/,a,/,2(3(f12.7,1x)/),3(f12.7,1x))', &
+      ' deformation gradient aim =', transpose(F_aim)
     flush(IO_STDOUT)
   endif newIteration
 
diff --git a/src/grid/grid_mech_spectral_polarisation.f90 b/src/grid/grid_mech_spectral_polarisation.f90
index d09b7fcb2..9bbb40a53 100644
--- a/src/grid/grid_mech_spectral_polarisation.f90
+++ b/src/grid/grid_mech_spectral_polarisation.f90
@@ -552,11 +552,10 @@ subroutine formResidual(in, FandF_tau, &
   newIteration: if (totalIter <= PETScIter) then
     totalIter = totalIter + 1
     print'(1x,a,3(a,i0))', trim(incInfo), ' @ Iteration ', num%itmin, '≤',totalIter, '≤', num%itmax
-    if (debugRotation) &
-      write(IO_STDOUT,'(/,a,/,3(3(f12.7,1x)/))',advance='no') &
-        ' deformation gradient aim (lab) =', transpose(params%rotation_BC%rotate(F_aim,active=.true.))
-    write(IO_STDOUT,'(/,a,/,3(3(f12.7,1x)/))',advance='no') &
-      ' deformation gradient aim =', transpose(F_aim)
+    if (debugRotation) print'(/,a,/,2(3(f12.7,1x)/),3(f12.7,1x))', &
+      ' deformation gradient aim (lab) =', transpose(params%rotation_BC%rotate(F_aim,active=.true.))
+    print'(/,a,/,2(3(f12.7,1x)/),3(f12.7,1x))', &
+      ' deformation gradient aim =', transpose(F_aim)
     flush(IO_STDOUT)
   endif newIteration
 
diff --git a/src/grid/grid_thermal_spectral.f90 b/src/grid/grid_thermal_spectral.f90
index 68a1c5ed1..f5d1a33bc 100644
--- a/src/grid/grid_thermal_spectral.f90
+++ b/src/grid/grid_thermal_spectral.f90
@@ -197,8 +197,7 @@ function grid_thermal_spectral_solution(timeinc) result(solution)
   call VecMax(solution_vec,devNull,T_max,ierr); CHKERRQ(ierr)
   if (solution%converged) &
     print'(/,a)', ' ... thermal conduction converged ..................................'
-  write(IO_STDOUT,'(/,a,f8.4,2x,f8.4,2x,f8.4,/)',advance='no') ' Minimum|Maximum|Delta Temperature / K = ',&
-        T_min, T_max, stagNorm
+  print'(/,a,f8.4,2x,f8.4,2x,f8.4)', ' Minimum|Maximum|Delta Temperature / K = ', T_min, T_max, stagNorm
   print'(/,a)', ' ==========================================================================='
   flush(IO_STDOUT)
 
diff --git a/src/grid/spectral_utilities.f90 b/src/grid/spectral_utilities.f90
index 6d6e26cae..989448dc3 100644
--- a/src/grid/spectral_utilities.f90
+++ b/src/grid/spectral_utilities.f90
@@ -688,8 +688,8 @@ function utilities_maskedCompliance(rot_BC,mask_stress,C)
 
   if(debugGeneral) then
     print'(/,a)', ' ... updating masked compliance ............................................'
-    write(IO_STDOUT,'(/,a,/,9(9(2x,f12.7,1x)/))',advance='no') ' Stiffness C (load) / GPa =',&
-      transpose(temp99_Real)*1.0e-9_pReal
+    print'(/,a,/,8(9(2x,f12.7,1x)/),9(2x,f12.7,1x))', &
+      ' Stiffness C (load) / GPa =', transpose(temp99_Real)*1.0e-9_pReal
     flush(IO_STDOUT)
   endif
 
@@ -709,9 +709,8 @@ function utilities_maskedCompliance(rot_BC,mask_stress,C)
     if (debugGeneral .or. errmatinv) then
       write(formatString, '(i2)') size_reduced
       formatString = '(/,a,/,'//trim(formatString)//'('//trim(formatString)//'(2x,es9.2,1x)/))'
-      write(IO_STDOUT,trim(formatString),advance='no') ' C * S (load) ', &
-        transpose(matmul(c_reduced,s_reduced))
-      write(IO_STDOUT,trim(formatString),advance='no') ' S (load) ', transpose(s_reduced)
+      print trim(formatString), ' C * S (load) ', transpose(matmul(c_reduced,s_reduced))
+      print trim(formatString), ' S (load) ', transpose(s_reduced)
       if(errmatinv) error stop 'matrix inversion error'
     endif
     temp99_real = reshape(unpack(reshape(s_reduced,[size_reduced**2]),reshape(mask,[81]),0.0_pReal),[9,9])
@@ -722,7 +721,7 @@ function utilities_maskedCompliance(rot_BC,mask_stress,C)
   utilities_maskedCompliance = math_99to3333(temp99_Real)
 
   if(debugGeneral) then
-    write(IO_STDOUT,'(/,a,/,9(9(2x,f10.5,1x)/),/)',advance='no') &
+    print'(/,a,/,9(9(2x,f10.5,1x)/),9(2x,f10.5,1x))', &
      ' Masked Compliance (load) * GPa =', transpose(temp99_Real)*1.0e9_pReal
     flush(IO_STDOUT)
   endif
@@ -818,13 +817,11 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,&
 
   P = reshape(homogenization_P, [3,3,grid(1),grid(2),grid3])
   P_av = sum(sum(sum(P,dim=5),dim=4),dim=3) * wgt                                                   ! average of P
   call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
-  if (debugRotation) &
-    write(IO_STDOUT,'(/,a,/,3(3(2x,f12.4,1x)/))',advance='no') ' Piola--Kirchhoff stress (lab) / MPa =',&
-      transpose(P_av)*1.e-6_pReal
-  if(present(rotation_BC)) &
-    P_av = rotation_BC%rotate(P_av)
-  write(IO_STDOUT,'(/,a,/,3(3(2x,f12.4,1x)/))',advance='no') ' Piola--Kirchhoff stress / MPa =',&
-    transpose(P_av)*1.e-6_pReal
+  if (debugRotation) print'(/,a,/,2(3(2x,f12.4,1x)/),3(2x,f12.4,1x))', &
+    ' Piola--Kirchhoff stress (lab) / MPa =', transpose(P_av)*1.e-6_pReal
+  if(present(rotation_BC)) P_av = rotation_BC%rotate(P_av)
+  print'(/,a,/,2(3(2x,f12.4,1x)/),3(2x,f12.4,1x))', &
+    ' Piola--Kirchhoff stress / MPa =', transpose(P_av)*1.e-6_pReal
   flush(IO_STDOUT)
 
   dPdF_max = 0.0_pReal
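
Note on the print reworks above: a format such as 3(3(f12.7,1x)/) relied on
write(...,advance='no') to suppress the record started by the last '/'; print always
advances, so keeping that format would emit an extra blank line. The repeat count is
therefore split so the final row carries no trailing slash (the 9x9 stiffness print
becomes 8(9(...)/),9(...) the same way). A stand-alone sketch with made-up data, not
DAMASK code:

  program print_format_demo
    implicit none
    real :: F(3,3)
    integer :: i

    F = reshape([(real(i)*0.1, i=1,9)], [3,3])                  ! arbitrary 3x3 test matrix

    ! two rows each followed by '/', then the last row without one:
    print'(/,a,/,2(3(f12.7,1x)/),3(f12.7,1x))', ' deformation gradient aim =', transpose(F)
  end program print_format_demo
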
diff --git a/src/material.f90 b/src/material.f90
index 223ea6ed8..b05979298 100644
--- a/src/material.f90
+++ b/src/material.f90
@@ -176,7 +176,7 @@ subroutine material_init(restart)
 
   if (.not. restart) then
     call results_openJobFile
-    call results_mapping_constituent(material_phaseAt,material_phaseMemberAt,material_name_phase)
+    call results_mapping_phase(material_phaseAt,material_phaseMemberAt,material_name_phase)
     call results_mapping_homogenization(material_homogenizationAt,material_homogenizationMemberAt,material_name_homogenization)
     call results_closeJobFile
   endif
diff --git a/src/mesh/mesh_mech_FEM.f90 b/src/mesh/mesh_mech_FEM.f90
index b6ce1e175..eb5f862c2 100644
--- a/src/mesh/mesh_mech_FEM.f90
+++ b/src/mesh/mesh_mech_FEM.f90
@@ -146,14 +146,9 @@ subroutine FEM_mech_init(fieldBC)
   call PetscFESetQuadrature(mechFE,mechQuad,ierr); CHKERRQ(ierr)
   call PetscFEGetDimension(mechFE,nBasis,ierr); CHKERRQ(ierr)
   nBasis = nBasis/nc
-#if (PETSC_VERSION_MINOR > 10)
   call DMAddField(mech_mesh,PETSC_NULL_DMLABEL,mechFE,ierr); CHKERRQ(ierr)
   call DMCreateDS(mech_mesh,ierr); CHKERRQ(ierr)
-#endif
   call DMGetDS(mech_mesh,mechDS,ierr); CHKERRQ(ierr)
-#if (PETSC_VERSION_MINOR < 11)
-  call PetscDSAddDiscretization(mechDS,mechFE,ierr); CHKERRQ(ierr)
-#endif
   call PetscDSGetTotalDimension(mechDS,cellDof,ierr); CHKERRQ(ierr)
   call PetscFEDestroy(mechFE,ierr); CHKERRQ(ierr)
   call PetscQuadratureDestroy(mechQuad,ierr); CHKERRQ(ierr)
@@ -162,11 +157,7 @@ subroutine FEM_mech_init(fieldBC)
 ! Setup FEM mech boundary conditions
   call DMGetLabel(mech_mesh,'Face Sets',BCLabel,ierr); CHKERRQ(ierr)
   call DMPlexLabelComplete(mech_mesh,BCLabel,ierr); CHKERRQ(ierr)
-#if (PETSC_VERSION_MINOR < 12)
-  call DMGetSection(mech_mesh,section,ierr); CHKERRQ(ierr)
-#else
   call DMGetLocalSection(mech_mesh,section,ierr); CHKERRQ(ierr)
-#endif
   allocate(pnumComp(1), source=dimPlex)
   allocate(pnumDof(0:dimPlex), source = 0)
   do topologDim = 0, dimPlex
@@ -204,14 +195,8 @@ subroutine FEM_mech_init(fieldBC)
       endif
     endif
   enddo; enddo
-#if (PETSC_VERSION_MINOR < 11)
-  call DMPlexCreateSection(mech_mesh,dimPlex,1,pNumComp,pNumDof, &
-                           numBC,pBcField,pBcComps,pBcPoints,PETSC_NULL_IS,section,ierr)
-#else
   call DMPlexCreateSection(mech_mesh,nolabel,pNumComp,pNumDof, &
                            numBC,pBcField,pBcComps,pBcPoints,PETSC_NULL_IS,section,ierr)
-
-#endif
   CHKERRQ(ierr)
   call DMSetSection(mech_mesh,section,ierr); CHKERRQ(ierr)
   do faceSet = 1, numBC
@@ -266,11 +251,7 @@ subroutine FEM_mech_init(fieldBC)
       x_scal(basis+1:basis+dimPlex) = pV0 + matmul(transpose(cellJMat),nodalPointsP + 1.0_pReal)
     enddo
     px_scal => x_scal
-#if (PETSC_VERSION_MINOR < 11)
-    call DMPlexVecSetClosure(mech_mesh,section,solution_local,cell,px_scal,INSERT_ALL_VALUES,ierr)
-#else
-    call DMPlexVecSetClosure(mech_mesh,section,solution_local,cell,px_scal,5,ierr)                  ! PETSc: cbee0a90b60958e5c50c89b1e41f4451dfa6008c
-#endif
+    call DMPlexVecSetClosure(mech_mesh,section,solution_local,cell,px_scal,5,ierr)
     CHKERRQ(ierr)
   enddo
 
@@ -353,11 +334,7 @@ subroutine FEM_mech_formResidual(dm_local,xx_local,f_local,dummy,ierr)
   allocate(pinvcellJ(dimPlex**2))
   allocate(x_scal(cellDof))
 
-#if (PETSC_VERSION_MINOR < 12)
-  call DMGetSection(dm_local,section,ierr); CHKERRQ(ierr)
-#else
   call DMGetLocalSection(dm_local,section,ierr); CHKERRQ(ierr)
-#endif
   call DMGetDS(dm_local,prob,ierr); CHKERRQ(ierr)
   call PetscDSGetTabulation(prob,0,basisField,basisFieldDer,ierr)
   CHKERRQ(ierr)
@@ -500,11 +477,7 @@ subroutine FEM_mech_formJacobian(dm_local,xx_local,Jac_pre,Jac,dummy,ierr)
   call MatZeroEntries(Jac,ierr); CHKERRQ(ierr)
   call DMGetDS(dm_local,prob,ierr); CHKERRQ(ierr)
   call PetscDSGetTabulation(prob,0,basisField,basisFieldDer,ierr)
-#if (PETSC_VERSION_MINOR < 12)
-  call DMGetSection(dm_local,section,ierr); CHKERRQ(ierr)
-#else
   call DMGetLocalSection(dm_local,section,ierr); CHKERRQ(ierr)
-#endif
   call DMGetGlobalSection(dm_local,gSection,ierr); CHKERRQ(ierr)
 
   call DMGetLocalVector(dm_local,x_local,ierr); CHKERRQ(ierr)
@@ -684,8 +657,8 @@ subroutine FEM_mech_converged(snes_local,PETScIter,xnorm,snorm,fnorm,reason,dumm
   print'(/,1x,a,a,i0,a,i0,f0.3)', trim(incInfo), &
                 ' @ Iteration ',PETScIter,' mechanical residual norm = ', &
                 int(fnorm/divTol),fnorm/divTol-int(fnorm/divTol)
-  write(IO_STDOUT,'(/,a,/,3(3(2x,f12.4,1x)/))',advance='no') ' Piola--Kirchhoff stress / MPa =',&
-    transpose(P_av)*1.e-6_pReal
+  print'(/,a,/,2(3(2x,f12.4,1x)/),3(2x,f12.4,1x))', &
+    ' Piola--Kirchhoff stress / MPa =',transpose(P_av)*1.e-6_pReal
   flush(IO_STDOUT)
 
 end subroutine FEM_mech_converged
diff --git a/src/results.f90 b/src/results.f90
index f15ad4e4a..ea9fd62d4 100644
--- a/src/results.f90
+++ b/src/results.f90
@@ -49,7 +49,7 @@ module results
             results_setLink, &
             results_addAttribute, &
             results_removeLink, &
-            results_mapping_constituent, &
+            results_mapping_phase, &
             results_mapping_homogenization
 
 contains
@@ -461,7 +461,7 @@ end subroutine results_writeTensorDataset_int
 !--------------------------------------------------------------------------------------------------
 !> @brief adds the unique mapping from spatial position and constituent ID to results
 !--------------------------------------------------------------------------------------------------
-subroutine results_mapping_constituent(phaseAt,memberAtLocal,label)
+subroutine results_mapping_phase(phaseAt,memberAtLocal,label)
 
   integer,          dimension(:,:),   intent(in) :: phaseAt       !< phase section at (constituent,element)
   integer,          dimension(:,:,:), intent(in) :: memberAtLocal !< phase member at (constituent,IP,element)
@@ -491,6 +491,47 @@ subroutine results_mapping_constituent(phaseAt,memberAtLocal,label)
   integer(SIZE_T)   :: type_size_string, type_size_int
   integer :: hdferr, ierr, i
 
+!--------------------------------------------------------------------------------------------------
+! prepare MPI communication (transparent for non-MPI runs)
+  call h5pcreate_f(H5P_DATASET_XFER_F, plist_id, hdferr)
+  if(hdferr < 0) error stop 'HDF5 error'
+  memberOffset = 0
+  do i=1, size(label)
+    memberOffset(i,worldrank) = count(phaseAt == i)*size(memberAtLocal,2)                           ! number of points/instance of this process
+  enddo
+  writeSize = 0
+  writeSize(worldrank) = size(memberAtLocal(1,:,:))                                                 ! total number of points by this process
+
+!--------------------------------------------------------------------------------------------------
+! MPI settings and communication
+#ifdef PETSc
+  call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr)
+  if(hdferr < 0) error stop 'HDF5 error'
+
+  call MPI_allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr)        ! get output at each process
+  if(ierr /= 0) error stop 'MPI error'
+
+  call MPI_allreduce(MPI_IN_PLACE,memberOffset,size(memberOffset),MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr)! get offset at each process
+  if(ierr /= 0) error stop 'MPI error'
+#endif
+
+  myShape    = int([size(phaseAt,1),writeSize(worldrank)], HSIZE_T)
+  myOffset   = int([0,sum(writeSize(0:worldrank-1))], HSIZE_T)
+  totalShape = int([size(phaseAt,1),sum(writeSize)], HSIZE_T)
+
+
+!---------------------------------------------------------------------------------------------------
+! expand phaseAt to consider IPs (is not stored per IP)
+  do i = 1, size(phaseAtMaterialpoint,2)
+    phaseAtMaterialpoint(:,i,:) = phaseAt
+  enddo
+
+!---------------------------------------------------------------------------------------------------
+! renumber member from my process to all processes
+  do i = 1, size(label)
+    where(phaseAtMaterialpoint == i) memberAtGlobal = memberAtLocal + sum(memberOffset(i,0:worldrank-1)) -1 ! convert to 0-based
+  enddo
+
 !---------------------------------------------------------------------------------------------------
 ! compound type: name of phase section + position/index within results array
   call h5tcopy_f(H5T_NATIVE_CHARACTER, dt_id, hdferr)
@@ -525,34 +566,6 @@ subroutine results_mapping_constituent(phaseAt,memberAtLocal,label)
   call h5tclose_f(dt_id, hdferr)
   if(hdferr < 0) error stop 'HDF5 error'
 
-!--------------------------------------------------------------------------------------------------
-! prepare MPI communication (transparent for non-MPI runs)
-  call h5pcreate_f(H5P_DATASET_XFER_F, plist_id, hdferr)
-  if(hdferr < 0) error stop 'HDF5 error'
-  memberOffset = 0
-  do i=1, size(label)
-    memberOffset(i,worldrank) = count(phaseAt == i)*size(memberAtLocal,2)                           ! number of points/instance of this process
-  enddo
-  writeSize = 0
-  writeSize(worldrank) = size(memberAtLocal(1,:,:))                                                 ! total number of points by this process
-
-!--------------------------------------------------------------------------------------------------
-! MPI settings and communication
-#ifdef PETSc
-  call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr)
-  if(hdferr < 0) error stop 'HDF5 error'
-
-  call MPI_allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr)        ! get output at each process
-  if(ierr /= 0) error stop 'MPI error'
-
-  call MPI_allreduce(MPI_IN_PLACE,memberOffset,size(memberOffset),MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr)! get offset at each process
-  if(ierr /= 0) error stop 'MPI error'
-#endif
-
-  myShape    = int([size(phaseAt,1),writeSize(worldrank)], HSIZE_T)
-  myOffset   = int([0,sum(writeSize(0:worldrank-1))], HSIZE_T)
-  totalShape = int([size(phaseAt,1),sum(writeSize)], HSIZE_T)
-
 !--------------------------------------------------------------------------------------------------
 ! create dataspace in memory (local shape = hyperslab) and in file (global shape)
   call h5screate_simple_f(2,myShape,memspace_id,hdferr,myShape)
@@ -564,18 +577,6 @@ subroutine results_mapping_constituent(phaseAt,memberAtLocal,label)
   call h5sselect_hyperslab_f(filespace_id, H5S_SELECT_SET_F, myOffset, myShape, hdferr)
   if(hdferr < 0) error stop 'HDF5 error'
 
-!---------------------------------------------------------------------------------------------------
-! expand phaseAt to consider IPs (is not stored per IP)
-  do i = 1, size(phaseAtMaterialpoint,2)
-    phaseAtMaterialpoint(:,i,:) = phaseAt
-  enddo
-
-!---------------------------------------------------------------------------------------------------
-! renumber member from my process to all processes
-  do i = 1, size(label)
-    where(phaseAtMaterialpoint == i) memberAtGlobal = memberAtLocal + sum(memberOffset(i,0:worldrank-1)) -1 ! convert to 0-based
-  enddo
-
 !--------------------------------------------------------------------------------------------------
 ! write the components of the compound type individually
   call h5pset_preserve_f(plist_id, .TRUE., hdferr)
@@ -609,7 +610,7 @@ subroutine results_mapping_constituent(phaseAt,memberAtLocal,label)
   if(hdferr < 0) error stop 'HDF5 error'
   call h5tclose_f(position_id, hdferr)
 
-end subroutine results_mapping_constituent
+end subroutine results_mapping_phase
 
 
 !--------------------------------------------------------------------------------------------------
@@ -645,6 +646,48 @@ subroutine results_mapping_homogenization(homogenizationAt,memberAtLocal,label)
   integer(SIZE_T)   :: type_size_string, type_size_int
   integer :: hdferr, ierr, i
 
+
+!--------------------------------------------------------------------------------------------------
+! prepare MPI communication (transparent for non-MPI runs)
+  call h5pcreate_f(H5P_DATASET_XFER_F, plist_id, hdferr)
+  if(hdferr < 0) error stop 'HDF5 error'
+  memberOffset = 0
+  do i=1, size(label)
+    memberOffset(i,worldrank) = count(homogenizationAt == i)*size(memberAtLocal,1)                  ! number of points/instance of this process
+  enddo
+  writeSize = 0
+  writeSize(worldrank) = size(memberAtLocal)                                                        ! total number of points by this process
+
+!--------------------------------------------------------------------------------------------------
+! MPI settings and communication
+#ifdef PETSc
+  call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr)
+  if(hdferr < 0) error stop 'HDF5 error'
+
+  call MPI_allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr)        ! get output at each process
+  if(ierr /= 0) error stop 'MPI error'
+
+  call MPI_allreduce(MPI_IN_PLACE,memberOffset,size(memberOffset),MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr)! get offset at each process
+  if(ierr /= 0) error stop 'MPI error'
+#endif
+
+  myShape    = int([writeSize(worldrank)], HSIZE_T)
+  myOffset   = int([sum(writeSize(0:worldrank-1))], HSIZE_T)
+  totalShape = int([sum(writeSize)], HSIZE_T)
+
+
+!---------------------------------------------------------------------------------------------------
+! expand homogenizationAt to consider IPs (is not stored per IP)
+  do i = 1, size(homogenizationAtMaterialpoint,1)
+    homogenizationAtMaterialpoint(i,:) = homogenizationAt
+  enddo
+
+!---------------------------------------------------------------------------------------------------
+! renumber member from my process to all processes
+  do i = 1, size(label)
+    where(homogenizationAtMaterialpoint == i) memberAtGlobal = memberAtLocal + sum(memberOffset(i,0:worldrank-1)) - 1 ! convert to 0-based
+  enddo
+
 !---------------------------------------------------------------------------------------------------
 ! compound type: name of phase section + position/index within results array
   call h5tcopy_f(H5T_NATIVE_CHARACTER, dt_id, hdferr)
@@ -679,34 +722,6 @@ subroutine results_mapping_homogenization(homogenizationAt,memberAtLocal,label)
   call h5tclose_f(dt_id, hdferr)
   if(hdferr < 0) error stop 'HDF5 error'
 
-!--------------------------------------------------------------------------------------------------
-! prepare MPI communication (transparent for non-MPI runs)
-  call h5pcreate_f(H5P_DATASET_XFER_F, plist_id, hdferr)
-  if(hdferr < 0) error stop 'HDF5 error'
-  memberOffset = 0
-  do i=1, size(label)
-    memberOffset(i,worldrank) = count(homogenizationAt == i)*size(memberAtLocal,1)                  ! number of points/instance of this process
-  enddo
-  writeSize = 0
-  writeSize(worldrank) = size(memberAtLocal)                                                        ! total number of points by this process
-
-!--------------------------------------------------------------------------------------------------
-! MPI settings and communication
-#ifdef PETSc
-  call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr)
-  if(hdferr < 0) error stop 'HDF5 error'
-
-  call MPI_allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr)        ! get output at each process
-  if(ierr /= 0) error stop 'MPI error'
-
-  call MPI_allreduce(MPI_IN_PLACE,memberOffset,size(memberOffset),MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr)! get offset at each process
-  if(ierr /= 0) error stop 'MPI error'
-#endif
-
-  myShape    = int([writeSize(worldrank)], HSIZE_T)
-  myOffset   = int([sum(writeSize(0:worldrank-1))], HSIZE_T)
-  totalShape = int([sum(writeSize)], HSIZE_T)
-
 !--------------------------------------------------------------------------------------------------
 ! create dataspace in memory (local shape = hyperslab) and in file (global shape)
   call h5screate_simple_f(1,myShape,memspace_id,hdferr,myShape)
@@ -718,18 +733,6 @@ subroutine results_mapping_homogenization(homogenizationAt,memberAtLocal,label)
   call h5sselect_hyperslab_f(filespace_id, H5S_SELECT_SET_F, myOffset, myShape, hdferr)
   if(hdferr < 0) error stop 'HDF5 error'
 
-!---------------------------------------------------------------------------------------------------
-! expand phaseAt to consider IPs (is not stored per IP)
-  do i = 1, size(homogenizationAtMaterialpoint,1)
-    homogenizationAtMaterialpoint(i,:) = homogenizationAt
-  enddo
-
-!---------------------------------------------------------------------------------------------------
-! renumber member from my process to all processes
-  do i = 1, size(label)
-    where(homogenizationAtMaterialpoint == i) memberAtGlobal = memberAtLocal + sum(memberOffset(i,0:worldrank-1)) - 1 ! convert to 0-based
-  enddo
-
 !--------------------------------------------------------------------------------------------------
 ! write the components of the compound type individually
   call h5pset_preserve_f(plist_id, .TRUE., hdferr)
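
Note on the blocks moved to the head of results_mapping_phase and
results_mapping_homogenization above: each process counts its members per phase (or
homogenization), the counts are summed across ranks via MPI_allreduce, and local 1-based
member indices are then shifted by the prefix sum over the lower ranks to obtain global
0-based indices for the collective HDF5 write. The arithmetic, stripped of MPI and HDF5
(rank and counts are invented for illustration):

  program renumber_demo
    implicit none
    integer, parameter :: worldsize = 3, worldrank = 1          ! pretend to be rank 1 of 3
    integer :: memberOffset(0:worldsize-1)
    integer :: memberAtLocal, memberAtGlobal

    memberOffset  = [4,2,5]                                     ! members of one phase on ranks 0,1,2 (as after MPI_allreduce)
    memberAtLocal = 2                                           ! second member of that phase on this rank (1-based)

    memberAtGlobal = memberAtLocal + sum(memberOffset(0:worldrank-1)) - 1 ! convert to global, 0-based

    print'(a,i0)', ' global member index: ', memberAtGlobal     ! 2 + 4 - 1 = 5
  end program renumber_demo
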