diff --git a/CMakeLists.txt b/CMakeLists.txt index a6a133fe4..ceddd8c53 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -15,27 +15,27 @@ pkg_get_variable (CMAKE_Fortran_COMPILER PETSc fcompiler) pkg_get_variable (CMAKE_C_COMPILER PETSc ccompiler) # Solver determines name of project -string(TOLOWER "${DAMASK_SOLVER}" DAMASK_SOLVER) -if (DAMASK_SOLVER STREQUAL "grid") +string(TOUPPER "${DAMASK_SOLVER}" DAMASK_SOLVER) +if (DAMASK_SOLVER STREQUAL "GRID") project (damask-grid HOMEPAGE_URL https://damask.mpie.de LANGUAGES Fortran C) - add_definitions (-DGrid) -elseif (DAMASK_SOLVER STREQUAL "mesh") +elseif (DAMASK_SOLVER STREQUAL "MESH") project (damask-mesh HOMEPAGE_URL https://damask.mpie.de LANGUAGES Fortran C) - add_definitions (-DMesh) else () - message (FATAL_ERROR "Build target (DAMASK_SOLVER) is not defined") + message (FATAL_ERROR "Invalid solver: DAMASK_SOLVER=${DAMASK_SOLVER}") endif () +add_definitions ("-D${DAMASK_SOLVER}") file (STRINGS ${PROJECT_SOURCE_DIR}/VERSION DAMASK_VERSION) message ("\nBuilding ${CMAKE_PROJECT_NAME} ${DAMASK_VERSION}\n") -add_definitions (-DPETSc) +add_definitions (-DPETSC) add_definitions (-DDAMASKVERSION="${DAMASK_VERSION}") if (CMAKE_BUILD_TYPE STREQUAL "") set (CMAKE_BUILD_TYPE "RELEASE") endif () +string(TOUPPER "${CMAKE_BUILD_TYPE}" CMAKE_BUILD_TYPE) # Predefined sets for OPTIMIZATION/OPENMP based on BUILD_TYPE if (CMAKE_BUILD_TYPE STREQUAL "DEBUG" OR CMAKE_BUILD_TYPE STREQUAL "SYNTAXONLY") @@ -45,9 +45,15 @@ if (CMAKE_BUILD_TYPE STREQUAL "DEBUG" OR CMAKE_BUILD_TYPE STREQUAL "SYNTAXONLY") elseif (CMAKE_BUILD_TYPE STREQUAL "RELEASE") set (PARALLEL "ON") set (OPTI "DEFENSIVE") +elseif (CMAKE_BUILD_TYPE STREQUAL "DEBUGRELEASE") + set (DEBUG_FLAGS "${DEBUG_FLAGS} -DDEBUG") + set (PARALLEL "ON") + set (OPTI "DEFENSIVE") elseif (CMAKE_BUILD_TYPE STREQUAL "PERFORMANCE") set (PARALLEL "ON") set (OPTI "AGGRESSIVE") +else () + message (FATAL_ERROR "Invalid build type: CMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}") endif () # $OPTIMIZATION takes precedence over $BUILD_TYPE defaults diff --git a/DAMASK_prerequisites.sh b/DAMASK_prerequisites.sh index 181fd46b5..f0ae484e4 100755 --- a/DAMASK_prerequisites.sh +++ b/DAMASK_prerequisites.sh @@ -56,15 +56,12 @@ echo XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX echo System report for \'$(hostname)\' created on $(date '+%Y-%m-%d %H:%M:%S') by \'$(whoami)\' echo XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX -firstLevel "DAMASK settings" +firstLevel "DAMASK" secondLevel "DAMASK_ROOT" echo $DAMASK_ROOT echo secondLevel "Version" cat VERSION -echo -secondLevel "Settings in CONFIG" -cat env/CONFIG firstLevel "System" uname -a @@ -75,20 +72,26 @@ echo PYTHONPATH: $PYTHONPATH echo SHELL: $SHELL echo PETSC_ARCH: $PETSC_ARCH echo PETSC_DIR: $PETSC_DIR -ls $PETSC_DIR/lib +echo +echo $PETSC_DIR/$PETSC_ARCH/lib: +ls $PETSC_DIR/$PETSC_ARCH/lib +echo +echo $PETSC_DIR/$PETSC_ARCH/lib/petsc/conf/petscvariables: +cat $PETSC_DIR/$PETSC_ARCH/lib/petsc/conf/petscvariables + firstLevel "Python" DEFAULT_PYTHON=python3 -for executable in python python3; do - getDetails $executable '--version' +for EXECUTABLE in python python3; do + getDetails $EXECUTABLE '--version' done secondLevel "Details on $DEFAULT_PYTHON:" echo $(ls -la $(which $DEFAULT_PYTHON)) -for module in numpy scipy pandas matplotlib yaml h5py;do +for MODULE in numpy scipy pandas matplotlib yaml h5py;do thirdLevel $module - $DEFAULT_PYTHON -c "import $module; \ - print('Version: 
{}'.format($module.__version__)); \ - print('Location: {}'.format($module.__file__))" + $DEFAULT_PYTHON -c "import $MODULE; \ + print('Version: {}'.format($MODULE.__version__)); \ + print('Location: {}'.format($MODULE.__file__))" done thirdLevel vtk $DEFAULT_PYTHON -c "import vtk; \ @@ -96,23 +99,23 @@ $DEFAULT_PYTHON -c "import vtk; \ print('Location: {}'.format(vtk.__file__))" firstLevel "GNU Compiler Collection" -for executable in gcc g++ gfortran ;do - getDetails $executable '--version' +for EXECUTABLE in gcc g++ gfortran ;do + getDetails $EXECUTABLE '--version' done firstLevel "Intel Compiler Suite" -for executable in icc icpc ifort ;do - getDetails $executable '--version' +for EXECUTABLE in icc icpc ifort ;do + getDetails $EXECUTABLE '--version' done firstLevel "MPI Wrappers" -for executable in mpicc mpiCC mpiicc mpic++ mpiicpc mpicxx mpifort mpiifort mpif90 mpif77; do - getDetails $executable '-show' +for EXECUTABLE in mpicc mpiCC mpiicc mpic++ mpiicpc mpicxx mpifort mpiifort mpif90 mpif77; do + getDetails $EXECUTABLE '-show' done firstLevel "MPI Launchers" -for executable in mpirun mpiexec; do - getDetails $executable '--version' +for EXECUTABLE in mpirun mpiexec; do + getDetails $EXECUTABLE '--version' done firstLevel "CMake" diff --git a/Makefile b/Makefile index dfbcbd5ae..e73205702 100644 --- a/Makefile +++ b/Makefile @@ -9,13 +9,13 @@ all: grid mesh .PHONY: grid grid: - @cmake -B build/grid -DDAMASK_SOLVER=GRID -DCMAKE_INSTALL_PREFIX=${PWD} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DBUILDCMD_POST=${BUILDCMD_POST} -DBUILDCMD_PRE=${BUILDCMD_PRE} -DOPTIMIZATION=${OPTIMIZATION} -DOPENMP=${OPENMP} + @cmake -B build/grid -DDAMASK_SOLVER=grid -DCMAKE_INSTALL_PREFIX=${PWD} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DBUILDCMD_POST=${BUILDCMD_POST} -DBUILDCMD_PRE=${BUILDCMD_PRE} -DOPTIMIZATION=${OPTIMIZATION} -DOPENMP=${OPENMP} @cmake --build build/grid --parallel @cmake --install build/grid .PHONY: mesh mesh: - @cmake -B build/mesh -DDAMASK_SOLVER=MESH -DCMAKE_INSTALL_PREFIX=${PWD} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DBUILDCMD_POST=${BUILDCMD_POST} -DBUILDCMD_PRE=${BUILDCMD_PRE} -DOPTIMIZATION=${OPTIMIZATION} -DOPENMP=${OPENMP} + @cmake -B build/mesh -DDAMASK_SOLVER=mesh -DCMAKE_INSTALL_PREFIX=${PWD} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DBUILDCMD_POST=${BUILDCMD_POST} -DBUILDCMD_PRE=${BUILDCMD_PRE} -DOPTIMIZATION=${OPTIMIZATION} -DOPENMP=${OPENMP} @cmake --build build/mesh --parallel @cmake --install build/mesh diff --git a/PRIVATE b/PRIVATE index d39905021..8fec909d1 160000 --- a/PRIVATE +++ b/PRIVATE @@ -1 +1 @@ -Subproject commit d399050216d814627edbe0ff1c05afe8f76c7b40 +Subproject commit 8fec909d1931b092b223b0560dd30c3339c6e5a7 diff --git a/examples/Marc/material.yaml b/examples/Marc/material.yaml index a02a9e739..bebd5dd81 100644 --- a/examples/Marc/material.yaml +++ b/examples/Marc/material.yaml @@ -9,19 +9,19 @@ phase: lattice: cF mechanical: output: [F, P, F_e, F_p, L_p, O] - elastic: {type: Hooke, C_11: 106.75e9, C_12: 60.41e9, C_44: 28.34e9} + elastic: {type: Hooke, C_11: 106.75e+9, C_12: 60.41e+9, C_44: 28.34e+9} plastic: type: phenopowerlaw N_sl: [12] a_sl: 2.25 atol_xi: 1.0 dot_gamma_0_sl: 0.001 - h_0_sl-sl: 75e6 + h_0_sl-sl: 75.e+6 h_sl-sl: [1, 1, 1.4, 1.4, 1.4, 1.4, 1.4] n_sl: 20 output: [xi_sl] - xi_0_sl: [31e6] - xi_inf_sl: [63e6] + xi_0_sl: [31.e+6] + xi_inf_sl: [63.e+6] material: - homogenization: SX diff --git a/examples/Marc/sheet_r-value.dat b/examples/Marc/sheet_r-value.dat index 529f4d31a..176bd34eb 100644 --- a/examples/Marc/sheet_r-value.dat +++ b/examples/Marc/sheet_r-value.dat @@ 
-14,7 +14,7 @@ $no list large stra 2 1 0 0 0 0 0 all points no echo 1 2 3 4 -state vars 3 +state vars 2 end $................... solver @@ -269,7 +269,7 @@ coordinates 163 3.999999999999999+1 9.999999999999996+0-5.000000000000000-1 164 3.999999999999999+1 9.999999999999996+0 0.000000000000000+0 165 3.999999999999999+1 9.999999999999996+0 5.000000000000000-1 -define element set Material_Nummer_elements +define element set DAMASK_elements 1 to 80 define node set unten_y_nodes 2 5 8 11 14 @@ -283,8 +283,6 @@ define node set unten_z_nodes 7 to 9 define node set oben_z_nodes 157 to 159 -define element set texture_elements - 1 to 80 hypoelastic 1 0 1 0 1TKS 0 @@ -346,22 +344,14 @@ unten_fest_nodes oben_ziehen_nodes initial state - 2 6 1 0 0 0Material_Nummer - 1.000000000000000+0 + 2 6 1 0 0 0DAMASK + 0.000000000000000+0 0 1 -Material_Nummer_elements -initial state - - 3 6 1 0 0 0texture - 1.000000000000000+0 - 0 - 1 -texture_elements +DAMASK_elements loadcase r-value 5 -Material_Nummer -texture +DAMASK unten_z unten_y unten_fest diff --git a/examples/config/Phase_DisloUCLA_Tungsten.config b/examples/config/Phase_DisloUCLA_Tungsten.config index e680213d5..1eef32577 100644 --- a/examples/config/Phase_DisloUCLA_Tungsten.config +++ b/examples/config/Phase_DisloUCLA_Tungsten.config @@ -6,21 +6,8 @@ plasticity disloucla (output) dipole_density (output) shear_rate_slip (output) accumulated_shear_slip -(output) mfp_slip (output) resolved_stress_slip (output) threshold_stress_slip -(output) twin_fraction -(output) shear_rate_twin -(output) accumulated_shear_twin -(output) mfp_twin -(output) resolved_stress_twin -(output) threshold_stress_twin - -### Material parameters ### -lattice_structure bcc -C11 523.0e9 # From Marinica et al. Journal of Physics: Condensed Matter(2013) -C12 202.0e9 -C44 161.0e9 grainsize 2.7e-5 # Average grain size [m] 2.0e-5 SolidSolutionStrength 0.0 # Strength due to elements in solid solution diff --git a/examples/config/Phase_Dislotwin_TWIP-Steel-FeMnC.yaml b/examples/config/Phase_Dislotwin_TWIP-Steel-FeMnC.yaml index 31b58b0b9..fe51585be 100644 --- a/examples/config/Phase_Dislotwin_TWIP-Steel-FeMnC.yaml +++ b/examples/config/Phase_Dislotwin_TWIP-Steel-FeMnC.yaml @@ -1,41 +1,31 @@ -TWIP_Steel_FeMnC: - lattice: cF - mechanical: - elastic: {type: Hooke, C_11: 175.0e9, C_12: 115.0e9, C_44: 135.0e9} - plastic: - type: dislotwin - output: [rho_mob, rho_dip, gamma_sl, Lambda_sl, tau_pass, f_tw, Lambda_tw, tau_hat_tw, f_tr] - D: 2.0e-5 - N_sl: [12] - b_sl: [2.56e-10] - rho_mob_0: [1.0e12] - rho_dip_0: [1.0] - v_0: [1.0e4] - Q_s: [3.7e-19] - p_sl: [1.0] - q_sl: [1.0] - tau_0: [1.5e8] - i_sl: [10.0] # Adj. parameter controlling dislocation mean free path - D_0: 4.0e-5 # Vacancy diffusion prefactor / m^2/s - D_a: 1.0 # minimum dipole distance / b - Q_cl: 4.5e-19 # Activation energy for climb / J - h_sl-sl: [0.122, 0.122, 0.625, 0.07, 0.137, 0.137, 0.122] # Interaction coefficients (Kubin et al. 2008) -# shear band parameters - xi_sb: 180.0e6 - Q_sb: 3.7e-19 - p_sb: 1.0 - q_sb: 1.0 - v_sb: 0.0 # set to 0, to turn it off +type: dislotwin +output: [rho_mob, rho_dip, gamma_sl, Lambda_sl, tau_pass, f_tw, Lambda_tw, tau_hat_tw, f_tr] +D: 2.0e-5 +N_sl: [12] +b_sl: [2.56e-10] +rho_mob_0: [1.0e+12] +rho_dip_0: [1.0] +v_0: [1.0e+4] +Q_s: [3.7e-19] +p_sl: [1.0] +q_sl: [1.0] +tau_0: [1.5e+8] +i_sl: [10.0] # Adj. 
parameter controlling dislocation mean free path +D_0: 4.0e-5 # Vacancy diffusion prefactor / m^2/s +D_a: 1.0 # minimum dipole distance / b +Q_cl: 4.5e-19 # Activation energy for climb / J +h_sl-sl: [0.122, 0.122, 0.625, 0.07, 0.137, 0.137, 0.122] # Interaction coefficients (Kubin et al. 2008) # twinning parameters - N_tw: [12] - b_tw: [1.47e-10] # Burgers vector length of twin system / b - t_tw: [5.0e-8] # Twin stack mean thickness / m - L_tw: 442.0 # Length of twin nuclei / b - x_c_tw: 1.0e-9 # critical distance for formation of twin nucleus / m - V_cs: 1.67e-29 # cross slip volume / m^3 - p_tw: [10.0] # r-exponent in twin formation probability - i_tw: 1.0 # Adj. parameter controlling twin mean free path - h_sl-tw: [0.0, 1.0, 1.0] # dislocation-twin interaction coefficients - h_tw-tw: [0.0, 1.0] # twin-twin interaction coefficients - Gamma_sf_0K: -0.0396 # stacking fault energy / J/m^2 at zero K; TWIP steel: -0.0526; Cu: -0.0396 - dGamma_sf_dT: 0.0002 # temperature dependence / J/(m^2 K) of stacking fault energy +N_tw: [12] +b_tw: [1.47e-10] # Burgers vector length of twin system / b +t_tw: [5.0e-8] # Twin stack mean thickness / m +L_tw: 442.0 # Length of twin nuclei / b +x_c_tw: 1.0e-9 # critical distance for formation of twin nucleus / m +V_cs: 1.67e-29 # cross slip volume / m^3 +p_tw: [10.0] # r-exponent in twin formation probability +i_tw: 1.0 # Adj. parameter controlling twin mean free path +h_sl-tw: [0.0, 1.0, 1.0] # dislocation-twin interaction coefficients +h_tw-tw: [0.0, 1.0] # twin-twin interaction coefficients +T_ref: 0.0 +Gamma_sf: -0.0396 # stacking fault energy / J/m^2 at zero K; TWIP steel: -0.0526; Cu: -0.0396 +Gamma_sf,T: 0.0002 # temperature dependence / J/(m^2 K) of stacking fault energy diff --git a/examples/config/Phase_Dislotwin_Tungsten.yaml b/examples/config/Phase_Dislotwin_Tungsten.yaml deleted file mode 100644 index 44d8d2260..000000000 --- a/examples/config/Phase_Dislotwin_Tungsten.yaml +++ /dev/null @@ -1,21 +0,0 @@ -Tungsten: - lattice: cI - mechanical: - elastic: {type: Hooke, C_11: 523.0e9, C_12: 202.0e9, C_44: 161.0e9} # Marinica et al. Journal of Physics: Condensed Matter(2013) - plastic: - type: dislotwin - D: 2.0e-5 # Average grain size / m - N_sl: [12] - b_sl: [2.72e-10] # Burgers vector length of slip families / m - rho_mob_0: [1.0e12] - rho_dip_0: [1.0] - v_0: [1.0e4] # Initial glide velocity / m/s - Q_s: [2.725e-19] # Activation energy for dislocation glide / J - p_sl: [0.78] # p-exponent in glide velocity - q_sl: [1.58] # q-exponent in glide velocity - tau_0: [1.5e8] # solid solution strength / Pa - i_sl: [10.0] # Adj. 
parameter controlling dislocation mean free path - D_0: 4.0e-5 # Vacancy diffusion prefactor / m^2/s - D_a: 1.0 # minimum dipole distance / b - Q_cl: 4.5e-19 # Activation energy for climb / J - h_sl-sl: [1, 1.4, 1, 1.4, 1.4, 1.4, 1.4] diff --git a/examples/config/Phase_Isotropic_FreeSurface.yaml b/examples/config/Phase_Isotropic_FreeSurface.yaml deleted file mode 100644 index 649baf992..000000000 --- a/examples/config/Phase_Isotropic_FreeSurface.yaml +++ /dev/null @@ -1,16 +0,0 @@ -FreeSurface: - lattice: cI - mechanical: - output: [F, P, F_e, F_p, L_p] - elastic: {type: Hooke, C_11: 1e8, C_12: 1e6, C_44: 4.95e7} - plastic: - type: isotropic - output: [xi] - xi_0: 0.3e6 - xi_inf: 0.6e6 - dot_gamma_0: 0.001 - n: 5 - M: 3 - h_0: 1e6 - a: 2 - dilatation: True diff --git a/examples/config/Phase_Phenopowerlaw_BCC-Martensite.yaml b/examples/config/Phase_Phenopowerlaw_BCC-Martensite.yaml index 1b1753870..3ad6779eb 100644 --- a/examples/config/Phase_Phenopowerlaw_BCC-Martensite.yaml +++ b/examples/config/Phase_Phenopowerlaw_BCC-Martensite.yaml @@ -4,14 +4,14 @@ Martensite: lattice: cI mechanical: - elastic: {C_11: 417.4e9, C_12: 242.4e9, C_44: 211.1e9, type: Hooke} + elastic: {C_11: 417.4e+9, C_12: 242.4e+9, C_44: 211.1e+9, type: Hooke} plastic: N_sl: [12, 12] a_sl: 2.0 dot_gamma_0_sl: 0.001 - h_0_sl-sl: 563.0e9 + h_0_sl-sl: 563.0e+9 h_sl-sl: [1, 1.4, 1, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4] n_sl: 20 type: phenopowerlaw - xi_0_sl: [405.8e6, 456.7e6] - xi_inf_sl: [872.9e6, 971.2e6] + xi_0_sl: [405.8e+6, 456.7e+6] + xi_inf_sl: [872.9e+6, 971.2e+6] diff --git a/examples/config/Phase_Phenopowerlaw_Magnesium.yaml b/examples/config/Phase_Phenopowerlaw_Magnesium.yaml index eb73e9867..4d4b9eb8e 100644 --- a/examples/config/Phase_Phenopowerlaw_Magnesium.yaml +++ b/examples/config/Phase_Phenopowerlaw_Magnesium.yaml @@ -1,28 +1,20 @@ -# Tromans 2011, Elastic Anisotropy of HCP Metal Crystals and Polycrystals -Magnesium: - lattice: hP - c/a: 1.62350 - mechanical: - output: [F, P, F_e, F_p, L_p, O] - elastic: {C_11: 59.3e9, C_12: 25.7e9, C_13: 21.4e9, C_33: 61.5e9, C_44: 16.4e9, type: Hooke} - plastic: - N_sl: [3, 3, 0, 6, 0, 6] - N_tw: [6, 0, 0, 6] - h_0_tw-tw: 50.0e6 - h_0_sl-sl: 500.0e6 - h_0_tw-sl: 150.0e6 - h_sl-sl: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] - h_tw-tw: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] - h_sl-tw: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] - h_tw-sl: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] - output: [xi_sl, xi_tw] - type: phenopowerlaw - xi_0_sl: [10.0e6, 55.0e6, 0, 60.0e6, 0.0, 60.0e6] - xi_inf_sl: [40.0e6, 135.0e6, 0, 150.0e6, 0.0, 150.0e6] - xi_0_tw: [40e6, 0.0, 0.0, 60.0e6] - a_sl: 2.25 - dot_gamma_0_sl: 0.001 - dot_gamma_0_tw: 0.001 - n_sl: 20 - n_tw: 20 - f_sat_sl-tw: 10.0 +N_sl: [3, 3, 0, 6, 0, 6] +N_tw: [6, 0, 0, 6] +h_0_tw-tw: 50.0e+6 +h_0_sl-sl: 500.0e+6 +h_0_tw-sl: 150.0e+6 +h_sl-sl: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] +h_tw-tw: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] +h_sl-tw: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] +h_tw-sl: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] +output: [xi_sl, xi_tw] +type: phenopowerlaw +xi_0_sl: [10.e+6, 55.e+6, 0., 60.e+6, 0., 60.e+6] 
+xi_inf_sl: [40.e+6, 135.e+6, 0., 150.e+6, 0., 150.e+6] +xi_0_tw: [40.e+6, 0., 0., 60.e+6] +a_sl: 2.25 +dot_gamma_0_sl: 0.001 +dot_gamma_0_tw: 0.001 +n_sl: 20 +n_tw: 20 +f_sat_sl-tw: 10.0 diff --git a/examples/config/Phase_Phenopowerlaw_cpTi.yaml b/examples/config/Phase_Phenopowerlaw_cpTi.yaml deleted file mode 100644 index a63f011e6..000000000 --- a/examples/config/Phase_Phenopowerlaw_cpTi.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# M. Levy, Handbook of Elastic Properties of Solids, Liquids, and Gases (2001) -# C. Zambaldi, "Orientation informed nanoindentation of a-titanium: Indentation pileup in hexagonal metals deforming by prismatic slip", J. Mater. Res., Vol. 27, No. 1, Jan 14, 2012 -# Better use values from L. Wang, Z. Zheng, H. Phukan, P. Kenesei, J.-S. Park, J. Lind, R.M. Suter, T.R. Bieler, Direct measurement of critical resolved shear stress of prismatic and basal slip in polycrystalline Ti using high energy X-ray diffraction microscopy, Acta Mater 2017 -cpTi: - lattice: hP - c/a: 1.587 - mechanical: - output: [F, P, F_e, F_p, L_p, O] - elastic: {C_11: 160.0e9, C_12: 90.0e9, C_13: 66.0e9, C_33: 181.7e9, C_44: 46.5e9, type: Hooke} - plastic: - N_sl: [3, 3, 0, 6, 12] - a_sl: 2.0 - dot_gamma_0_sl: 0.001 - h_0_sl-sl: 200e6 - h_sl-sl: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] - n_sl: 20 - output: [gamma_sl] - type: phenopowerlaw - xi_0_sl: [0.15e9, 0.09e9, 0, 0.20e9, 0.25e9] - xi_inf_sl: [0.24e9, 0.5e9, 0, 0.6e9, 0.8e9] diff --git a/examples/config/numerics.yaml b/examples/config/numerics.yaml index 68a74e13c..72fbe82a2 100644 --- a/examples/config/numerics.yaml +++ b/examples/config/numerics.yaml @@ -30,7 +30,7 @@ grid: eps_div_rtol: 5.0e-4 # relative tolerance for fulfillment of stress equilibrium eps_curl_atol: 1.0e-12 # absolute tolerance for fulfillment of strain compatibility eps_curl_rtol: 5.0e-4 # relative tolerance for fulfillment of strain compatibility - eps_stress_atol: 1.0e3 # absolute tolerance for fulfillment of stress BC + eps_stress_atol: 1.0e+3 # absolute tolerance for fulfillment of stress BC eps_stress_rtol: 0.01 # relative tolerance for fulfillment of stress BC eps_damage_atol: 1.0e-2 # absolute tolerance for damage evolution eps_damage_rtol: 1.0e-6 # relative tolerance for damage evolution diff --git a/examples/config/phase/Al.yaml b/examples/config/phase/Al.yaml index 57004df14..f3c19924b 100644 --- a/examples/config/phase/Al.yaml +++ b/examples/config/phase/Al.yaml @@ -1,4 +1,4 @@ references: - - en.wikipedia.org/wiki/Aluminium + - https://en.wikipedia.org/wiki/Aluminium lattice: cF rho: 2700.0 diff --git a/examples/config/phase/Au.yaml b/examples/config/phase/Au.yaml index 23c1f30f1..fab05ad99 100644 --- a/examples/config/phase/Au.yaml +++ b/examples/config/phase/Au.yaml @@ -1,4 +1,4 @@ references: - - en.wikipedia.org/wiki/Gold + - https://en.wikipedia.org/wiki/Gold lattice: cF rho: 19300.0 diff --git a/examples/config/phase/Cu.yaml b/examples/config/phase/Cu.yaml index bcf9e8717..f1e1e9e36 100644 --- a/examples/config/phase/Cu.yaml +++ b/examples/config/phase/Cu.yaml @@ -1,4 +1,4 @@ references: - - en.wikipedia.org/wiki/Copper + - https://en.wikipedia.org/wiki/Copper lattice: cF rho: 8960.0 diff --git a/examples/config/phase/Fe.yaml b/examples/config/phase/Fe.yaml index cce09d634..271ff8661 100644 --- a/examples/config/phase/Fe.yaml +++ b/examples/config/phase/Fe.yaml @@ -1,4 +1,4 @@ references: - - en.wikipedia.org/wiki/Iron + - https://en.wikipedia.org/wiki/Iron lattice: cI 
rho: 7874.0 diff --git a/examples/config/phase/Mg.yaml b/examples/config/phase/Mg.yaml new file mode 100644 index 000000000..96eb6ad78 --- /dev/null +++ b/examples/config/phase/Mg.yaml @@ -0,0 +1,7 @@ +references: + - D. Tromans, + International Journal of Recent Research and Applied Studies 6(4):462-483, 2011, + https://www.arpapress.com/Volumes/Vol6Issue4/IJRRAS_6_4_14.pdf +lattice: hP +c/a: 1.62350 +rho: 1740.0 diff --git a/examples/config/phase/Ni.yaml b/examples/config/phase/Ni.yaml index 1f9316118..575984583 100644 --- a/examples/config/phase/Ni.yaml +++ b/examples/config/phase/Ni.yaml @@ -1,4 +1,4 @@ references: - - en.wikipedia.org/wiki/Nickel + - https://en.wikipedia.org/wiki/Nickel lattice: cF rho: 8908.0 diff --git a/examples/config/phase/Ti.yaml b/examples/config/phase/Ti.yaml index a9811786b..48eae3949 100644 --- a/examples/config/phase/Ti.yaml +++ b/examples/config/phase/Ti.yaml @@ -1,6 +1,6 @@ references: - - www.totalmateria.com/page.aspx?ID=CheckArticle&site=ktn&NM=221 - - en.wikipedia.org/wiki/Titanium + - https://www.totalmateria.com/page.aspx?ID=CheckArticle&site=ktn&NM=221 + - https://en.wikipedia.org/wiki/Titanium lattice: hP c/a: 1.587 rho: 4506.0 diff --git a/examples/config/phase/W.yaml b/examples/config/phase/W.yaml index c770bb891..8be7d38a6 100644 --- a/examples/config/phase/W.yaml +++ b/examples/config/phase/W.yaml @@ -1,4 +1,4 @@ references: - - en.wikipedia.org/wiki/Tungsten + - https://en.wikipedia.org/wiki/Tungsten lattice: cI rho: 19300.0 diff --git a/examples/config/phase/damage/anisobrittle_cubic.yaml b/examples/config/phase/damage/anisobrittle_cubic.yaml index 3539a24d2..5320a4aa9 100644 --- a/examples/config/phase/damage/anisobrittle_cubic.yaml +++ b/examples/config/phase/damage/anisobrittle_cubic.yaml @@ -1,11 +1,11 @@ type: anisobrittle N_cl: [3] -g_crit: [0.50e7] +g_crit: [0.5e+7] s_crit: [0.006666] -dot_o: 1e-3 +dot_o: 1.e-3 q: 20 output: [f_phi] -D_11: 1.0 -M: 0.001 +K_11: 1.0 +mu: 0.001 diff --git a/examples/config/phase/damage/isobrittle_generic.yaml b/examples/config/phase/damage/isobrittle_generic.yaml index b4416f995..98c9b9763 100644 --- a/examples/config/phase/damage/isobrittle_generic.yaml +++ b/examples/config/phase/damage/isobrittle_generic.yaml @@ -5,5 +5,5 @@ isoBrittle_atol: 0.01 output: [f_phi] -D_11: 1.0 -M: 0.001 +K_11: 1.0 +mu: 0.001 diff --git a/examples/config/phase/mechanical/eigen/thermalexpansion_Al.yaml b/examples/config/phase/mechanical/eigen/thermalexpansion_Al.yaml index aa132609e..b0db392e1 100644 --- a/examples/config/phase/mechanical/eigen/thermalexpansion_Al.yaml +++ b/examples/config/phase/mechanical/eigen/thermalexpansion_Al.yaml @@ -1,5 +1,5 @@ type: thermalexpansion references: - - en.wikipedia.org/wiki/Thermal_expansion + - https://en.wikipedia.org/wiki/Thermal_expansion A_11: 23.1e-6 T_ref: 293.15 diff --git a/examples/config/phase/mechanical/eigen/thermalexpansion_Au.yaml b/examples/config/phase/mechanical/eigen/thermalexpansion_Au.yaml index 34e71a1bc..934b4f721 100644 --- a/examples/config/phase/mechanical/eigen/thermalexpansion_Au.yaml +++ b/examples/config/phase/mechanical/eigen/thermalexpansion_Au.yaml @@ -1,5 +1,5 @@ type: thermalexpansion references: - - en.wikipedia.org/wiki/Thermal_expansion -A_11: 14e-6 + - https://en.wikipedia.org/wiki/Thermal_expansion +A_11: 14.e-6 T_ref: 293.15 diff --git a/examples/config/phase/mechanical/eigen/thermalexpansion_C35E.yaml b/examples/config/phase/mechanical/eigen/thermalexpansion_C35E.yaml index 64d772512..13d99b0ed ---
a/examples/config/phase/mechanical/eigen/thermalexpansion_C35E.yaml +++ b/examples/config/phase/mechanical/eigen/thermalexpansion_C35E.yaml @@ -1,6 +1,7 @@ type: thermalexpansion references: - - en.wikipedia.org/wiki/Thermal_expansion, fitted from image description + - https://commons.wikimedia.org/wiki/File:Coefficient_dilatation_lineique_aciers.svg, + fitted from image description (Scilab code) A_11: 12.70371e-6 A_11,T: 7.54e-9 A_11,T^2: -1.0e-11 diff --git a/examples/config/phase/mechanical/eigen/thermalexpansion_Cu.yaml b/examples/config/phase/mechanical/eigen/thermalexpansion_Cu.yaml index ea09c7fb3..4c82421f3 100644 --- a/examples/config/phase/mechanical/eigen/thermalexpansion_Cu.yaml +++ b/examples/config/phase/mechanical/eigen/thermalexpansion_Cu.yaml @@ -1,5 +1,5 @@ type: thermalexpansion references: - - en.wikipedia.org/wiki/Thermal_expansion -A_11: 17e-6 + - https://en.wikipedia.org/wiki/Thermal_expansion +A_11: 17.e-6 T_ref: 293.15 diff --git a/examples/config/phase/mechanical/eigen/thermalexpansion_Fe.yaml b/examples/config/phase/mechanical/eigen/thermalexpansion_Fe.yaml index b104e36f5..c0891ea6a 100644 --- a/examples/config/phase/mechanical/eigen/thermalexpansion_Fe.yaml +++ b/examples/config/phase/mechanical/eigen/thermalexpansion_Fe.yaml @@ -1,5 +1,5 @@ type: thermalexpansion references: - - en.wikipedia.org/wiki/Thermal_expansion + - https://en.wikipedia.org/wiki/Thermal_expansion A_11: 11.8e-6 T_ref: 293.15 diff --git a/examples/config/phase/mechanical/eigen/thermalexpansion_W.yaml b/examples/config/phase/mechanical/eigen/thermalexpansion_W.yaml index d223cdd7d..427731186 100644 --- a/examples/config/phase/mechanical/eigen/thermalexpansion_W.yaml +++ b/examples/config/phase/mechanical/eigen/thermalexpansion_W.yaml @@ -1,5 +1,5 @@ type: thermalexpansion references: - - en.wikipedia.org/wiki/Thermal_expansion + - https://en.wikipedia.org/wiki/Thermal_expansion A_11: 4.5e-6 T_ref: 293.15 diff --git a/examples/config/phase/mechanical/eigen/thermalexpansion_X20Cr13.yaml b/examples/config/phase/mechanical/eigen/thermalexpansion_X20Cr13.yaml index f7c55d485..19b4cb485 100644 --- a/examples/config/phase/mechanical/eigen/thermalexpansion_X20Cr13.yaml +++ b/examples/config/phase/mechanical/eigen/thermalexpansion_X20Cr13.yaml @@ -1,6 +1,7 @@ type: thermalexpansion references: - - en.wikipedia.org/wiki/Thermal_expansion, fitted from image description + - https://commons.wikimedia.org/wiki/File:Coefficient_dilatation_lineique_aciers.svg + fitted from image description (Scilab code) A_11: 11.365e-6 A_11,T: 5.0e-9 T_ref: 273.0 diff --git a/examples/config/phase/mechanical/elastic/Hooke_Al.yaml b/examples/config/phase/mechanical/elastic/Hooke_Al.yaml index 025f1ca25..6007f7695 100644 --- a/examples/config/phase/mechanical/elastic/Hooke_Al.yaml +++ b/examples/config/phase/mechanical/elastic/Hooke_Al.yaml @@ -1,8 +1,8 @@ type: Hooke references: - J. 
Vallin et al., - Journal of Applied Physics 35(6), 1825-1826, 1964, - 10.1063/1.1713749 -C_11: 107.3e9 -C_12: 60.8e9 -C_44: 28.3e9 + Journal of Applied Physics 35(6):1825-1826, 1964, + https://doi.org/10.1063/1.1713749 +C_11: 107.3e+9 +C_12: 60.8e+9 +C_44: 28.3e+9 diff --git a/examples/config/phase/mechanical/elastic/Hooke_Au.yaml b/examples/config/phase/mechanical/elastic/Hooke_Au.yaml index 5683bfb24..452807a6e 100644 --- a/examples/config/phase/mechanical/elastic/Hooke_Au.yaml +++ b/examples/config/phase/mechanical/elastic/Hooke_Au.yaml @@ -4,6 +4,6 @@ references: Theory of Dislocations, 1982, John Wiley & Sons, page 837 -C_11: 186e9 -C_12: 157e9 -C_44: 42e9 +C_11: 186.e+9 +C_12: 157.e+9 +C_44: 42.e+9 diff --git a/examples/config/phase/mechanical/elastic/Hooke_Cu.yaml b/examples/config/phase/mechanical/elastic/Hooke_Cu.yaml index b55c83bdf..7b458a421 100644 --- a/examples/config/phase/mechanical/elastic/Hooke_Cu.yaml +++ b/examples/config/phase/mechanical/elastic/Hooke_Cu.yaml @@ -1,6 +1,7 @@ type: Hooke references: - - www.mit.edu/~6.777/matprops/copper.htm, fixed typo -C_11: 168.3e9 -C_12: 122.1e9 -C_44: 75.7e9 + - https://www.mit.edu/~6.777/matprops/copper.htm, + fixed typo +C_11: 168.3e+9 +C_12: 122.1e+9 +C_44: 75.7e+9 diff --git a/examples/config/phase/mechanical/elastic/Hooke_Fe.yaml b/examples/config/phase/mechanical/elastic/Hooke_Fe.yaml index b2912035f..e736829a6 100644 --- a/examples/config/phase/mechanical/elastic/Hooke_Fe.yaml +++ b/examples/config/phase/mechanical/elastic/Hooke_Fe.yaml @@ -4,6 +4,6 @@ references: Theory of Dislocations, 1982, John Wiley & Sons, page 837 -C_11: 242e9 -C_12: 146.5e9 -C_44: 112e9 +C_11: 242.e9 +C_12: 146.5e+9 +C_44: 112.e9 diff --git a/examples/config/phase/mechanical/elastic/Hooke_Mg.yaml b/examples/config/phase/mechanical/elastic/Hooke_Mg.yaml new file mode 100644 index 000000000..1e08a94a9 --- /dev/null +++ b/examples/config/phase/mechanical/elastic/Hooke_Mg.yaml @@ -0,0 +1,10 @@ +type: Hooke +references: + - D. Tromans, + International Journal of Recent Research and Applied Studies 6(4):462-483, 2011, + https://www.arpapress.com/Volumes/Vol6Issue4/IJRRAS_6_4_14.pdf +C_11: 59.3e+9 +C_33: 61.5e+9 +C_44: 16.4e+9 +C_12: 25.7e+9 +C_13: 21.4e+9 diff --git a/examples/config/phase/mechanical/elastic/Hooke_Ni.yaml b/examples/config/phase/mechanical/elastic/Hooke_Ni.yaml index 15b684e4c..c11f664d3 100644 --- a/examples/config/phase/mechanical/elastic/Hooke_Ni.yaml +++ b/examples/config/phase/mechanical/elastic/Hooke_Ni.yaml @@ -4,6 +4,6 @@ references: Theory of Dislocations, 1982, John Wiley & Sons, page 837 -C_11: 246.5e9 -C_12: 147.3e9 -C_44: 124.7e9 +C_11: 246.5e+9 +C_12: 147.3e+9 +C_44: 124.7e+9 diff --git a/examples/config/phase/mechanical/elastic/Hooke_TWIP-steel.yaml b/examples/config/phase/mechanical/elastic/Hooke_TWIP-steel.yaml new file mode 100644 index 000000000..c31589955 --- /dev/null +++ b/examples/config/phase/mechanical/elastic/Hooke_TWIP-steel.yaml @@ -0,0 +1,11 @@ +type: Hooke +references: + - D. Music et al., + Applied Physics Letters 91(19):191904, 2007, + https://doi.org/10.1063/1.2807677 + - S.L.
Wong et al., + Acta Materialia 118:140-151, 2016, + https://doi.org/10.1016/j.actamat.2016.07.032 +C_11: 175.0e+9 +C_12: 115.0e+9 +C_44: 135.0e+9 diff --git a/examples/config/phase/mechanical/elastic/Hooke_Ti.yaml b/examples/config/phase/mechanical/elastic/Hooke_Ti.yaml index 8684614a4..ee8cae0ad 100644 --- a/examples/config/phase/mechanical/elastic/Hooke_Ti.yaml +++ b/examples/config/phase/mechanical/elastic/Hooke_Ti.yaml @@ -1,10 +1,10 @@ type: Hooke references: - L. Wang et al., - Acta Materialia 132, 598-610, 2017, - 10.1016/j.actamat.2017.05.015 -C_11: 162.4e9 -C_33: 181.6e9 -C_44: 47.2e9 -C_12: 92e9 -C_13: 69e9 + Acta Materialia 132:598-610, 2017, + https://doi.org/10.1016/j.actamat.2017.05.015 +C_11: 162.4e+9 +C_33: 181.6e+9 +C_44: 47.2e+9 +C_12: 92.e+9 +C_13: 69.e+9 diff --git a/examples/config/phase/mechanical/elastic/Hooke_W.yaml b/examples/config/phase/mechanical/elastic/Hooke_W.yaml index 58561106a..49bb7858a 100644 --- a/examples/config/phase/mechanical/elastic/Hooke_W.yaml +++ b/examples/config/phase/mechanical/elastic/Hooke_W.yaml @@ -1,8 +1,8 @@ type: Hooke references: - D. Cereceda et al., - International Journal of Plasticity, 78, 242-265, 2016, - 10.1016/j.ijplas.2015.09.002 -C_11: 523.e9 -C_12: 202.e9 -C_44: 161.e9 + International Journal of Plasticity 78:242-265, 2016, + https://doi.org/10.1016/j.ijplas.2015.09.002 +C_11: 523.e+9 +C_12: 202.e+9 +C_44: 161.e+9 diff --git a/examples/config/phase/mechanical/elastic/Hooke_vanishing-Poisson-ratio.yaml b/examples/config/phase/mechanical/elastic/Hooke_vanishing-Poisson-ratio.yaml new file mode 100644 index 000000000..9a69dda4e --- /dev/null +++ b/examples/config/phase/mechanical/elastic/Hooke_vanishing-Poisson-ratio.yaml @@ -0,0 +1,8 @@ +type: Hooke +references: + - T. Maiti and P. Eisenlohr, + Scripta Materialia 145:37-40, 2018, + https://doi.org/10.1016/j.scriptamat.2017.09.047 +C_11: 1.e+8 +C_12: 1.e+6 +C_44: 4.95e+7 diff --git a/examples/config/phase/mechanical/plastic/dislotwin_IF-steel.yaml b/examples/config/phase/mechanical/plastic/dislotwin_IF-steel.yaml index 80c63882d..5478aa869 100644 --- a/examples/config/phase/mechanical/plastic/dislotwin_IF-steel.yaml +++ b/examples/config/phase/mechanical/plastic/dislotwin_IF-steel.yaml @@ -1,24 +1,24 @@ type: dislotwin references: - K. Sedighiani et al., - International Journal of Plasticity, 134, 102779, 2020, - 10.1016/j.ijplas.2020.102779 + International Journal of Plasticity 134:102779, 2020, + https://doi.org/10.1016/j.ijplas.2020.102779 - K. Sedighiani et al., Mechanics of Materials, submitted output: [rho_dip, rho_mob] N_sl: [12, 12] b_sl: [2.49e-10, 2.49e-10] -rho_mob_0: [2.81e12, 2.8e12] +rho_mob_0: [2.81e12, 2.8e+12] rho_dip_0: [1.0, 1.0] # not given -v_0: [1.4e3, 1.4e3] +v_0: [1.4e+3, 1.4e+3] Q_s: [1.57e-19, 1.57e-19] # Delta_F -tau_0: [454e6, 454e6] +tau_0: [454.e+6, 454.e+6] p_sl: [0.325, 0.325] q_sl: [1.55, 1.55] i_sl: [23.3, 23.3] D_a: 7.4 # C_anni B: [0.001, 0.001] h_sl-sl: [0.1, 0.72, 0.1, 0.053, 0.053, 0.073, 0.137, 0.72, 0.72, 0.053, 0.053, 0.053, 0.053, 0.073, 0.073, 0.073, 0.073, 0.073, 0.073, 0.137, 0.073, 0.073, 0.137, 0.073] -D_0: 4.000E-05 -Q_cl: 5.400E-19 # no recovery! -D: 40e-6 # estimated +D_0: 4.0e-05 +Q_cl: 5.4e-19 # no recovery! 
+D: 40.e-6 # estimated diff --git a/examples/config/phase/mechanical/plastic/isotropic_free-surface.yaml b/examples/config/phase/mechanical/plastic/isotropic_free-surface.yaml new file mode 100644 index 000000000..fba7effa8 --- /dev/null +++ b/examples/config/phase/mechanical/plastic/isotropic_free-surface.yaml @@ -0,0 +1,15 @@ +type: isotropic +references: + - T. Maiti and P. Eisenlohr, + Scripta Materialia 145:37-40, 2018, + https://doi.org/10.1016/j.scriptamat.2017.09.047 +output: [xi] +dot_gamma_0: 0.001 +n: 20. +xi_0: 0.3e+6 +xi_inf: 0.6e+6 +a: 2. +h_0: 1.e+6 +M: 1. +h: 1. +dilatation: True diff --git a/examples/config/phase/mechanical/plastic/nonlocal_Al.yaml b/examples/config/phase/mechanical/plastic/nonlocal_Al.yaml index a4a4babb1..7ee79f9e5 100644 --- a/examples/config/phase/mechanical/plastic/nonlocal_Al.yaml +++ b/examples/config/phase/mechanical/plastic/nonlocal_Al.yaml @@ -2,7 +2,8 @@ type: nonlocal references: C. Kords, On the role of dislocation transport in the constitutive description of crystal plasticity, - RWTH Aachen 2013 + RWTH Aachen 2013, + http://publications.rwth-aachen.de/record/229993/files/4862.pdf output: [rho_u_ed_pos, rho_b_ed_pos, rho_u_ed_neg, rho_b_ed_neg, rho_u_sc_pos, rho_b_sc_pos, rho_u_sc_neg, rho_b_sc_neg, rho_d_ed, rho_d_sc] N_sl: [12] @@ -32,7 +33,7 @@ w: 10 # w_k in multiple of b p_sl: 1 q_sl: 1 -nu_a: 50.e9 +nu_a: 50.e+9 B: 1.e-2 f_ed: 1.0 # k_3 @@ -46,4 +47,4 @@ sigma_rho_u: 0 # no random distribution short_range_stress_correction: false -rho_significant: 1e6 +rho_significant: 1.e6 diff --git a/examples/config/phase/mechanical/plastic/nonlocal_Ni.yaml b/examples/config/phase/mechanical/plastic/nonlocal_Ni.yaml index 81b4becd1..c14b912b1 100644 --- a/examples/config/phase/mechanical/plastic/nonlocal_Ni.yaml +++ b/examples/config/phase/mechanical/plastic/nonlocal_Ni.yaml @@ -2,7 +2,8 @@ type: nonlocal references: C. Kords, On the role of dislocation transport in the constitutive description of crystal plasticity, - RWTH Aachen 2013 + RWTH Aachen 2013, + http://publications.rwth-aachen.de/record/229993/files/4862.pdf output: [rho_u_ed_pos, rho_b_ed_pos, rho_u_ed_neg, rho_b_ed_neg, rho_u_sc_pos, rho_b_sc_pos, rho_u_sc_neg, rho_b_sc_neg, rho_d_ed, rho_d_sc] N_sl: [12] @@ -13,10 +14,10 @@ d_sc: [12.e-9] i_sl: [45] # k_2 f_ed_mult: 0.1 # k_1 -rho_u_ed_neg_0: [6.e10] # 2.88e12 / (12*4) -rho_u_ed_pos_0: [6.e10] # 2.88e12 / (12*4) -rho_u_sc_neg_0: [6.e10] # 2.88e12 / (12*4) -rho_u_sc_pos_0: [6.e10] # 2.88e12 / (12*4) +rho_u_ed_neg_0: [6.e+10] # 2.88e12 / (12*4) +rho_u_ed_pos_0: [6.e+10] # 2.88e12 / (12*4) +rho_u_sc_neg_0: [6.e+10] # 2.88e12 / (12*4) +rho_u_sc_pos_0: [6.e+10] # 2.88e12 / (12*4) rho_d_ed_0: [0] rho_d_sc_0: [0] @@ -32,7 +33,7 @@ w: 10 # w_k p_sl: 1 q_sl: 1 -nu_a: 50.e9 +nu_a: 50.e+9 B: 1.e-3 f_ed: 0.01 # k_3 @@ -46,4 +47,4 @@ sigma_rho_u: 0 # no random distribution short_range_stress_correction: false -rho_significant: 1e6 +rho_significant: 1.e6 diff --git a/examples/config/phase/mechanical/plastic/phenopowerlaw_Al.yaml b/examples/config/phase/mechanical/plastic/phenopowerlaw_Al.yaml index 4e4bcf588..8a0692c9b 100644 --- a/examples/config/phase/mechanical/plastic/phenopowerlaw_Al.yaml +++ b/examples/config/phase/mechanical/plastic/phenopowerlaw_Al.yaml @@ -1,15 +1,15 @@ type: phenopowerlaw references: - W.F. Hosford et al., - Acta Metallurgica, 8(3), 187-199, 1960, - 10.1016/0001-6160(60)90127-9, + Acta Metallurgica 8(3):187-199, 1960, + https://doi.org/10.1016/0001-6160(60)90127-9, fitted from Fig. 
5 output: [xi_sl, gamma_sl] N_sl: [12] n_sl: 20 a_sl: 3.1 -h_0_sl-sl: 1.7e8 -xi_0_sl: [5.0e6] -xi_inf_sl: [37.5e6] +h_0_sl-sl: 1.7e+8 +xi_0_sl: [5.0e+6] +xi_inf_sl: [37.5e+6] h_sl-sl: [1, 1, 1.4, 1.4, 1.4, 1.4, 1.4] dot_gamma_0_sl: 4.5e-3 diff --git a/examples/config/phase/mechanical/plastic/phenopowerlaw_Au.yaml b/examples/config/phase/mechanical/plastic/phenopowerlaw_Au.yaml index cb2e6fea1..b5a034c60 100644 --- a/examples/config/phase/mechanical/plastic/phenopowerlaw_Au.yaml +++ b/examples/config/phase/mechanical/plastic/phenopowerlaw_Au.yaml @@ -1,17 +1,17 @@ type: phenopowerlaw references: - D. Ma et al., - Acta Materialia, 103, 796-808, 2016, - 10.1016/j.actamat.2015.11.016 + Acta Materialia 103:796-808, 2016, + https://doi.org/10.1016/j.actamat.2015.11.016 - I. Kovács and G.Vörös, - International Journal of Plasticity, 12, 35-43, 1996, - 10.1016/S0749-6419(95)00043-7 + International Journal of Plasticity 12:35-43, 1996, + https://doi.org/10.1016/S0749-6419(95)00043-7 output: [xi_sl, gamma_sl] N_sl: [12] n_sl: 83.3 a_sl: 1.0 -h_0_sl-sl: 75.0e6 -xi_0_sl: [26.25e6] -xi_inf_sl: [53.0e6] +h_0_sl-sl: 75.0e+6 +xi_0_sl: [26.25e+6] +xi_inf_sl: [53.0e+6] h_sl-sl: [1, 1, 1.4, 1.4, 1.4, 1.4, 1.4] dot_gamma_0_sl: 0.001 diff --git a/examples/config/phase/mechanical/plastic/phenopowerlaw_Cu.yaml b/examples/config/phase/mechanical/plastic/phenopowerlaw_Cu.yaml index 528f55763..23fd38acd 100644 --- a/examples/config/phase/mechanical/plastic/phenopowerlaw_Cu.yaml +++ b/examples/config/phase/mechanical/plastic/phenopowerlaw_Cu.yaml @@ -1,15 +1,15 @@ type: phenopowerlaw references: - T Takeuchi, - Transactions of the Japan Institute of Metals 16(10), 629-640, 1975, - 10.2320/matertrans1960.16.629, + Transactions of the Japan Institute of Metals 16(10):629-640, 1975, + https://doi.org/10.2320/matertrans1960.16.629, fitted from Fig. 3b output: [xi_sl, gamma_sl] N_sl: [12] n_sl: 20 a_sl: 1.0 -h_0_sl-sl: 2.4e8 -xi_0_sl: [1.5e6] -xi_inf_sl: [112.5e6] +h_0_sl-sl: 2.4e+8 +xi_0_sl: [1.5e+6] +xi_inf_sl: [112.5e+6] h_sl-sl: [1, 1, 1.4, 1.4, 1.4, 1.4, 1.4] -dot_gamma_0_sl: 3e-3 +dot_gamma_0_sl: 3.e-3 diff --git a/examples/config/phase/mechanical/plastic/phenopowerlaw_DP-steel-ferrite.yaml b/examples/config/phase/mechanical/plastic/phenopowerlaw_DP-steel-ferrite.yaml index 84c0ecdc9..40812c89d 100644 --- a/examples/config/phase/mechanical/plastic/phenopowerlaw_DP-steel-ferrite.yaml +++ b/examples/config/phase/mechanical/plastic/phenopowerlaw_DP-steel-ferrite.yaml @@ -1,14 +1,14 @@ type: phenopowerlaw references: - C.C. Tasan et al., - Acta Materialia, 81, 386-400, 2014, - 10.1016/j.actamat.2014.07.071 + Acta Materialia 81:386-400, 2014, + https://doi.org/10.1016/j.actamat.2014.07.071 output: [xi_sl, gamma_sl] N_sl: [12, 12] n_sl: 20 a_sl: 2.25 -h_0_sl-sl: 1.0e9 -xi_0_sl: [95.e6, 96.e6] -xi_inf_sl: [222.e6, 412.e6] +h_0_sl-sl: 1.0e+9 +xi_0_sl: [95.e+6, 96.e+6] +xi_inf_sl: [222.e+6, 412.e+6] h_sl-sl: [1, 1.4, 1, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4] dot_gamma_0_sl: 0.001 diff --git a/examples/config/phase/mechanical/plastic/phenopowerlaw_Ti.yaml b/examples/config/phase/mechanical/plastic/phenopowerlaw_Ti.yaml new file mode 100644 index 000000000..5e05206df --- /dev/null +++ b/examples/config/phase/mechanical/plastic/phenopowerlaw_Ti.yaml @@ -0,0 +1,20 @@ +type: phenopowerlaw +references: + - C. Zambaldi et al., + Journal of Materials Research 27(1):356-367, 2012, + https://doi.org/10.1557/jmr.2011.334 + - L.
Wang et al., + Acta Materialia 132:598-610, 2017, + https://doi.org/10.1016/j.actamat.2017.05.015 +output: [gamma_sl] +N_sl: [3, 3, 0, 0, 12] +n_sl: 20 +a_sl: 2.0 +dot_gamma_0_sl: 0.001 +h_0_sl-sl: 200.e+6 +# C. Zambaldi et al.: +xi_0_sl: [349.e+6, 150.e+6, 0.0, 0.0, 1107.e+6] +xi_inf_sl: [568.e+6, 150.e+7, 0.0, 0.0, 3420.e+6] +# L. Wang et al. : +# xi_0_sl: [127.e+6, 96.e+6, 0.0, 0.0, 240.e+6] +h_sl-sl: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] diff --git a/examples/config/phase/thermal/Al.yaml b/examples/config/phase/thermal/Al.yaml index 1f714c743..50273fde3 100644 --- a/examples/config/phase/thermal/Al.yaml +++ b/examples/config/phase/thermal/Al.yaml @@ -1,5 +1,5 @@ references: - - www.engineeringtoolbox.com/thermal-conductivity-metals-d_858.html - - www.engineeringtoolbox.com/specific-heat-metals-d_152.html + - https://www.engineeringtoolbox.com/thermal-conductivity-metals-d_858.html + - https://www.engineeringtoolbox.com/specific-heat-metals-d_152.html C_p: 910.0 K_11: 236.0 diff --git a/examples/config/phase/thermal/Au.yaml b/examples/config/phase/thermal/Au.yaml index f8f4dda4d..43fe9558e 100644 --- a/examples/config/phase/thermal/Au.yaml +++ b/examples/config/phase/thermal/Au.yaml @@ -1,4 +1,4 @@ references: - - de.wikipedia.org/wiki/Gold + - https://de.wikipedia.org/wiki/Gold C_p: 128.0 K_11: 320.0 diff --git a/examples/config/phase/thermal/Cu.yaml b/examples/config/phase/thermal/Cu.yaml index 7f5b89fcc..c171d3245 100644 --- a/examples/config/phase/thermal/Cu.yaml +++ b/examples/config/phase/thermal/Cu.yaml @@ -1,4 +1,4 @@ references: - - www.mit.edu/~6.777/matprops/copper.htm + - https://www.mit.edu/~6.777/matprops/copper.htm C_p: 385.0 K_11: 401.0 diff --git a/examples/config/phase/thermal/Steel-0.5C.yaml b/examples/config/phase/thermal/Steel-0.5C.yaml index 736969e1e..1b1d8995a 100644 --- a/examples/config/phase/thermal/Steel-0.5C.yaml +++ b/examples/config/phase/thermal/Steel-0.5C.yaml @@ -1,5 +1,5 @@ references: - - www.engineeringtoolbox.com/thermal-conductivity-metals-d_858.html - - www.engineeringtoolbox.com/specific-heat-metals-d_152.html + - https://www.engineeringtoolbox.com/thermal-conductivity-metals-d_858.html + - https://www.engineeringtoolbox.com/specific-heat-metals-d_152.html C_p: 490.0 K_11: 54.0 diff --git a/examples/config/phase/thermal/W.yaml b/examples/config/phase/thermal/W.yaml index eba6ab7bc..95918303f 100644 --- a/examples/config/phase/thermal/W.yaml +++ b/examples/config/phase/thermal/W.yaml @@ -1,4 +1,4 @@ references: - - www.mit.edu/~6.777/matprops/tungsten.htm + - https://www.mit.edu/~6.777/matprops/tungsten.htm C_p: 132.51 K_11: 178.0 diff --git a/examples/config/phase/thermal/fast-convection.yaml b/examples/config/phase/thermal/fast-convection.yaml index 4899baaf2..fc5f83609 100644 --- a/examples/config/phase/thermal/fast-convection.yaml +++ b/examples/config/phase/thermal/fast-convection.yaml @@ -1,3 +1,3 @@ C_p: 1 -K_11: 1e30 -K_33: 1e30 +K_11: 1.e+30 +K_33: 1.e+30 diff --git a/examples/grid/material.yaml b/examples/grid/material.yaml index 31cd812fc..a89068e1b 100644 --- a/examples/grid/material.yaml +++ b/examples/grid/material.yaml @@ -9,19 +9,19 @@ phase: lattice: cF mechanical: output: [F, P, F_e, F_p, L_p, O] - elastic: {type: Hooke, C_11: 106.75e9, C_12: 60.41e9, C_44: 28.34e9} + elastic: {type: Hooke, C_11: 106.75e+9, C_12: 60.41e+9, C_44: 28.34e+9} plastic: type: phenopowerlaw N_sl: [12] a_sl: 2.25 atol_xi: 1.0 dot_gamma_0_sl: 0.001 - h_0_sl-sl: 75e6 + 
h_0_sl-sl: 75.e+6 h_sl-sl: [1, 1, 1.4, 1.4, 1.4, 1.4, 1.4] n_sl: 20 output: [xi_sl] - xi_0_sl: [31e6] - xi_inf_sl: [63e6] + xi_0_sl: [31.e+6] + xi_inf_sl: [63.e+6] material: - homogenization: SX diff --git a/examples/grid/shearXY.yaml b/examples/grid/shearXY.yaml index 900877533..9f4ffa389 100644 --- a/examples/grid/shearXY.yaml +++ b/examples/grid/shearXY.yaml @@ -6,7 +6,7 @@ solver: loadstep: - boundary_conditions: mechanical: - dot_F: [ [0, 0, 0], [1e-3, 0, 0], [0, 0, 0] ] + dot_F: [ [0, 0, 0], [1.e-3, 0, 0], [0, 0, 0] ] discretization: t: 60 N: 120 diff --git a/examples/grid/shearZX.yaml b/examples/grid/shearZX.yaml index 0d47e6c8b..9e7ff31a1 100644 --- a/examples/grid/shearZX.yaml +++ b/examples/grid/shearZX.yaml @@ -6,7 +6,7 @@ solver: loadstep: - boundary_conditions: mechanical: - dot_F: [[0, 0, 1e-3], [0, 0, 0], [0, 0, 0]] + dot_F: [[0, 0, 1.e-3], [0, 0, 0], [0, 0, 0]] discretization: t: 60 N: 120 diff --git a/examples/grid/tension-hold-unload.yaml b/examples/grid/tension-hold-unload.yaml deleted file mode 100644 index 59dcd4aec..000000000 --- a/examples/grid/tension-hold-unload.yaml +++ /dev/null @@ -1,37 +0,0 @@ -solver: {mechanical: spectral_basic} - -loadstep: - - boundary_conditions: - mechanical: - P: - - [x, x, x] - - [x, 0, x] - - [x, x, 0] - dot_F: - - [0.001, 0, 0] - - [0, x, 0] - - [0, 0, x] - discretization: {t: 10.0, N: 40} - f_out: 4 - - boundary_conditions: - mechanical: - dot_P: - - [0, x, x] - - [x, x, x] - - [x, x, x] - dot_F: - - [x, 0, 0] - - [0, 0, 0] - - [0, 0, 0] - discretization: {t: 10.0, N: 20} - - boundary_conditions: - mechanical: - P: - - [0, x, x] - - [x, 0, x] - - [x, x, 0] - dot_F: - - [x, 0, 0] - - [0, x, 0] - - [0, 0, x] - discretization: {t: 10.0, N: 20} diff --git a/examples/mesh/material.yaml b/examples/mesh/material.yaml index 5b8c3e929..94bbaa98f 100644 --- a/examples/mesh/material.yaml +++ b/examples/mesh/material.yaml @@ -8,19 +8,19 @@ phase: lattice: cF mechanical: output: [F, P, F_e, F_p, L_p] - elastic: {type: Hooke, C_11: 106.75e9, C_12: 60.41e9, C_44: 28.34e9} + elastic: {type: Hooke, C_11: 106.75e+9, C_12: 60.41e+9, C_44: 28.34e+9} plastic: type: phenopowerlaw N_sl: [12] a_sl: 2.25 atol_xi: 1.0 dot_gamma_0_sl: 0.001 - h_0_sl-sl: 75e6 + h_0_sl-sl: 75.e+6 h_sl-sl: [1, 1, 1.4, 1.4, 1.4, 1.4, 1.4] n_sl: 20 output: [xi_sl] - xi_0_sl: [31e6] - xi_inf_sl: [63e6] + xi_0_sl: [31.e+6] + xi_inf_sl: [63.e+6] material: - constituents: diff --git a/examples/mesh/tensionZ.load b/examples/mesh/tensionZ.load index ffc36a88f..eb9a7c426 100644 --- a/examples/mesh/tensionZ.load +++ b/examples/mesh/tensionZ.load @@ -1,13 +1,13 @@ #initial elastic step $Loadcase 1 time 0.0005 incs 1 frequency 5 Face 1 X 0.01 - Face 2 X 0.00 - Face 2 Y 0.00 - Face 2 Z 0.00 + Face 2 X 0.0 + Face 2 Y 0.0 + Face 2 Z 0.0 $EndLoadcase $Loadcase 2 time 10.0 incs 200 frequency 5 Face 1 X 0.01 - Face 2 X 0.00 - Face 2 Y 0.00 - Face 2 Z 0.00 + Face 2 X 0.0 + Face 2 Y 0.0 + Face 2 Z 0.0 $EndLoadcase diff --git a/install/MarcMentat/installation.txt b/install/MarcMentat/installation.txt index cff143d9f..d527af558 100644 --- a/install/MarcMentat/installation.txt +++ b/install/MarcMentat/installation.txt @@ -18,8 +18,6 @@ APPENDIX: The structure of this directory should be (VERSION = 20XX or 20XX.Y) -./installation.txt this text -./apply_DAMASK_modifications.sh script file to apply modifications to the installation ./VERSION/Marc_tools/comp_user.original original file from installation ./VERSION/Marc_tools/comp_damask_mp modified version using -O1 optimization and OpenMP 
./VERSION/Marc_tools/comp_damask_lmp modified version using -O0 optimization and OpenMP @@ -36,6 +34,7 @@ The structure of this directory should be (VERSION = 20XX or 20XX.Y) ./VERSION/Mentat_bin/submit4 modified version of original calling run_h_marc ./VERSION/Mentat_bin/submit5 modified version of original calling run_marc ./VERSION/Mentat_bin/submit6 modified version of original calling run_l_marc +./VERSION/Mentat_bin/kill1.original original file from installation ./VERSION/Mentat_bin/kill4 kill file for submit4, identical to original kill1 ./VERSION/Mentat_bin/kill5 kill file for submit5, identical to original kill1 ./VERSION/Mentat_bin/kill6 kill file for submit6, identical to original kill1 diff --git a/processing/legacy/geom_grainGrowth.py b/processing/legacy/geom_grainGrowth.py index 23a46b668..d969b415c 100755 --- a/processing/legacy/geom_grainGrowth.py +++ b/processing/legacy/geom_grainGrowth.py @@ -28,6 +28,7 @@ Smoothen interface roughness by simulated curvature flow. This is achieved by the diffusion of each initially sharply bounded grain volume within the periodic domain up to a given distance 'd' voxels. The final geometry is assembled by selecting at each voxel that grain index for which the concentration remains largest. +References 10.1073/pnas.1111557108 (10.1006/jcph.1994.1105) """, version = scriptID) diff --git a/python/damask/VERSION b/python/damask/VERSION index caee80c43..a0034a82e 100644 --- a/python/damask/VERSION +++ b/python/damask/VERSION @@ -1 +1 @@ -v3.0.0-alpha4-10-g0ad4fd00b +v3.0.0-alpha4-65-gae3d844e4 diff --git a/python/damask/_grid.py b/python/damask/_grid.py index c8ce2517a..4eb978427 100644 --- a/python/damask/_grid.py +++ b/python/damask/_grid.py @@ -406,23 +406,19 @@ class Grid: seeds_p = np.vstack((seeds -np.array([size[0],0.,0.]),seeds, seeds +np.array([size[0],0.,0.]))) seeds_p = np.vstack((seeds_p-np.array([0.,size[1],0.]),seeds_p,seeds_p+np.array([0.,size[1],0.]))) seeds_p = np.vstack((seeds_p-np.array([0.,0.,size[2]]),seeds_p,seeds_p+np.array([0.,0.,size[2]]))) - coords = grid_filters.coordinates0_point(cells*3,size*3,-size).reshape(-1,3) else: weights_p = weights seeds_p = seeds - coords = grid_filters.coordinates0_point(cells,size).reshape(-1,3) + + coords = grid_filters.coordinates0_point(cells,size).reshape(-1,3) pool = mp.Pool(int(os.environ.get('OMP_NUM_THREADS',4))) - result = pool.map_async(partial(Grid._find_closest_seed,seeds_p,weights_p), [coord for coord in coords]) + result = pool.map_async(partial(Grid._find_closest_seed,seeds_p,weights_p), coords) pool.close() pool.join() - material_ = np.array(result.get()) + material_ = np.array(result.get()).reshape(cells) - if periodic: - material_ = material_.reshape(cells*3) - material_ = material_[cells[0]:cells[0]*2,cells[1]:cells[1]*2,cells[2]:cells[2]*2]%seeds.shape[0] - else: - material_ = material_.reshape(cells) + if periodic: material_ %= len(weights) return Grid(material = material_ if material is None else material[material_], size = size, @@ -661,6 +657,30 @@ class Grid: updated : damask.Grid Updated grid-based geometry. + Examples + -------- + Add a sphere at the center. + + >>> import numpy as np + >>> import damask + >>> g = damask.Grid(np.zeros([64]*3,int), np.ones(3)*1e-4) + >>> g.add_primitive(np.ones(3)*5e-5,np.ones(3)*5e-5,1) + cells a b c: 64 x 64 x 64 + size x y z: 0.0001 x 0.0001 x 0.0001 + origin x y z: 0.0 0.0 0.0 + # materials: 2 + + Add a cube at the origin. 
+ + >>> import numpy as np + >>> import damask + >>> g = damask.Grid(np.zeros([64]*3,int), np.ones(3)*1e-4) + >>> g.add_primitive(np.ones(3,int)*32,np.zeros(3),np.inf) + cells a b c: 64 x 64 x 64 + size x y z: 0.0001 x 0.0001 x 0.0001 + origin x y z: 0.0 0.0 0.0 + # materials: 2 + """ # radius and center r = np.array(dimension)/2.0*self.size/self.cells if np.array(dimension).dtype in np.sctypes['int'] else \ @@ -706,6 +726,19 @@ class Grid: updated : damask.Grid Updated grid-based geometry. + Examples + -------- + Mirror along x- and y-direction. + + >>> import numpy as np + >>> import damask + >>> g = damask.Grid(np.zeros([32]*3,int), np.ones(3)*1e-4) + >>> g.mirror('xy',True) + cells a b c: 64 x 64 x 32 + size x y z: 0.0002 x 0.0002 x 0.0001 + origin x y z: 0.0 0.0 0.0 + # materials: 1 + """ valid = ['x','y','z'] if not set(directions).issubset(valid): @@ -773,6 +806,19 @@ class Grid: updated : damask.Grid Updated grid-based geometry. + Examples + -------- + Double resolution. + + >>> import numpy as np + >>> import damask + >>> g = damask.Grid(np.zeros([32]*3,int),np.ones(3)*1e-4) + >>> g.scale(g.cells*2) + cells a b c: 64 x 64 x 64 + size x y z: 0.0001 x 0.0001 x 0.0001 + origin x y z: 0.0 0.0 0.0 + # materials: 1 + """ return Grid(material = ndimage.interpolation.zoom( self.material, @@ -903,6 +949,19 @@ class Grid: updated : damask.Grid Updated grid-based geometry. + Examples + -------- + Remove 1/2 of the microstructure in z-direction. + + >>> import numpy as np + >>> import damask + >>> g = damask.Grid(np.zeros([32]*3,int),np.ones(3)*1e-4) + >>> g.canvas(np.array([32,32,16],int)) + cells a b c: 32 x 32 x 16 + size x y z: 0.0001 x 0.0001 x 5e-05 + origin x y z: 0.0 0.0 0.0 + # materials: 1 + """ if offset is None: offset = 0 if fill is None: fill = np.nanmax(self.material) + 1 diff --git a/python/damask/_result.py b/python/damask/_result.py index 7d1700028..88bf4184a 100644 --- a/python/damask/_result.py +++ b/python/damask/_result.py @@ -1732,6 +1732,3 @@ class Result: if flatten: r = util.dict_flatten(r) return None if (type(r) == dict and r == {}) else r - - save_VTK = export_VTK - save_XDMF = export_XDMF diff --git a/python/damask/util.py b/python/damask/util.py index 8cf0d279f..ab204905b 100644 --- a/python/damask/util.py +++ b/python/damask/util.py @@ -577,8 +577,8 @@ class _ProgressBar: self.total = total self.prefix = prefix self.bar_length = bar_length - self.start_time = datetime.datetime.now() - self.last_fraction = 0.0 + self.time_start = self.time_last_update = datetime.datetime.now() + self.fraction_last = 0.0 sys.stderr.write(f"{self.prefix} {'░'*self.bar_length} 0% ETA n/a") sys.stderr.flush() @@ -588,17 +588,17 @@ class _ProgressBar: fraction = (iteration+1) / self.total filled_length = int(self.bar_length * fraction) - delta_time = datetime.datetime.now() - self.start_time - - if filled_length > int(self.bar_length * self.last_fraction) or \ - delta_time > datetime.timedelta(minutes=1): + if filled_length > int(self.bar_length * self.fraction_last) or \ + datetime.datetime.now() - self.time_last_update > datetime.timedelta(seconds=10): + self.time_last_update = datetime.datetime.now() bar = '█' * filled_length + '░' * (self.bar_length - filled_length) - remaining_time = (self.total - (iteration+1)) * delta_time / (iteration+1) + remaining_time = (datetime.datetime.now() - self.time_start) \ * (self.total - (iteration+1)) / (iteration+1) remaining_time -= datetime.timedelta(microseconds=remaining_time.microseconds) # remove μs sys.stderr.write(f'\r{self.prefix} 
{bar} {fraction:>4.0%} ETA {remaining_time}') sys.stderr.flush() - self.last_fraction = fraction + self.fraction_last = fraction if iteration == self.total - 1: sys.stderr.write('\n') diff --git a/python/setup.py b/python/setup.py index 5d66d3c88..ff22c1640 100644 --- a/python/setup.py +++ b/python/setup.py @@ -11,8 +11,8 @@ setuptools.setup( version=version, author='The DAMASK team', author_email='damask@mpie.de', - description='DAMASK library', - long_description='Python library for managing DAMASK simulations', + description='DAMASK processing tools', + long_description='Pre- and post-processing tools for DAMASK', url='https://damask.mpie.de', packages=setuptools.find_packages(), include_package_data=True, diff --git a/src/CPFEM2.f90 b/src/CPFEM2.f90 index bf044bef9..ea5820852 100644 --- a/src/CPFEM2.f90 +++ b/src/CPFEM2.f90 @@ -5,6 +5,7 @@ !-------------------------------------------------------------------------------------------------- module CPFEM2 use prec + use parallelization use config use math use rotations @@ -15,15 +16,16 @@ module CPFEM2 use IO use base64 use DAMASK_interface - use results use discretization + use HDF5 use HDF5_utilities + use results use homogenization use phase -#if defined(Mesh) +#if defined(MESH) use FEM_quadrature use discretization_mesh -#elif defined(Grid) +#elif defined(GRID) use discretization_grid #endif @@ -43,7 +45,7 @@ subroutine CPFEM_initAll call prec_init call IO_init call base64_init -#ifdef Mesh +#ifdef MESH call FEM_quadrature_init #endif call YAML_types_init @@ -54,9 +56,9 @@ subroutine CPFEM_initAll call lattice_init call HDF5_utilities_init call results_init(restart=interface_restartInc>0) -#if defined(Mesh) +#if defined(MESH) call discretization_mesh_init(restart=interface_restartInc>0) -#elif defined(Grid) +#elif defined(GRID) call discretization_grid_init(restart=interface_restartInc>0) #endif call material_init(restart=interface_restartInc>0) diff --git a/src/DAMASK_Marc.f90 b/src/DAMASK_Marc.f90 index 910ca86c0..e4859b4ab 100644 --- a/src/DAMASK_Marc.f90 +++ b/src/DAMASK_Marc.f90 @@ -11,15 +11,17 @@ #define QUOTE(x) #x #define PASTE(x,y) x ## y +#ifdef Marc4DAMASK +#define MARC4DAMASK Marc4DAMASK +#endif + #include "prec.f90" module DAMASK_interface use prec -#if __INTEL_COMPILER >= 1800 use, intrinsic :: ISO_fortran_env, only: & compiler_version, & compiler_options -#endif use ifport, only: & CHDIR @@ -212,8 +214,8 @@ subroutine hypela2(d,g,e,de,s,t,dt,ngens,m,nn,kcus,matus,ndi,nshear,disp, & ! Marc common blocks are in fixed format so they have to be reformated to free format (f90) ! Beware of changes in newer Marc versions -#include QUOTE(PASTE(./Marc/include/concom,Marc4DAMASK)) ! concom is needed for inc, lovl -#include QUOTE(PASTE(./Marc/include/creeps,Marc4DAMASK)) ! creeps is needed for timinc (time increment) +#include QUOTE(PASTE(./Marc/include/concom,MARC4DAMASK)) ! concom is needed for inc, lovl +#include QUOTE(PASTE(./Marc/include/creeps,MARC4DAMASK)) ! creeps is needed for timinc (time increment) logical :: cutBack real(pReal), dimension(6) :: stress @@ -365,7 +367,7 @@ subroutine uedinc(inc,incsub) integer :: n, nqncomp, nqdatatype integer, save :: inc_written real(pReal), allocatable, dimension(:,:) :: d_n -#include QUOTE(PASTE(./Marc/include/creeps,Marc4DAMASK)) ! creeps is needed for timinc (time increment) +#include QUOTE(PASTE(./Marc/include/creeps,MARC4DAMASK)) ! 
creeps is needed for timinc (time increment) if (inc > inc_written) then diff --git a/src/DAMASK_interface.f90 b/src/DAMASK_interface.f90 index e5fbe20ac..52f58d363 100644 --- a/src/DAMASK_interface.f90 +++ b/src/DAMASK_interface.f90 @@ -86,6 +86,11 @@ subroutine DAMASK_interface_init print*, ' _/ _/ _/_/_/_/ _/ _/ _/ _/_/_/_/ _/_/ _/_/ _/_/' print*, ' _/ _/ _/ _/ _/ _/ _/ _/ _/ _/ _/ _/' print*, ' _/_/_/ _/ _/ _/ _/ _/ _/ _/_/_/ _/ _/ _/_/_/' +#if defined(GRID) + print*, ' Grid solver' +#elif defined(MESH) + print*, ' Mesh solver' +#endif #ifdef DEBUG print'(/,a)', ' debug version - debug version - debug version - debug version - debug version' #endif diff --git a/src/HDF5_utilities.f90 b/src/HDF5_utilities.f90 index 8c72fbbfb..638b74c0e 100644 --- a/src/HDF5_utilities.f90 +++ b/src/HDF5_utilities.f90 @@ -6,15 +6,19 @@ !-------------------------------------------------------------------------------------------------- module HDF5_utilities use HDF5 -#ifdef PETSc - use PETSC +#ifdef PETSC +#include + use PETScSys +#if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY) + use MPI +#endif #endif use prec use parallelization implicit none - public + private !-------------------------------------------------------------------------------------------------- !> @brief reads integer or float data of defined shape from file @@ -71,11 +75,23 @@ module HDF5_utilities module procedure HDF5_addAttribute_real_array end interface HDF5_addAttribute -#ifdef PETSc +#ifdef PETSC logical, parameter, private :: parallel_default = .true. #else logical, parameter, private :: parallel_default = .false. #endif + public :: & + HDF5_utilities_init, & + HDF5_read, & + HDF5_write, & + HDF5_addAttribute, & + HDF5_addGroup, & + HDF5_openGroup, & + HDF5_closeGroup, & + HDF5_openFile, & + HDF5_closeFile, & + HDF5_objectExists, & + HDF5_setLink contains @@ -130,7 +146,7 @@ integer(HID_T) function HDF5_openFile(fileName,mode) call h5pcreate_f(H5P_FILE_ACCESS_F, plist_id, hdferr) if(hdferr < 0) error stop 'HDF5 error' -#ifdef PETSc +#ifdef PETSC call h5pset_fapl_mpio_f(plist_id, PETSC_COMM_WORLD, MPI_INFO_NULL, hdferr) if(hdferr < 0) error stop 'HDF5 error' #endif @@ -187,7 +203,7 @@ integer(HID_T) function HDF5_addGroup(fileHandle,groupName) !------------------------------------------------------------------------------------------------- ! setting I/O mode to collective -#ifdef PETSc +#ifdef PETSC call h5pset_all_coll_metadata_ops_f(aplist_id, .true., hdferr) if(hdferr < 0) error stop 'HDF5 error' #endif @@ -223,7 +239,7 @@ integer(HID_T) function HDF5_openGroup(fileHandle,groupName) !------------------------------------------------------------------------------------------------- ! setting I/O mode to collective -#ifdef PETSc +#ifdef PETSC call h5pget_all_coll_metadata_ops_f(aplist_id, is_collective, hdferr) if(hdferr < 0) error stop 'HDF5 error' #endif @@ -1692,7 +1708,7 @@ subroutine initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_ !-------------------------------------------------------------------------------------------------- readSize = 0 readSize(worldrank+1) = int(localShape(ubound(localShape,1))) -#ifdef PETSc +#ifdef PETSC if (parallel) then call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr) if(hdferr < 0) error stop 'HDF5 error' @@ -1713,7 +1729,7 @@ subroutine initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_ ! 
creating a property list for IO and set it to collective call h5pcreate_f(H5P_DATASET_ACCESS_F, aplist_id, hdferr) if(hdferr < 0) error stop 'HDF5 error' -#ifdef PETSc +#ifdef PETSC call h5pset_all_coll_metadata_ops_f(aplist_id, .true., hdferr) if(hdferr < 0) error stop 'HDF5 error' #endif @@ -1782,7 +1798,7 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, & ! creating a property list for transfer properties (is collective when reading in parallel) call h5pcreate_f(H5P_DATASET_XFER_F, plist_id, hdferr) if(hdferr < 0) error stop 'HDF5 error' -#ifdef PETSc +#ifdef PETSC if (parallel) then call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr) if(hdferr < 0) error stop 'HDF5 error' @@ -1793,7 +1809,7 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, & ! determine the global data layout among all processes writeSize = 0 writeSize(worldrank+1) = int(myShape(ubound(myShape,1))) -#ifdef PETSc +#ifdef PETSC if (parallel) then call MPI_allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get total output size over each process if (ierr /= 0) error stop 'MPI error' diff --git a/src/Marc/discretization_Marc.f90 b/src/Marc/discretization_Marc.f90 index 88020f046..d93eea2d5 100644 --- a/src/Marc/discretization_Marc.f90 +++ b/src/Marc/discretization_Marc.f90 @@ -5,7 +5,7 @@ !> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH !> @brief Sets up the mesh for the solver MSC.Marc !-------------------------------------------------------------------------------------------------- -module discretization_marc +module discretization_Marc use IO use prec use math @@ -72,15 +72,12 @@ subroutine discretization_Marc_init class(tNode), pointer :: & num_commercialFEM - print'(/,a)', ' <<<+- discretization_marc init -+>>>'; flush(6) -!--------------------------------------------------------------------------------- -! read debug parameters + print'(/,a)', ' <<<+- discretization_Marc init -+>>>'; flush(6) + debug_e = config_debug%get_asInt('element',defaultVal=1) debug_i = config_debug%get_asInt('integrationpoint',defaultVal=1) -!-------------------------------------------------------------------------------- -! read numerics parameter and do sanity check num_commercialFEM => config_numerics%get('commercialFEM',defaultVal = emptyDict) mesh_unitlength = num_commercialFEM%get_asFloat('unitlength',defaultVal=1.0_pReal) ! 
set physical extent of a length unit in mesh if (mesh_unitlength <= 0.0_pReal) call IO_error(301,ext_msg='unitlength') @@ -318,6 +315,7 @@ subroutine inputRead_matNumber(matNumber, & integer, allocatable, dimension(:) :: chunkPos integer :: i, j, data_blocks, l + do l = 1, size(fileContent) chunkPos = IO_stringPos(fileContent(l)) if(chunkPos(1) < 1) cycle @@ -382,6 +380,7 @@ subroutine inputRead_NelemSets(nElemSets,maxNelemInSet,& integer, allocatable, dimension(:) :: chunkPos integer :: i,l,elemInCurrentSet + nElemSets = 0 maxNelemInSet = 0 @@ -465,6 +464,7 @@ subroutine inputRead_mapElems(FEM2DAMASK, & integer, allocatable, dimension(:) :: chunkPos integer :: i,j,l,nNodesAlreadyRead + do l = 1, size(fileContent) chunkPos = IO_stringPos(fileContent(l)) if(chunkPos(1) < 1) cycle @@ -508,6 +508,7 @@ subroutine inputRead_mapNodes(FEM2DAMASK, & integer, allocatable, dimension(:) :: chunkPos integer :: i, l + do l = 1, size(fileContent) chunkPos = IO_stringPos(fileContent(l)) if(chunkPos(1) < 1) cycle @@ -542,6 +543,7 @@ subroutine inputRead_elemNodes(nodes, & integer, allocatable, dimension(:) :: chunkPos integer :: i,j,m,l + allocate(nodes(3,nNode)) do l = 1, size(fileContent) @@ -551,9 +553,7 @@ subroutine inputRead_elemNodes(nodes, & chunkPos = [4,1,10,11,30,31,50,51,70] do i=1,nNode m = discretization_Marc_FEM2DAMASK_node(IO_intValue(fileContent(l+1+i),chunkPos,1)) - do j = 1,3 - nodes(j,m) = mesh_unitlength * IO_floatValue(fileContent(l+1+i),chunkPos,j+1) - enddo + nodes(1:3,m) = [(mesh_unitlength * IO_floatValue(fileContent(l+1+i),chunkPos,j+1),j=1,3)] enddo exit endif @@ -575,6 +575,7 @@ subroutine inputRead_elemType(elem, & integer, allocatable, dimension(:) :: chunkPos integer :: i,j,t,l,remainingChunks + t = -1 do l = 1, size(fileContent) chunkPos = IO_stringPos(fileContent(l)) @@ -664,6 +665,7 @@ function inputRead_connectivityElem(nElem,nNodes,fileContent) integer, dimension(1+nElem) :: contInts integer :: i,k,j,t,e,l,nNodesAlreadyRead + do l = 1, size(fileContent) chunkPos = IO_stringPos(fileContent(l)) if(chunkPos(1) < 1) cycle @@ -698,6 +700,7 @@ end function inputRead_connectivityElem !-------------------------------------------------------------------------------------------------- !> @brief Store material ID +!> @details 0-based ID in file is converted to 1-based ID used in DAMASK !-------------------------------------------------------------------------------------------------- subroutine inputRead_material(materialAt,& nElem,nNodes,nameElemSet,mapElemSet,initialcondTableStyle,fileContent) @@ -709,16 +712,16 @@ subroutine inputRead_material(materialAt,& nNodes, & !< number of nodes per element initialcondTableStyle character(len=*), dimension(:), intent(in) :: nameElemSet - integer, dimension(:,:), intent(in) :: mapElemSet !< list of elements in elementSet - character(len=*), dimension(:), intent(in) :: fileContent !< file content, separated per lines + integer, dimension(:,:), intent(in) :: mapElemSet !< list of elements in elementSet + character(len=*), dimension(:), intent(in) :: fileContent !< file content, separated per lines integer, allocatable, dimension(:) :: chunkPos integer, dimension(1+nElem) :: contInts - integer :: i,j,t,sv,myVal,e,nNodesAlreadyRead,l,k,m + integer :: i,j,t,sv,ID,e,nNodesAlreadyRead,l,k,m - allocate(materialAt(nElem),source=0) + allocate(materialAt(nElem)) do l = 1, size(fileContent) chunkPos = IO_stringPos(fileContent(l)) @@ -727,17 +730,17 @@ subroutine inputRead_material(materialAt,& IO_lc(IO_stringValue(fileContent(l),chunkPos,2)) == 
'state') then k = merge(2,1,initialcondTableStyle == 2) chunkPos = IO_stringPos(fileContent(l+k)) - sv = IO_IntValue(fileContent(l+k),chunkPos,1) ! figure state variable index - if( (sv == 2)) then ! state var 2 is used to identify material from material.yaml + sv = IO_IntValue(fileContent(l+k),chunkPos,1) ! # of state variable + if (sv == 2) then ! state var 2 gives material ID m = 1 chunkPos = IO_stringPos(fileContent(l+k+m)) do while (scan(IO_stringValue(fileContent(l+k+m),chunkPos,1),'+-',back=.true.)>1) ! is noEfloat value? - myVal = nint(IO_floatValue(fileContent(l+k+m),chunkPos,1)) + ID = nint(IO_floatValue(fileContent(l+k+m),chunkPos,1)) if (initialcondTableStyle == 2) m = m + 2 contInts = continuousIntValues(fileContent(l+k+m+1:),nElem,nameElemSet,mapElemSet,size(nameElemSet)) ! get affected elements do i = 1,contInts(1) e = discretization_Marc_FEM2DAMASK_elem(contInts(1+i)) - materialAt(e) = myVal + materialAt(e) = ID + 1 enddo if (initialcondTableStyle == 0) m = m + 1 enddo @@ -861,7 +864,7 @@ pure subroutine buildCells(connectivity,definition, & do while (n+j<= size(candidates_local)*Nelem) if (any(candidates_global(1:2*nParentNodes,n+j)/=candidates_global(1:2*nParentNodes,n))) exit where (connectivity(:,:,candidates_global(nParentNodes*2+1,n+j)) == -candidates_global(nParentNodes*2+2,n+j)) ! still locally defined - connectivity(:,:,candidates_global(nParentNodes*2+1,n+j)) = nCellNode + 1 ! gets current new cell node id + connectivity(:,:,candidates_global(nParentNodes*2+1,n+j)) = nCellNode + 1 ! get current new cell node id end where j = j+1 @@ -1195,12 +1198,13 @@ logical function containsRange(str,chunkPos) character(len=*), intent(in) :: str integer, dimension(:), intent(in) :: chunkPos !< positions of start and end of each tag/chunk in given string + containsRange = .False. if(chunkPos(1) == 3) then - if(IO_lc(IO_stringValue(str,chunkPos,2)) == 'to') containsRange = .True. + if(IO_lc(IO_stringValue(str,chunkPos,2)) == 'to') containsRange = .True. endif end function containsRange -end module discretization_marc +end module discretization_Marc diff --git a/src/YAML_parse.f90 b/src/YAML_parse.f90 index 8e20e0c29..971fc208c 100644 --- a/src/YAML_parse.f90 +++ b/src/YAML_parse.f90 @@ -199,11 +199,7 @@ logical function isKeyValue(line) isKeyValue = .false. if( .not. isKey(line) .and. index(IO_rmComment(line),':') > 0 .and. .not. isFlow(line)) then - if(index(IO_rmComment(line),': ') > 0) then - isKeyValue = .true. - else - call IO_error(704,ext_msg=line) - endif + if(index(IO_rmComment(line),': ') > 0) isKeyValue = .true. 
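The isKeyValue change above reduces the test to "does the uncommented part of the line contain a colon followed by a space"; the error for a missing space (IO_error 704) moves into keyValue_toFlow in the next hunk. A minimal Python sketch of that detection rule, assuming '#' comments (roughly what IO_rmComment strips) and ignoring the isKey/isFlow special cases handled elsewhere in YAML_parse.f90:

    def is_key_value(line: str) -> bool:
        """Simplified analogue of isKeyValue: a scalar value needs ': ' (colon plus space)."""
        content = line.split('#', 1)[0]   # drop trailing comment
        return ': ' in content

    assert is_key_value('a: 1')           # key-value pair on one line
    assert not is_key_value('a:1')        # missing space; per this diff reported later as error 704
    assert not is_key_value('a:')         # key only, value follows on the next line(s)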
endif end function isKeyValue @@ -418,6 +414,7 @@ recursive subroutine keyValue_toFlow(flow,s_flow,line) offset_value col_pos = index(line,':') + if(line(col_pos+1:col_pos+1) /= ' ') call IO_error(704,ext_msg=line) if(isFlow(line(col_pos+1:))) then d_flow = len_trim(adjustl(line(:col_pos))) flow(s_flow:s_flow+d_flow+1) = trim(adjustl(line(:col_pos)))//' ' diff --git a/src/grid/DAMASK_grid.f90 b/src/grid/DAMASK_grid.f90 index d1b90c42b..e98b2d818 100644 --- a/src/grid/DAMASK_grid.f90 +++ b/src/grid/DAMASK_grid.f90 @@ -8,7 +8,11 @@ !-------------------------------------------------------------------------------------------------- program DAMASK_grid #include - use PETScsys + use PETScSys +#if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY) + use MPI_f08 +#endif + use prec use parallelization use DAMASK_interface @@ -432,7 +436,7 @@ program DAMASK_grid print'(/,a,i0,a)', ' increment ', totalIncsCounter, ' NOT converged' endif; flush(IO_STDOUT) - call MPI_Allreduce(interface_SIGUSR1,signal,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr) + call MPI_Allreduce(interface_SIGUSR1,signal,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr) if (ierr /= 0) error stop 'MPI error' if (mod(inc,loadCases(l)%f_out) == 0 .or. signal) then print'(1/,a)', ' ... writing results to file ......................................' @@ -440,14 +444,14 @@ program DAMASK_grid call CPFEM_results(totalIncsCounter,time) endif if(signal) call interface_setSIGUSR1(.false.) - call MPI_Allreduce(interface_SIGUSR2,signal,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr) + call MPI_Allreduce(interface_SIGUSR2,signal,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr) if (ierr /= 0) error stop 'MPI error' if (mod(inc,loadCases(l)%f_restart) == 0 .or. signal) then call mechanical_restartWrite call CPFEM_restartWrite endif if(signal) call interface_setSIGUSR2(.false.) 
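In the DAMASK_grid.f90 hunk above, the per-rank SIGUSR1/SIGUSR2 flags (and SIGTERM just below) are combined with a logical-OR allreduce, now over MPI_COMM_WORLD, so that every rank takes the same write-results/write-restart/terminate decision; the grid_damage_spectral and grid_thermal_spectral hunks further below apply the same idea with a logical AND to the staggered-iteration convergence flag. A rough Python analogue of both patterns, assuming mpi4py (a sketch, not DAMASK code):

    from mpi4py import MPI

    comm = MPI.COMM_WORLD

    def any_rank(flag: bool) -> bool:
        # analogue of MPI_Allreduce(...,MPI_LOGICAL,MPI_LOR,...): act if any rank saw the signal
        return comm.allreduce(flag, op=MPI.LOR)

    def all_ranks(flag: bool) -> bool:
        # analogue of the MPI_LAND reduction of solution%stagConverged
        return comm.allreduce(flag, op=MPI.LAND)

    saw_sigusr1 = (comm.rank == 0)        # pretend only rank 0 caught the signal
    if any_rank(saw_sigusr1):
        print(f'rank {comm.rank}: writing results')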
- call MPI_Allreduce(interface_SIGTERM,signal,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr) + call MPI_Allreduce(interface_SIGTERM,signal,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr) if (ierr /= 0) error stop 'MPI error' if (signal) exit loadCaseLooping endif skipping diff --git a/src/grid/discretization_grid.f90 b/src/grid/discretization_grid.f90 index 312c79aa7..8d1d38503 100644 --- a/src/grid/discretization_grid.f90 +++ b/src/grid/discretization_grid.f90 @@ -6,7 +6,10 @@ !-------------------------------------------------------------------------------------------------- module discretization_grid #include - use PETScsys + use PETScSys +#if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY) + use MPI_f08 +#endif use prec use parallelization @@ -75,12 +78,12 @@ subroutine discretization_grid_init(restart) endif - call MPI_Bcast(grid,3,MPI_INTEGER,0,PETSC_COMM_WORLD, ierr) + call MPI_Bcast(grid,3,MPI_INTEGER,0,MPI_COMM_WORLD, ierr) if (ierr /= 0) error stop 'MPI error' if (grid(1) < 2) call IO_error(844, ext_msg='cells(1) must be larger than 1') - call MPI_Bcast(geomSize,3,MPI_DOUBLE,0,PETSC_COMM_WORLD, ierr) + call MPI_Bcast(geomSize,3,MPI_DOUBLE,0,MPI_COMM_WORLD, ierr) if (ierr /= 0) error stop 'MPI error' - call MPI_Bcast(origin,3,MPI_DOUBLE,0,PETSC_COMM_WORLD, ierr) + call MPI_Bcast(origin,3,MPI_DOUBLE,0,MPI_COMM_WORLD, ierr) if (ierr /= 0) error stop 'MPI error' print'(/,a,3(i12 ))', ' cells a b c: ', grid @@ -105,13 +108,13 @@ subroutine discretization_grid_init(restart) myGrid = [grid(1:2),grid3] mySize = [geomSize(1:2),size3] - call MPI_Gather(product(grid(1:2))*grid3Offset,1,MPI_INTEGER,displs, 1,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr) + call MPI_Gather(product(grid(1:2))*grid3Offset,1,MPI_INTEGER,displs, 1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr) if (ierr /= 0) error stop 'MPI error' - call MPI_Gather(product(myGrid), 1,MPI_INTEGER,sendcounts,1,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr) + call MPI_Gather(product(myGrid), 1,MPI_INTEGER,sendcounts,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr) if (ierr /= 0) error stop 'MPI error' allocate(materialAt(product(myGrid))) - call MPI_scatterv(materialAt_global,sendcounts,displs,MPI_INTEGER,materialAt,size(materialAt),MPI_INTEGER,0,PETSC_COMM_WORLD,ierr) + call MPI_Scatterv(materialAt_global,sendcounts,displs,MPI_INTEGER,materialAt,size(materialAt),MPI_INTEGER,0,MPI_COMM_WORLD,ierr) if (ierr /= 0) error stop 'MPI error' call discretization_init(materialAt, & diff --git a/src/grid/grid_damage_spectral.f90 b/src/grid/grid_damage_spectral.f90 index 26e0a909e..162d665cb 100644 --- a/src/grid/grid_damage_spectral.f90 +++ b/src/grid/grid_damage_spectral.f90 @@ -7,8 +7,11 @@ module grid_damage_spectral #include #include - use PETScdmda - use PETScsnes + use PETScDMDA + use PETScSNES +#if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY) + use MPI_f08 +#endif use prec use parallelization @@ -107,7 +110,7 @@ subroutine grid_damage_spectral_init() call SNESSetOptionsPrefix(damage_snes,'damage_',ierr);CHKERRQ(ierr) localK = 0 localK(worldrank) = grid3 - call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr) call DMDACreate3D(PETSC_COMM_WORLD, & DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary DMDA_STENCIL_BOX, & ! 
Moore (26) neighborhood around central point @@ -187,10 +190,10 @@ function grid_damage_spectral_solution(timeinc) result(solution) endif stagNorm = maxval(abs(phi_current - phi_stagInc)) solnNorm = maxval(abs(phi_current)) - call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1,MPI_DOUBLE,MPI_MAX,PETSC_COMM_WORLD,ierr) - call MPI_Allreduce(MPI_IN_PLACE,solnNorm,1,MPI_DOUBLE,MPI_MAX,PETSC_COMM_WORLD,ierr) - phi_stagInc = phi_current + call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD,ierr) solution%stagConverged = stagNorm < max(num%eps_damage_atol, num%eps_damage_rtol*solnNorm) + call MPI_Allreduce(MPI_IN_PLACE,solution%stagConverged,1,MPI_LOGICAL,MPI_LAND,MPI_COMM_WORLD,ierr) + phi_stagInc = phi_current !-------------------------------------------------------------------------------------------------- ! updating damage state @@ -320,9 +323,9 @@ subroutine updateReference() enddo K_ref = K_ref*wgt - call MPI_Allreduce(MPI_IN_PLACE,K_ref,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,K_ref,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr) mu_ref = mu_ref*wgt - call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr) end subroutine updateReference diff --git a/src/grid/grid_mech_FEM.f90 b/src/grid/grid_mech_FEM.f90 index f603407ce..77678137d 100644 --- a/src/grid/grid_mech_FEM.f90 +++ b/src/grid/grid_mech_FEM.f90 @@ -7,13 +7,17 @@ module grid_mechanical_FEM #include #include - use PETScdmda - use PETScsnes + use PETScDMDA + use PETScSNES +#if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY) + use MPI_f08 +#endif use prec use parallelization use DAMASK_interface use IO + use HDF5 use HDF5_utilities use math use rotations @@ -162,7 +166,7 @@ subroutine grid_mechanical_FEM_init CHKERRQ(ierr) localK = 0 localK(worldrank) = grid3 - call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr) call DMDACreate3d(PETSC_COMM_WORLD, & DM_BOUNDARY_PERIODIC, DM_BOUNDARY_PERIODIC, DM_BOUNDARY_PERIODIC, & DMDA_STENCIL_BOX, & @@ -236,16 +240,16 @@ subroutine grid_mechanical_FEM_init groupHandle = HDF5_openGroup(fileHandle,'solver') call HDF5_read(P_aim,groupHandle,'P_aim',.false.) - call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr) + call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) if(ierr /=0) error stop 'MPI error' call HDF5_read(F_aim,groupHandle,'F_aim',.false.) - call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr) + call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) if(ierr /=0) error stop 'MPI error' call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.) - call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr) + call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) if(ierr /=0) error stop 'MPI error' call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.) 
- call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr) + call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) if(ierr /=0) error stop 'MPI error' call HDF5_read(F,groupHandle,'F') call HDF5_read(F_lastInc,groupHandle,'F_lastInc') @@ -270,10 +274,10 @@ subroutine grid_mechanical_FEM_init restartRead2: if (interface_restartInc > 0) then print'(a,i0,a)', ' reading more restart data of increment ', interface_restartInc, ' from file' call HDF5_read(C_volAvg,groupHandle,'C_volAvg',.false.) - call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr) + call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) if(ierr /=0) error stop 'MPI error' call HDF5_read(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.) - call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr) + call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) if(ierr /=0) error stop 'MPI error' call HDF5_closeGroup(groupHandle) @@ -567,7 +571,7 @@ subroutine formResidual(da_local,x_local, & call utilities_constitutiveResponse(P_current,& P_av,C_volAvg,devNull, & F,params%timeinc,params%rotation_BC) - call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr) !-------------------------------------------------------------------------------------------------- ! stress BC handling diff --git a/src/grid/grid_mech_spectral_basic.f90 b/src/grid/grid_mech_spectral_basic.f90 index aee4b4427..83b961023 100644 --- a/src/grid/grid_mech_spectral_basic.f90 +++ b/src/grid/grid_mech_spectral_basic.f90 @@ -7,13 +7,17 @@ module grid_mechanical_spectral_basic #include #include - use PETScdmda - use PETScsnes + use PETScDMDA + use PETScSNES +#if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY) + use MPI_f08 +#endif use prec use parallelization use DAMASK_interface use IO + use HDF5 use HDF5_utilities use math use rotations @@ -98,7 +102,11 @@ subroutine grid_mechanical_spectral_basic_init F ! pointer to solution data PetscInt, dimension(0:worldsize-1) :: localK integer(HID_T) :: fileHandle, groupHandle - integer :: fileUnit +#if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY) + type(MPI_File) :: fileUnit +#else + integer :: fileUnit +#endif class (tNode), pointer :: & num_grid, & debug_grid @@ -153,7 +161,7 @@ subroutine grid_mechanical_spectral_basic_init call SNESSetOptionsPrefix(snes,'mechanical_',ierr);CHKERRQ(ierr) localK = 0 localK(worldrank) = grid3 - call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr) call DMDACreate3d(PETSC_COMM_WORLD, & DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point @@ -184,16 +192,16 @@ subroutine grid_mechanical_spectral_basic_init groupHandle = HDF5_openGroup(fileHandle,'solver') call HDF5_read(P_aim,groupHandle,'P_aim',.false.) - call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr) + call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) if(ierr /=0) error stop 'MPI error' call HDF5_read(F_aim,groupHandle,'F_aim',.false.) 
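The restart blocks in grid_mech_FEM and the spectral solvers (above and below) follow one pattern: small solver-state quantities such as F_aim, F_aimDot and C_volAvg are read without parallel I/O and then broadcast, now over MPI_COMM_WORLD, so that every rank holds identical values, while the large fields F and F_lastInc are read collectively. A condensed Python sketch of the read-then-broadcast part, assuming h5py and mpi4py and a hypothetical file layout (illustrative only):

    import numpy as np
    import h5py
    from mpi4py import MPI

    comm = MPI.COMM_WORLD

    def read_small(path: str, dataset: str) -> np.ndarray:
        """One rank reads a small dataset; the result is broadcast to all ranks."""
        data = None
        if comm.rank == 0:
            with h5py.File(path, 'r') as f:
                data = f[dataset][()]
        return comm.bcast(data, root=0)    # analogue of MPI_Bcast(...,0,MPI_COMM_WORLD,ierr)

    # F_aim = read_small('job_restart.hdf5', 'solver/F_aim')   # hypothetical file/dataset names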
- call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr) + call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) if(ierr /=0) error stop 'MPI error' call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.) - call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr) + call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) if(ierr /=0) error stop 'MPI error' call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.) - call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr) + call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) if(ierr /=0) error stop 'MPI error' call HDF5_read(F,groupHandle,'F') call HDF5_read(F_lastInc,groupHandle,'F_lastInc') @@ -213,16 +221,16 @@ subroutine grid_mechanical_spectral_basic_init restartRead2: if (interface_restartInc > 0) then print'(a,i0,a)', ' reading more restart data of increment ', interface_restartInc, ' from file' call HDF5_read(C_volAvg,groupHandle,'C_volAvg',.false.) - call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr) + call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) if(ierr /=0) error stop 'MPI error' call HDF5_read(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.) - call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr) + call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) if(ierr /=0) error stop 'MPI error' call HDF5_closeGroup(groupHandle) call HDF5_closeFile(fileHandle) - call MPI_File_open(PETSC_COMM_WORLD, trim(getSolverJobName())//'.C_ref', & + call MPI_File_open(MPI_COMM_WORLD, trim(getSolverJobName())//'.C_ref', & MPI_MODE_RDONLY,MPI_INFO_NULL,fileUnit,ierr) call MPI_File_read(fileUnit,C_minMaxAvg,81,MPI_DOUBLE,MPI_STATUS_IGNORE,ierr) call MPI_File_close(fileUnit,ierr) @@ -487,7 +495,7 @@ subroutine formResidual(in, F, & call utilities_constitutiveResponse(residuum, & ! "residuum" gets field of first PK stress (to save memory) P_av,C_volAvg,C_minMaxAvg, & F,params%timeinc,params%rotation_BC) - call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr) !-------------------------------------------------------------------------------------------------- ! stress BC handling diff --git a/src/grid/grid_mech_spectral_polarisation.f90 b/src/grid/grid_mech_spectral_polarisation.f90 index 218e42678..07edc84b5 100644 --- a/src/grid/grid_mech_spectral_polarisation.f90 +++ b/src/grid/grid_mech_spectral_polarisation.f90 @@ -7,13 +7,17 @@ module grid_mechanical_spectral_polarisation #include #include - use PETScdmda - use PETScsnes + use PETScDMDA + use PETScSNES +#if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY) + use MPI_f08 +#endif use prec use parallelization use DAMASK_interface use IO + use HDF5 use HDF5_utilities use math use rotations @@ -111,7 +115,11 @@ subroutine grid_mechanical_spectral_polarisation_init F_tau ! 
specific (sub)pointer PetscInt, dimension(0:worldsize-1) :: localK integer(HID_T) :: fileHandle, groupHandle - integer :: fileUnit +#if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY) + type(MPI_File) :: fileUnit +#else + integer :: fileUnit +#endif class (tNode), pointer :: & num_grid, & debug_grid @@ -173,7 +181,7 @@ subroutine grid_mechanical_spectral_polarisation_init call SNESSetOptionsPrefix(snes,'mechanical_',ierr);CHKERRQ(ierr) localK = 0 localK(worldrank) = grid3 - call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr) call DMDACreate3d(PETSC_COMM_WORLD, & DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point @@ -206,16 +214,16 @@ subroutine grid_mechanical_spectral_polarisation_init groupHandle = HDF5_openGroup(fileHandle,'solver') call HDF5_read(P_aim,groupHandle,'P_aim',.false.) - call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr) + call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) if(ierr /=0) error stop 'MPI error' call HDF5_read(F_aim,groupHandle,'F_aim',.false.) - call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr) + call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) if(ierr /=0) error stop 'MPI error' call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.) - call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr) + call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) if(ierr /=0) error stop 'MPI error' call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.) - call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr) + call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) if(ierr /=0) error stop 'MPI error' call HDF5_read(F,groupHandle,'F') call HDF5_read(F_lastInc,groupHandle,'F_lastInc') @@ -239,16 +247,16 @@ subroutine grid_mechanical_spectral_polarisation_init restartRead2: if (interface_restartInc > 0) then print'(a,i0,a)', ' reading more restart data of increment ', interface_restartInc, ' from file' call HDF5_read(C_volAvg,groupHandle,'C_volAvg',.false.) - call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr) + call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) if(ierr /=0) error stop 'MPI error' call HDF5_read(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.) 
- call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr) + call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) if(ierr /=0) error stop 'MPI error' call HDF5_closeGroup(groupHandle) call HDF5_closeFile(fileHandle) - call MPI_File_open(PETSC_COMM_WORLD, trim(getSolverJobName())//'.C_ref', & + call MPI_File_open(MPI_COMM_WORLD, trim(getSolverJobName())//'.C_ref', & MPI_MODE_RDONLY,MPI_INFO_NULL,fileUnit,ierr) call MPI_File_read(fileUnit,C_minMaxAvg,81,MPI_DOUBLE,MPI_STATUS_IGNORE,ierr) call MPI_File_close(fileUnit,ierr) @@ -543,7 +551,7 @@ subroutine formResidual(in, FandF_tau, & X_RANGE, Y_RANGE, Z_RANGE) F_av = sum(sum(sum(F,dim=5),dim=4),dim=3) * wgt - call MPI_Allreduce(MPI_IN_PLACE,F_av,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,F_av,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr) call SNESGetNumberFunctionEvals(snes,nfuncs,ierr); CHKERRQ(ierr) call SNESGetIterationNumber(snes,PETScIter,ierr); CHKERRQ(ierr) @@ -587,7 +595,7 @@ subroutine formResidual(in, FandF_tau, & call utilities_constitutiveResponse(residual_F, & ! "residuum" gets field of first PK stress (to save memory) P_av,C_volAvg,C_minMaxAvg, & F - residual_F_tau/num%beta,params%timeinc,params%rotation_BC) - call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr) !-------------------------------------------------------------------------------------------------- ! stress BC handling diff --git a/src/grid/grid_thermal_spectral.f90 b/src/grid/grid_thermal_spectral.f90 index 5a1745668..47c49e76f 100644 --- a/src/grid/grid_thermal_spectral.f90 +++ b/src/grid/grid_thermal_spectral.f90 @@ -7,8 +7,11 @@ module grid_thermal_spectral #include #include - use PETScdmda - use PETScsnes + use PETScDMDA + use PETScSNES +#if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY) + use MPI_f08 +#endif use prec use parallelization @@ -102,7 +105,7 @@ subroutine grid_thermal_spectral_init(T_0) call SNESSetOptionsPrefix(thermal_snes,'thermal_',ierr);CHKERRQ(ierr) localK = 0 localK(worldrank) = grid3 - call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr) call DMDACreate3D(PETSC_COMM_WORLD, & DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point @@ -182,10 +185,10 @@ function grid_thermal_spectral_solution(timeinc) result(solution) endif stagNorm = maxval(abs(T_current - T_stagInc)) solnNorm = maxval(abs(T_current)) - call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1,MPI_DOUBLE,MPI_MAX,PETSC_COMM_WORLD,ierr) - call MPI_Allreduce(MPI_IN_PLACE,solnNorm,1,MPI_DOUBLE,MPI_MAX,PETSC_COMM_WORLD,ierr) - T_stagInc = T_current + call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD,ierr) solution%stagConverged = stagNorm < max(num%eps_thermal_atol, num%eps_thermal_rtol*solnNorm) + call MPI_Allreduce(MPI_IN_PLACE,solution%stagConverged,1,MPI_LOGICAL,MPI_LAND,MPI_COMM_WORLD,ierr) + T_stagInc = T_current !-------------------------------------------------------------------------------------------------- ! 
updating thermal state @@ -310,9 +313,9 @@ subroutine updateReference() enddo K_ref = K_ref*wgt - call MPI_Allreduce(MPI_IN_PLACE,K_ref,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,K_ref,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr) mu_ref = mu_ref*wgt - call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr) end subroutine updateReference diff --git a/src/grid/spectral_utilities.f90 b/src/grid/spectral_utilities.f90 index f512d9b6c..aa7c2f4a7 100644 --- a/src/grid/spectral_utilities.f90 +++ b/src/grid/spectral_utilities.f90 @@ -8,6 +8,9 @@ module spectral_utilities #include use PETScSys +#if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY) + use MPI_f08 +#endif use prec use DAMASK_interface @@ -591,7 +594,7 @@ real(pReal) function utilities_divergenceRMS() conjg(-xi1st(1:3,grid1Red,j,k))*rescaledGeom))**2.0_pReal) enddo; enddo if(grid(1) == 1) utilities_divergenceRMS = utilities_divergenceRMS * 0.5_pReal ! counted twice in case of grid(1) == 1 - call MPI_Allreduce(MPI_IN_PLACE,utilities_divergenceRMS,1,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,utilities_divergenceRMS,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr) if(ierr /=0) error stop 'MPI error' utilities_divergenceRMS = sqrt(utilities_divergenceRMS) * wgt ! RMS in real space calculated with Parsevals theorem from Fourier space @@ -651,7 +654,7 @@ real(pReal) function utilities_curlRMS() + sum(real(curl_fourier)**2.0_pReal + aimag(curl_fourier)**2.0_pReal) ! this layer (Nyquist) does not have a conjugate complex counterpart (if grid(1) /= 1) enddo; enddo - call MPI_Allreduce(MPI_IN_PLACE,utilities_curlRMS,1,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,utilities_curlRMS,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr) if(ierr /=0) error stop 'MPI error' utilities_curlRMS = sqrt(utilities_curlRMS) * wgt if(grid(1) == 1) utilities_curlRMS = utilities_curlRMS * 0.5_pReal ! counted twice in case of grid(1) == 1 @@ -816,7 +819,7 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,& P = reshape(homogenization_P, [3,3,grid(1),grid(2),grid3]) P_av = sum(sum(sum(P,dim=5),dim=4),dim=3) * wgt ! 
average of P - call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr) if (debugRotation) print'(/,a,/,2(3(2x,f12.4,1x)/),3(2x,f12.4,1x))', & ' Piola--Kirchhoff stress (lab) / MPa =', transpose(P_av)*1.e-6_pReal if(present(rotation_BC)) P_av = rotation_BC%rotate(P_av) @@ -840,21 +843,21 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,& end do valueAndRank = [dPdF_norm_max,real(worldrank,pReal)] - call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MAXLOC, PETSC_COMM_WORLD, ierr) + call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MAXLOC, MPI_COMM_WORLD, ierr) if (ierr /= 0) error stop 'MPI error' - call MPI_Bcast(dPdF_max,81,MPI_DOUBLE,int(valueAndRank(2)),PETSC_COMM_WORLD, ierr) + call MPI_Bcast(dPdF_max,81,MPI_DOUBLE,int(valueAndRank(2)),MPI_COMM_WORLD, ierr) if (ierr /= 0) error stop 'MPI error' valueAndRank = [dPdF_norm_min,real(worldrank,pReal)] - call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MINLOC, PETSC_COMM_WORLD, ierr) + call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MINLOC, MPI_COMM_WORLD, ierr) if (ierr /= 0) error stop 'MPI error' - call MPI_Bcast(dPdF_min,81,MPI_DOUBLE,int(valueAndRank(2)),PETSC_COMM_WORLD, ierr) + call MPI_Bcast(dPdF_min,81,MPI_DOUBLE,int(valueAndRank(2)),MPI_COMM_WORLD, ierr) if (ierr /= 0) error stop 'MPI error' C_minmaxAvg = 0.5_pReal*(dPdF_max + dPdF_min) C_volAvg = sum(homogenization_dPdF,dim=5) - call MPI_Allreduce(MPI_IN_PLACE,C_volAvg,81,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,C_volAvg,81,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr) if (ierr /= 0) error stop 'MPI error' C_volAvg = C_volAvg * wgt @@ -909,7 +912,7 @@ function utilities_forwardField(timeinc,field_lastInc,rate,aim) utilities_forwardField = field_lastInc + rate*timeinc if (present(aim)) then !< correct to match average fieldDiff = sum(sum(sum(utilities_forwardField,dim=5),dim=4),dim=3)*wgt - call MPI_Allreduce(MPI_IN_PLACE,fieldDiff,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,fieldDiff,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr) fieldDiff = fieldDiff - aim utilities_forwardField = utilities_forwardField - & spread(spread(spread(fieldDiff,3,grid(1)),4,grid(2)),5,grid3) @@ -982,8 +985,13 @@ subroutine utilities_updateCoords(F) rank_t, rank_b, & c, & ierr +#if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY) + type(MPI_Request), dimension(4) :: request + type(MPI_Status), dimension(4) :: status +#else integer, dimension(4) :: request integer, dimension(MPI_STATUS_SIZE,4) :: status +#endif real(pReal), dimension(3) :: step real(pReal), dimension(3,3) :: Favg integer, dimension(3) :: me @@ -1018,7 +1026,7 @@ subroutine utilities_updateCoords(F) !-------------------------------------------------------------------------------------------------- ! average F if (grid3Offset == 0) Favg = real(tensorField_fourier(1:3,1:3,1,1,1),pReal)*wgt - call MPI_Bcast(Favg,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr) + call MPI_Bcast(Favg,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) if(ierr /=0) error stop 'MPI error' !-------------------------------------------------------------------------------------------------- @@ -1029,20 +1037,24 @@ subroutine utilities_updateCoords(F) rank_b = modulo(worldrank-1,worldsize) ! 
send bottom layer to process below - call MPI_Isend(IPfluct_padded(:,:,:,2), c,MPI_DOUBLE,rank_b,0,PETSC_COMM_WORLD,request(1),ierr) + call MPI_Isend(IPfluct_padded(:,:,:,2), c,MPI_DOUBLE,rank_b,0,MPI_COMM_WORLD,request(1),ierr) if(ierr /=0) error stop 'MPI error' - call MPI_Irecv(IPfluct_padded(:,:,:,grid3+2),c,MPI_DOUBLE,rank_t,0,PETSC_COMM_WORLD,request(2),ierr) + call MPI_Irecv(IPfluct_padded(:,:,:,grid3+2),c,MPI_DOUBLE,rank_t,0,MPI_COMM_WORLD,request(2),ierr) if(ierr /=0) error stop 'MPI error' ! send top layer to process above - call MPI_Isend(IPfluct_padded(:,:,:,grid3+1),c,MPI_DOUBLE,rank_t,1,PETSC_COMM_WORLD,request(3),ierr) + call MPI_Isend(IPfluct_padded(:,:,:,grid3+1),c,MPI_DOUBLE,rank_t,1,MPI_COMM_WORLD,request(3),ierr) if(ierr /=0) error stop 'MPI error' - call MPI_Irecv(IPfluct_padded(:,:,:,1), c,MPI_DOUBLE,rank_b,1,PETSC_COMM_WORLD,request(4),ierr) + call MPI_Irecv(IPfluct_padded(:,:,:,1), c,MPI_DOUBLE,rank_b,1,MPI_COMM_WORLD,request(4),ierr) if(ierr /=0) error stop 'MPI error' call MPI_Waitall(4,request,status,ierr) if(ierr /=0) error stop 'MPI error' +#if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY) + ! ToDo +#else if(any(status(MPI_ERROR,:) /= 0)) error stop 'MPI error' +#endif !-------------------------------------------------------------------------------------------------- ! calculate nodal displacements diff --git a/src/homogenization.f90 b/src/homogenization.f90 index 8c9713eef..f34834272 100644 --- a/src/homogenization.f90 +++ b/src/homogenization.f90 @@ -12,6 +12,7 @@ module homogenization use material use phase use discretization + use HDF5 use HDF5_utilities use results use lattice diff --git a/src/mesh/FEM_utilities.f90 b/src/mesh/FEM_utilities.f90 index a84e3559f..6765d3d0d 100644 --- a/src/mesh/FEM_utilities.f90 +++ b/src/mesh/FEM_utilities.f90 @@ -6,11 +6,13 @@ module FEM_utilities #include #include #include + use PETScDMplex + use PETScDMDA + use PETScIS +#if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY) + use MPI_f08 +#endif - use PETScdmplex - use PETScdmda - use PETScis - use prec use config use math @@ -165,7 +167,7 @@ subroutine utilities_constitutiveResponse(timeinc,P_av,forwardData) cutBack = .false. ! reset cutBack status P_av = sum(homogenization_P,dim=3) * wgt - call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr) end subroutine utilities_constitutiveResponse diff --git a/src/mesh/discretization_mesh.f90 b/src/mesh/discretization_mesh.f90 index d4206fcf5..88a19ade9 100644 --- a/src/mesh/discretization_mesh.f90 +++ b/src/mesh/discretization_mesh.f90 @@ -8,9 +8,12 @@ module discretization_mesh #include #include #include - use PETScdmplex - use PETScdmda - use PETScis + use PETScDMplex + use PETScDMDA + use PETScIS +#if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY) + use MPI_f08 +#endif use DAMASK_interface use parallelization @@ -111,9 +114,9 @@ subroutine discretization_mesh_init(restart) ! get number of IDs in face sets (for boundary conditions?) 
call DMGetLabelSize(globalMesh,'Face Sets',mesh_Nboundaries,ierr) CHKERRQ(ierr) - call MPI_Bcast(mesh_Nboundaries,1,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr) - call MPI_Bcast(mesh_NcpElemsGlobal,1,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr) - call MPI_Bcast(dimPlex,1,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr) + call MPI_Bcast(mesh_Nboundaries,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr) + call MPI_Bcast(mesh_NcpElemsGlobal,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr) + call MPI_Bcast(dimPlex,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr) if (worldrank == 0) then call DMClone(globalMesh,geomMesh,ierr) @@ -134,7 +137,7 @@ subroutine discretization_mesh_init(restart) CHKERRQ(ierr) call ISRestoreIndicesF90(faceSetIS,pFaceSets,ierr) endif - call MPI_Bcast(mesh_boundaries,mesh_Nboundaries,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr) + call MPI_Bcast(mesh_boundaries,mesh_Nboundaries,MPI_INTEGER,0,MPI_COMM_WORLD,ierr) call DMDestroy(globalMesh,ierr); CHKERRQ(ierr) diff --git a/src/mesh/mesh_mech_FEM.f90 b/src/mesh/mesh_mech_FEM.f90 index 954070e81..6fa2f668b 100644 --- a/src/mesh/mesh_mech_FEM.f90 +++ b/src/mesh/mesh_mech_FEM.f90 @@ -8,11 +8,13 @@ module mesh_mechanical_FEM #include #include #include - - use PETScsnes + use PETScSNES use PETScDM use PETScDMplex use PETScDT +#if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY) + use MPI_f08 +#endif use prec use FEM_utilities @@ -396,7 +398,7 @@ subroutine FEM_mechanical_formResidual(dm_local,xx_local,f_local,dummy,ierr) !-------------------------------------------------------------------------------------------------- ! evaluate constitutive response call Utilities_constitutiveResponse(params%timeinc,P_av,ForwardData) - call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr) ForwardData = .false. 
!-------------------------------------------------------------------------------------------------- diff --git a/src/parallelization.f90 b/src/parallelization.f90 index ee8e71006..534478cef 100644 --- a/src/parallelization.f90 +++ b/src/parallelization.f90 @@ -6,11 +6,15 @@ module parallelization use, intrinsic :: ISO_fortran_env, only: & OUTPUT_UNIT -#ifdef PETSc +#ifdef PETSC #include - use petscsys + use PETScSys +#if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY) + use MPI_f08 +#endif !$ use OMP_LIB #endif + use prec implicit none @@ -20,7 +24,7 @@ module parallelization worldrank = 0, & !< MPI worldrank (/=0 for MPI simulations only) worldsize = 1 !< MPI worldsize (/=1 for MPI simulations only) -#ifdef PETSc +#ifdef PETSC public :: & parallelization_init @@ -60,12 +64,12 @@ subroutine parallelization_init #endif CHKERRQ(petsc_err) - call MPI_Comm_rank(PETSC_COMM_WORLD,worldrank,err) + call MPI_Comm_rank(MPI_COMM_WORLD,worldrank,err) if (err /= 0) error stop 'Could not determine worldrank' if (worldrank == 0) print'(/,a)', ' <<<+- parallelization init -+>>>' - call MPI_Comm_size(PETSC_COMM_WORLD,worldsize,err) + call MPI_Comm_size(MPI_COMM_WORLD,worldsize,err) if (err /= 0) error stop 'Could not determine worldsize' if (worldrank == 0) print'(a,i3)', ' MPI processes: ',worldsize diff --git a/src/phase.f90 b/src/phase.f90 index da1ff9d14..e396e5661 100644 --- a/src/phase.f90 +++ b/src/phase.f90 @@ -14,6 +14,7 @@ module phase use lattice use discretization use parallelization + use HDF5 use HDF5_utilities implicit none diff --git a/src/phase_damage.f90 b/src/phase_damage.f90 index e9b62b702..d8ac5046a 100644 --- a/src/phase_damage.f90 +++ b/src/phase_damage.f90 @@ -74,7 +74,7 @@ contains module subroutine damage_init integer :: & - ph, & !< counter in phase loop + ph, & Nmembers class(tNode), pointer :: & phases, & @@ -105,10 +105,10 @@ module subroutine damage_init if (sources%length == 1) then damage_active = .true. source => sources%get(1) - param(ph)%mu = source%get_asFloat('M',defaultVal=0.0_pReal) - param(ph)%K(1,1) = source%get_asFloat('D_11',defaultVal=0.0_pReal) - param(ph)%K(3,3) = source%get_asFloat('D_33',defaultVal=0.0_pReal) - param(ph)%K = lattice_applyLatticeSymmetry33(param(ph)%K,phase%get_asString('lattice')) + param(ph)%mu = source%get_asFloat('mu',defaultVal=0.0_pReal) ! ToDo: make mandatory? + param(ph)%K(1,1) = source%get_asFloat('K_11',defaultVal=0.0_pReal) ! ToDo: make mandatory? + param(ph)%K(3,3) = source%get_asFloat('K_33',defaultVal=0.0_pReal) ! ToDo: depends on symmetry + param(ph)%K = lattice_applyLatticeSymmetry33(param(ph)%K,phase_lattice(ph)) endif enddo diff --git a/src/phase_mechanical.f90 b/src/phase_mechanical.f90 index 104127b53..7562c055f 100644 --- a/src/phase_mechanical.f90 +++ b/src/phase_mechanical.f90 @@ -267,15 +267,11 @@ module subroutine mechanical_init(materials,phases) enddo -! initialize elasticity call elastic_init(phases) -! 
initialize plasticity allocate(plasticState(phases%length)) allocate(phase_plasticity(phases%length),source = PLASTICITY_undefined_ID) - call plastic_init() - do ph = 1,phases%length plasticState(ph)%state0 = plasticState(ph)%state enddo diff --git a/src/phase_mechanical_plastic.f90 b/src/phase_mechanical_plastic.f90 index 6a053e12d..75da4fcdd 100644 --- a/src/phase_mechanical_plastic.f90 +++ b/src/phase_mechanical_plastic.f90 @@ -232,6 +232,8 @@ module subroutine plastic_init where(plastic_dislotungsten_init()) phase_plasticity = PLASTICITY_DISLOTUNGSTEN_ID where(plastic_nonlocal_init()) phase_plasticity = PLASTICITY_NONLOCAL_ID + if (any(phase_plasticity == PLASTICITY_undefined_ID)) call IO_error(201) + end subroutine plastic_init !-------------------------------------------------------------------------------------------------- diff --git a/src/phase_mechanical_plastic_dislotwin.f90 b/src/phase_mechanical_plastic_dislotwin.f90 index c70d33f78..de0d20b11 100644 --- a/src/phase_mechanical_plastic_dislotwin.f90 +++ b/src/phase_mechanical_plastic_dislotwin.f90 @@ -32,11 +32,12 @@ submodule(phase:plastic) dislotwin xi_sb = 1.0_pReal, & !< value for shearband resistance v_sb = 1.0_pReal, & !< value for shearband velocity_0 E_sb = 1.0_pReal, & !< activation energy for shear bands - Gamma_sf_0K = 1.0_pReal, & !< stacking fault energy at zero K - dGamma_sf_dT = 1.0_pReal, & !< temperature dependence of stacking fault energy delta_G = 1.0_pReal, & !< Free energy difference between austensite and martensite i_tr = 1.0_pReal, & !< adjustment parameter to calculate MFP for transformation - h = 1.0_pReal !< Stack height of hex nucleus + h = 1.0_pReal, & !< Stack height of hex nucleus + T_ref = 0.0_pReal + real(pReal), dimension(2) :: & + Gamma_sf = 0.0_pReal real(pReal), allocatable, dimension(:) :: & b_sl, & !< absolute length of Burgers vector [m] for each slip system b_tw, & !< absolute length of Burgers vector [m] for each twin system @@ -220,13 +221,9 @@ module function plastic_dislotwin_init() result(myPlasticity) prm%D_a = pl%get_asFloat('D_a') prm%D_0 = pl%get_asFloat('D_0') prm%Q_cl = pl%get_asFloat('Q_cl') - prm%ExtendedDislocations = pl%get_asBool('extend_dislocations',defaultVal = .false.) - if (prm%ExtendedDislocations) then - prm%Gamma_sf_0K = pl%get_asFloat('Gamma_sf_0K') - prm%dGamma_sf_dT = pl%get_asFloat('dGamma_sf_dT') - endif - prm%omitDipoles = pl%get_asBool('omit_dipoles',defaultVal = .false.) + prm%ExtendedDislocations = pl%get_asBool('extend_dislocations',defaultVal = .false.) + prm%omitDipoles = pl%get_asBool('omit_dipoles',defaultVal = .false.) ! multiplication factor according to crystal structure (nearest neighbors bcc vs fcc/hex) ! details: Argon & Moffat, Acta Metallurgica, Vol. 29, pg 293 to 299, 1981 @@ -384,11 +381,14 @@ module function plastic_dislotwin_init() result(myPlasticity) if(prm%sum_N_sl + prm%sum_N_tw + prm%sum_N_tw > 0) & prm%D = pl%get_asFloat('D') - twinOrSlipActive: if (prm%sum_N_tw + prm%sum_N_tr > 0) then - prm%Gamma_sf_0K = pl%get_asFloat('Gamma_sf_0K') - prm%dGamma_sf_dT = pl%get_asFloat('dGamma_sf_dT') - prm%V_cs = pl%get_asFloat('V_cs') - endif twinOrSlipActive + if (prm%sum_N_tw + prm%sum_N_tr > 0) & + prm%V_cs = pl%get_asFloat('V_cs') + + if (prm%sum_N_tw + prm%sum_N_tr > 0 .or. 
prm%ExtendedDislocations) then + prm%T_ref = pl%get_asFloat('T_ref') + prm%Gamma_sf(1) = pl%get_asFloat('Gamma_sf') + prm%Gamma_sf(2) = pl%get_asFloat('Gamma_sf,T',defaultVal=0.0_pReal) + endif slipAndTwinActive: if (prm%sum_N_sl * prm%sum_N_tw > 0) then prm%h_sl_tw = lattice_interaction_SlipByTwin(N_sl,N_tw,& @@ -689,7 +689,7 @@ module subroutine dislotwin_dotState(Mp,T,ph,en) ! Argon & Moffat, Acta Metallurgica, Vol. 29, pg 293 to 299, 1981 sigma_cl = dot_product(prm%n0_sl(1:3,i),matmul(Mp,prm%n0_sl(1:3,i))) b_d = merge(24.0_pReal*PI*(1.0_pReal - prm%nu)/(2.0_pReal + prm%nu) & - * (prm%Gamma_sf_0K + prm%dGamma_sf_dT * T) / (prm%mu*prm%b_sl(i)), & + * (prm%Gamma_sf(1) + prm%Gamma_sf(2) * T) / (prm%mu*prm%b_sl(i)), & 1.0_pReal, & prm%ExtendedDislocations) v_cl = 2.0_pReal*prm%omega*b_d**2.0_pReal*exp(-prm%Q_cl/(kB*T)) & @@ -752,7 +752,7 @@ module subroutine dislotwin_dependentState(T,ph,en) sumf_tw = sum(stt%f_tw(1:prm%sum_N_tw,en)) sumf_tr = sum(stt%f_tr(1:prm%sum_N_tr,en)) - Gamma = prm%Gamma_sf_0K + prm%dGamma_sf_dT * T + Gamma = prm%Gamma_sf(1) + prm%Gamma_sf(2) * T !* rescaled volume fraction for topology f_over_t_tw = stt%f_tw(1:prm%sum_N_tw,en)/prm%t_tw ! this is per system ... diff --git a/src/phase_mechanical_plastic_isotropic.f90 b/src/phase_mechanical_plastic_isotropic.f90 index 6694d6548..39a619897 100644 --- a/src/phase_mechanical_plastic_isotropic.f90 +++ b/src/phase_mechanical_plastic_isotropic.f90 @@ -31,8 +31,7 @@ submodule(phase:plastic) isotropic type :: tIsotropicState real(pReal), pointer, dimension(:) :: & - xi, & - gamma + xi end type tIsotropicState !-------------------------------------------------------------------------------------------------- @@ -122,7 +121,7 @@ module function plastic_isotropic_init() result(myPlasticity) !-------------------------------------------------------------------------------------------------- ! allocate state arrays Nmembers = count(material_phaseID == ph) - sizeDotState = size(['xi ','gamma']) + sizeDotState = size(['xi']) sizeState = sizeDotState call phase_allocateState(plasticState(ph),Nmembers,sizeState,sizeDotState,0) @@ -135,11 +134,6 @@ module function plastic_isotropic_init() result(myPlasticity) plasticState(ph)%atol(1) = pl%get_asFloat('atol_xi',defaultVal=1.0_pReal) if (plasticState(ph)%atol(1) < 0.0_pReal) extmsg = trim(extmsg)//' atol_xi' - stt%gamma => plasticState(ph)%state (2,:) - dot%gamma => plasticState(ph)%dotState(2,:) - plasticState(ph)%atol(2) = pl%get_asFloat('atol_gamma',defaultVal=1.0e-6_pReal) - if (plasticState(ph)%atol(2) < 0.0_pReal) extmsg = trim(extmsg)//' atol_gamma' - end associate !-------------------------------------------------------------------------------------------------- @@ -285,8 +279,6 @@ module subroutine isotropic_dotState(Mp,ph,en) dot%xi(en) = 0.0_pReal endif - dot%gamma(en) = dot_gamma ! ToDo: not really used - end associate end subroutine isotropic_dotState diff --git a/src/phase_thermal.f90 b/src/phase_thermal.f90 index 808cd7cf2..8d2915ef7 100644 --- a/src/phase_thermal.f90 +++ b/src/phase_thermal.f90 @@ -100,10 +100,10 @@ module subroutine thermal_init(phases) allocate(current(ph)%dot_T(Nmembers),source=0.0_pReal) phase => phases%get(ph) thermal => phase%get('thermal',defaultVal=emptyDict) - param(ph)%C_p = thermal%get_asFloat('C_p',defaultVal=0.0_pReal) + param(ph)%C_p = thermal%get_asFloat('C_p',defaultVal=0.0_pReal) ! ToDo: make mandatory? param(ph)%K(1,1) = thermal%get_asFloat('K_11',defaultVal=0.0_pReal) ! ToDo: make mandatory? 
param(ph)%K(3,3) = thermal%get_asFloat('K_33',defaultVal=0.0_pReal) ! ToDo: depends on symmtery - param(ph)%K = lattice_applyLatticeSymmetry33(param(ph)%K,phase%get_asString('lattice')) + param(ph)%K = lattice_applyLatticeSymmetry33(param(ph)%K,phase_lattice(ph)) sources => thermal%get('source',defaultVal=emptyList) thermal_Nsources(ph) = sources%length diff --git a/src/quit.f90 b/src/quit.f90 index 26dc23bac..786241d85 100644 --- a/src/quit.f90 +++ b/src/quit.f90 @@ -7,9 +7,6 @@ subroutine quit(stop_id) #include use PetscSys -#ifdef _OPENMP - use MPI -#endif use HDF5 implicit none diff --git a/src/results.f90 b/src/results.f90 index 174023f1a..94625a4b9 100644 --- a/src/results.f90 +++ b/src/results.f90 @@ -5,12 +5,18 @@ !> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH !-------------------------------------------------------------------------------------------------- module results + use prec use DAMASK_interface use parallelization use IO use HDF5_utilities -#ifdef PETSc - use PETSC + use HDF5 +#ifdef PETSC +#include + use PETScSys +#if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY) + use MPI_f08 +#endif #endif implicit none @@ -451,7 +457,7 @@ subroutine results_mapping_phase(ID,entry,label) call h5pcreate_f(H5P_DATASET_XFER_F, plist_id, hdferr) if(hdferr < 0) error stop 'HDF5 error' -#ifndef PETSc +#ifndef PETSC entryGlobal = entry -1 ! 0-based #else !-------------------------------------------------------------------------------------------------- @@ -459,7 +465,7 @@ subroutine results_mapping_phase(ID,entry,label) call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr) if(hdferr < 0) error stop 'HDF5 error' - call MPI_allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get output at each process + call MPI_Allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,MPI_COMM_WORLD,ierr) ! get output at each process if(ierr /= 0) error stop 'MPI error' entryOffset = 0 @@ -468,7 +474,7 @@ subroutine results_mapping_phase(ID,entry,label) entryOffset(ID(co,ce),worldrank) = entryOffset(ID(co,ce),worldrank) +1 enddo enddo - call MPI_allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr)! get offset at each process + call MPI_Allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INT,MPI_SUM,MPI_COMM_WORLD,ierr)! get offset at each process if(ierr /= 0) error stop 'MPI error' entryOffset(:,worldrank) = sum(entryOffset(:,0:worldrank-1),2) do co = 1, size(ID,1) @@ -604,7 +610,7 @@ subroutine results_mapping_homogenization(ID,entry,label) call h5pcreate_f(H5P_DATASET_XFER_F, plist_id, hdferr) if(hdferr < 0) error stop 'HDF5 error' -#ifndef PETSc +#ifndef PETSC entryGlobal = entry -1 ! 0-based #else !-------------------------------------------------------------------------------------------------- @@ -612,14 +618,14 @@ subroutine results_mapping_homogenization(ID,entry,label) call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr) if(hdferr < 0) error stop 'HDF5 error' - call MPI_allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get output at each process + call MPI_Allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,MPI_COMM_WORLD,ierr) ! 
get output at each process if(ierr /= 0) error stop 'MPI error' entryOffset = 0 do ce = 1, size(ID,1) entryOffset(ID(ce),worldrank) = entryOffset(ID(ce),worldrank) +1 enddo - call MPI_allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get offset at each process + call MPI_Allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INT,MPI_SUM,MPI_COMM_WORLD,ierr)! get offset at each process if(ierr /= 0) error stop 'MPI error' entryOffset(:,worldrank) = sum(entryOffset(:,0:worldrank-1),2) do ce = 1, size(ID,1)
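The results_mapping_phase and results_mapping_homogenization hunks end on the same bookkeeping step: each rank tallies the number of entries it owns per ID into its own column of a table, the table is made global with a summing allreduce, and a rank's write offsets are then the row-wise sums over all lower-ranked columns. A small numpy/mpi4py sketch of that offset computation (a sketch under these assumptions, not DAMASK code):

    import numpy as np
    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    n_ids = 2                                          # e.g. number of phases

    counts = np.zeros((n_ids, comm.size), dtype=np.int64)
    counts[:, comm.rank] = [3, 1]                      # this rank: 3 entries of ID 0, 1 of ID 1

    # make the table global: analogue of MPI_Allreduce(MPI_IN_PLACE,entryOffset,...,MPI_SUM,...)
    comm.Allreduce(MPI.IN_PLACE, counts, op=MPI.SUM)

    # this rank's 0-based offsets per ID: entries already claimed by lower ranks
    offsets = counts[:, :comm.rank].sum(axis=1)
    print(f'rank {comm.rank}: offsets per ID = {offsets}')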