Merge branch 'polishing-for-beta' into 'development'

Preparing for beta release.

See merge request damask/DAMASK!408

Commit 442759a09f
@@ -15,27 +15,27 @@ pkg_get_variable (CMAKE_Fortran_COMPILER PETSc fcompiler)
 pkg_get_variable (CMAKE_C_COMPILER PETSc ccompiler)

 # Solver determines name of project
-string(TOLOWER "${DAMASK_SOLVER}" DAMASK_SOLVER)
-if (DAMASK_SOLVER STREQUAL "grid")
+string(TOUPPER "${DAMASK_SOLVER}" DAMASK_SOLVER)
+if (DAMASK_SOLVER STREQUAL "GRID")
 project (damask-grid HOMEPAGE_URL https://damask.mpie.de LANGUAGES Fortran C)
-add_definitions (-DGrid)
-elseif (DAMASK_SOLVER STREQUAL "mesh")
+elseif (DAMASK_SOLVER STREQUAL "MESH")
 project (damask-mesh HOMEPAGE_URL https://damask.mpie.de LANGUAGES Fortran C)
-add_definitions (-DMesh)
 else ()
-message (FATAL_ERROR "Build target (DAMASK_SOLVER) is not defined")
+message (FATAL_ERROR "Invalid solver: DAMASK_SOLVER=${DAMASK_SOLVER}")
 endif ()
+add_definitions ("-D${DAMASK_SOLVER}")

 file (STRINGS ${PROJECT_SOURCE_DIR}/VERSION DAMASK_VERSION)

 message ("\nBuilding ${CMAKE_PROJECT_NAME} ${DAMASK_VERSION}\n")

-add_definitions (-DPETSc)
+add_definitions (-DPETSC)
 add_definitions (-DDAMASKVERSION="${DAMASK_VERSION}")

 if (CMAKE_BUILD_TYPE STREQUAL "")
 set (CMAKE_BUILD_TYPE "RELEASE")
 endif ()
+string(TOUPPER "${CMAKE_BUILD_TYPE}" CMAKE_BUILD_TYPE)

 # Predefined sets for OPTIMIZATION/OPENMP based on BUILD_TYPE
 if (CMAKE_BUILD_TYPE STREQUAL "DEBUG" OR CMAKE_BUILD_TYPE STREQUAL "SYNTAXONLY")

@@ -45,9 +45,15 @@ if (CMAKE_BUILD_TYPE STREQUAL "DEBUG" OR CMAKE_BUILD_TYPE STREQUAL "SYNTAXONLY")
 elseif (CMAKE_BUILD_TYPE STREQUAL "RELEASE")
 set (PARALLEL "ON")
 set (OPTI "DEFENSIVE")
+elseif (CMAKE_BUILD_TYPE STREQUAL "DEBUGRELEASE")
+set (DEBUG_FLAGS "${DEBUG_FLAGS} -DDEBUG")
+set (PARALLEL "ON")
+set (OPTI "DEFENSIVE")
 elseif (CMAKE_BUILD_TYPE STREQUAL "PERFORMANCE")
 set (PARALLEL "ON")
 set (OPTI "AGGRESSIVE")
+else ()
+message (FATAL_ERROR "Invalid build type: CMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}")
 endif ()

 # $OPTIMIZATION takes precedence over $BUILD_TYPE defaults
@@ -56,15 +56,12 @@ echo XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
 echo System report for \'$(hostname)\' created on $(date '+%Y-%m-%d %H:%M:%S') by \'$(whoami)\'
 echo XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX

-firstLevel "DAMASK settings"
+firstLevel "DAMASK"
 secondLevel "DAMASK_ROOT"
 echo $DAMASK_ROOT
 echo
 secondLevel "Version"
 cat VERSION
-echo
-secondLevel "Settings in CONFIG"
-cat env/CONFIG

 firstLevel "System"
 uname -a

@@ -75,20 +72,26 @@ echo PYTHONPATH: $PYTHONPATH
 echo SHELL: $SHELL
 echo PETSC_ARCH: $PETSC_ARCH
 echo PETSC_DIR: $PETSC_DIR
-ls $PETSC_DIR/lib
+echo
+echo $PETSC_DIR/$PETSC_ARCH/lib:
+ls $PETSC_DIR/$PETSC_ARCH/lib
+echo
+echo $PETSC_DIR/$PETSC_ARCH/lib/petsc/conf/petscvariables:
+cat $PETSC_DIR/$PETSC_ARCH/lib/petsc/conf/petscvariables


 firstLevel "Python"
 DEFAULT_PYTHON=python3
-for executable in python python3; do
-getDetails $executable '--version'
+for EXECUTABLE in python python3; do
+getDetails $EXECUTABLE '--version'
 done
 secondLevel "Details on $DEFAULT_PYTHON:"
 echo $(ls -la $(which $DEFAULT_PYTHON))
-for module in numpy scipy pandas matplotlib yaml h5py;do
+for MODULE in numpy scipy pandas matplotlib yaml h5py;do
 thirdLevel $module
-$DEFAULT_PYTHON -c "import $module; \
-print('Version: {}'.format($module.__version__)); \
-print('Location: {}'.format($module.__file__))"
+$DEFAULT_PYTHON -c "import $MODULE; \
+print('Version: {}'.format($MODULE.__version__)); \
+print('Location: {}'.format($MODULE.__file__))"
 done
 thirdLevel vtk
 $DEFAULT_PYTHON -c "import vtk; \

@@ -96,23 +99,23 @@ $DEFAULT_PYTHON -c "import vtk; \
 print('Location: {}'.format(vtk.__file__))"

 firstLevel "GNU Compiler Collection"
-for executable in gcc g++ gfortran ;do
-getDetails $executable '--version'
+for EXECUTABLE in gcc g++ gfortran ;do
+getDetails $EXECUTABLE '--version'
 done

 firstLevel "Intel Compiler Suite"
-for executable in icc icpc ifort ;do
-getDetails $executable '--version'
+for EXECUTABLE in icc icpc ifort ;do
+getDetails $EXECUTABLE '--version'
 done

 firstLevel "MPI Wrappers"
-for executable in mpicc mpiCC mpiicc mpic++ mpiicpc mpicxx mpifort mpiifort mpif90 mpif77; do
-getDetails $executable '-show'
+for EXECUTABLE in mpicc mpiCC mpiicc mpic++ mpiicpc mpicxx mpifort mpiifort mpif90 mpif77; do
+getDetails $EXECUTABLE '-show'
 done

 firstLevel "MPI Launchers"
-for executable in mpirun mpiexec; do
-getDetails $executable '--version'
+for EXECUTABLE in mpirun mpiexec; do
+getDetails $EXECUTABLE '--version'
 done

 firstLevel "CMake"
Makefile
@@ -9,13 +9,13 @@ all: grid mesh

 .PHONY: grid
 grid:
-@cmake -B build/grid -DDAMASK_SOLVER=GRID -DCMAKE_INSTALL_PREFIX=${PWD} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DBUILDCMD_POST=${BUILDCMD_POST} -DBUILDCMD_PRE=${BUILDCMD_PRE} -DOPTIMIZATION=${OPTIMIZATION} -DOPENMP=${OPENMP}
+@cmake -B build/grid -DDAMASK_SOLVER=grid -DCMAKE_INSTALL_PREFIX=${PWD} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DBUILDCMD_POST=${BUILDCMD_POST} -DBUILDCMD_PRE=${BUILDCMD_PRE} -DOPTIMIZATION=${OPTIMIZATION} -DOPENMP=${OPENMP}
 @cmake --build build/grid --parallel
 @cmake --install build/grid

 .PHONY: mesh
 mesh:
-@cmake -B build/mesh -DDAMASK_SOLVER=MESH -DCMAKE_INSTALL_PREFIX=${PWD} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DBUILDCMD_POST=${BUILDCMD_POST} -DBUILDCMD_PRE=${BUILDCMD_PRE} -DOPTIMIZATION=${OPTIMIZATION} -DOPENMP=${OPENMP}
+@cmake -B build/mesh -DDAMASK_SOLVER=mesh -DCMAKE_INSTALL_PREFIX=${PWD} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DBUILDCMD_POST=${BUILDCMD_POST} -DBUILDCMD_PRE=${BUILDCMD_PRE} -DOPTIMIZATION=${OPTIMIZATION} -DOPENMP=${OPENMP}
 @cmake --build build/mesh --parallel
 @cmake --install build/mesh
PRIVATE
@@ -1 +1 @@
-Subproject commit d399050216d814627edbe0ff1c05afe8f76c7b40
+Subproject commit 1ca9a0e9f2b333d3244b1ab44480380b3b22bebe
@@ -9,19 +9,19 @@ phase:
 lattice: cF
 mechanical:
 output: [F, P, F_e, F_p, L_p, O]
-elastic: {type: Hooke, C_11: 106.75e9, C_12: 60.41e9, C_44: 28.34e9}
+elastic: {type: Hooke, C_11: 106.75e+9, C_12: 60.41e+9, C_44: 28.34e+9}
 plastic:
 type: phenopowerlaw
 N_sl: [12]
 a_sl: 2.25
 atol_xi: 1.0
 dot_gamma_0_sl: 0.001
-h_0_sl-sl: 75e6
+h_0_sl-sl: 75.e+6
 h_sl-sl: [1, 1, 1.4, 1.4, 1.4, 1.4, 1.4]
 n_sl: 20
 output: [xi_sl]
-xi_0_sl: [31e6]
-xi_inf_sl: [63e6]
+xi_0_sl: [31.e+6]
+xi_inf_sl: [63.e+6]

 material:
 - homogenization: SX
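The recurring "75e6" to "75.e+6" style edits across the material configurations follow the float grammar of common YAML 1.1 resolvers: without a decimal point and a signed exponent the scalar loads as a string, not a number. A minimal check of that behavior, assuming PyYAML (the parser is not part of this merge):

    import yaml  # PyYAML, YAML 1.1 implicit resolvers

    old = yaml.safe_load('h_0_sl-sl: 75e6')     # value stays a str: '75e6'
    new = yaml.safe_load('h_0_sl-sl: 75.e+6')   # value becomes a float: 75000000.0
    print(type(old['h_0_sl-sl']), type(new['h_0_sl-sl']))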
@@ -6,21 +6,8 @@ plasticity disloucla
 (output) dipole_density
 (output) shear_rate_slip
 (output) accumulated_shear_slip
-(output) mfp_slip
 (output) resolved_stress_slip
 (output) threshold_stress_slip
-(output) twin_fraction
-(output) shear_rate_twin
-(output) accumulated_shear_twin
-(output) mfp_twin
-(output) resolved_stress_twin
-(output) threshold_stress_twin

-### Material parameters ###
-lattice_structure bcc
-C11 523.0e9 # From Marinica et al. Journal of Physics: Condensed Matter(2013)
-C12 202.0e9
-C44 161.0e9
-
 grainsize 2.7e-5 # Average grain size [m] 2.0e-5
 SolidSolutionStrength 0.0 # Strength due to elements in solid solution
@@ -1,41 +1,31 @@
-TWIP_Steel_FeMnC:
-lattice: cF
-mechanical:
-elastic: {type: Hooke, C_11: 175.0e9, C_12: 115.0e9, C_44: 135.0e9}
-plastic:
-type: dislotwin
-output: [rho_mob, rho_dip, gamma_sl, Lambda_sl, tau_pass, f_tw, Lambda_tw, tau_hat_tw, f_tr]
-D: 2.0e-5
-N_sl: [12]
-b_sl: [2.56e-10]
-rho_mob_0: [1.0e12]
-rho_dip_0: [1.0]
-v_0: [1.0e4]
-Q_s: [3.7e-19]
-p_sl: [1.0]
-q_sl: [1.0]
-tau_0: [1.5e8]
-i_sl: [10.0] # Adj. parameter controlling dislocation mean free path
-D_0: 4.0e-5 # Vacancy diffusion prefactor / m^2/s
-D_a: 1.0 # minimum dipole distance / b
-Q_cl: 4.5e-19 # Activation energy for climb / J
-h_sl-sl: [0.122, 0.122, 0.625, 0.07, 0.137, 0.137, 0.122] # Interaction coefficients (Kubin et al. 2008)
-# shear band parameters
-xi_sb: 180.0e6
-Q_sb: 3.7e-19
-p_sb: 1.0
-q_sb: 1.0
-v_sb: 0.0 # set to 0, to turn it off
+type: dislotwin
+output: [rho_mob, rho_dip, gamma_sl, Lambda_sl, tau_pass, f_tw, Lambda_tw, tau_hat_tw, f_tr]
+D: 2.0e-5
+N_sl: [12]
+b_sl: [2.56e-10]
+rho_mob_0: [1.0e+12]
+rho_dip_0: [1.0]
+v_0: [1.0e+4]
+Q_s: [3.7e-19]
+p_sl: [1.0]
+q_sl: [1.0]
+tau_0: [1.5e+8]
+i_sl: [10.0] # Adj. parameter controlling dislocation mean free path
+D_0: 4.0e-5 # Vacancy diffusion prefactor / m^2/s
+D_a: 1.0 # minimum dipole distance / b
+Q_cl: 4.5e-19 # Activation energy for climb / J
+h_sl-sl: [0.122, 0.122, 0.625, 0.07, 0.137, 0.137, 0.122] # Interaction coefficients (Kubin et al. 2008)
 # twinning parameters
 N_tw: [12]
 b_tw: [1.47e-10] # Burgers vector length of twin system / b
 t_tw: [5.0e-8] # Twin stack mean thickness / m
 L_tw: 442.0 # Length of twin nuclei / b
 x_c_tw: 1.0e-9 # critical distance for formation of twin nucleus / m
 V_cs: 1.67e-29 # cross slip volume / m^3
 p_tw: [10.0] # r-exponent in twin formation probability
 i_tw: 1.0 # Adj. parameter controlling twin mean free path
 h_sl-tw: [0.0, 1.0, 1.0] # dislocation-twin interaction coefficients
 h_tw-tw: [0.0, 1.0] # twin-twin interaction coefficients
-Gamma_sf_0K: -0.0396 # stacking fault energy / J/m^2 at zero K; TWIP steel: -0.0526; Cu: -0.0396
-dGamma_sf_dT: 0.0002 # temperature dependence / J/(m^2 K) of stacking fault energy
+T_ref: 0.0
+Gamma_sf: -0.0396 # stacking fault energy / J/m^2 at zero K; TWIP steel: -0.0526; Cu: -0.0396
+Gamma_sf,T: 0.0002 # temperature dependence / J/(m^2 K) of stacking fault energy
@@ -1,21 +0,0 @@
-Tungsten:
-lattice: cI
-mechanical:
-elastic: {type: Hooke, C_11: 523.0e9, C_12: 202.0e9, C_44: 161.0e9} # Marinica et al. Journal of Physics: Condensed Matter(2013)
-plastic:
-type: dislotwin
-D: 2.0e-5 # Average grain size / m
-N_sl: [12]
-b_sl: [2.72e-10] # Burgers vector length of slip families / m
-rho_mob_0: [1.0e12]
-rho_dip_0: [1.0]
-v_0: [1.0e4] # Initial glide velocity / m/s
-Q_s: [2.725e-19] # Activation energy for dislocation glide / J
-p_sl: [0.78] # p-exponent in glide velocity
-q_sl: [1.58] # q-exponent in glide velocity
-tau_0: [1.5e8] # solid solution strength / Pa
-i_sl: [10.0] # Adj. parameter controlling dislocation mean free path
-D_0: 4.0e-5 # Vacancy diffusion prefactor / m^2/s
-D_a: 1.0 # minimum dipole distance / b
-Q_cl: 4.5e-19 # Activation energy for climb / J
-h_sl-sl: [1, 1.4, 1, 1.4, 1.4, 1.4, 1.4]
@@ -1,16 +0,0 @@
-FreeSurface:
-lattice: cI
-mechanical:
-output: [F, P, F_e, F_p, L_p]
-elastic: {type: Hooke, C_11: 1e8, C_12: 1e6, C_44: 4.95e7}
-plastic:
-type: isotropic
-output: [xi]
-xi_0: 0.3e6
-xi_inf: 0.6e6
-dot_gamma_0: 0.001
-n: 5
-M: 3
-h_0: 1e6
-a: 2
-dilatation: True
@@ -4,14 +4,14 @@
 Martensite:
 lattice: cI
 mechanical:
-elastic: {C_11: 417.4e9, C_12: 242.4e9, C_44: 211.1e9, type: Hooke}
+elastic: {C_11: 417.4e+9, C_12: 242.4e+9, C_44: 211.1e+9, type: Hooke}
 plastic:
 N_sl: [12, 12]
 a_sl: 2.0
 dot_gamma_0_sl: 0.001
-h_0_sl-sl: 563.0e9
+h_0_sl-sl: 563.0e+9
 h_sl-sl: [1, 1.4, 1, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4]
 n_sl: 20
 type: phenopowerlaw
-xi_0_sl: [405.8e6, 456.7e6]
-xi_inf_sl: [872.9e6, 971.2e6]
+xi_0_sl: [405.8e+6, 456.7e+6]
+xi_inf_sl: [872.9e+6, 971.2e+6]
@@ -1,28 +1,20 @@
-# Tromans 2011, Elastic Anisotropy of HCP Metal Crystals and Polycrystals
-Magnesium:
-lattice: hP
-c/a: 1.62350
-mechanical:
-output: [F, P, F_e, F_p, L_p, O]
-elastic: {C_11: 59.3e9, C_12: 25.7e9, C_13: 21.4e9, C_33: 61.5e9, C_44: 16.4e9, type: Hooke}
-plastic:
-N_sl: [3, 3, 0, 6, 0, 6]
-N_tw: [6, 0, 0, 6]
-h_0_tw-tw: 50.0e6
-h_0_sl-sl: 500.0e6
-h_0_tw-sl: 150.0e6
-h_sl-sl: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
-h_tw-tw: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
-h_sl-tw: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
-h_tw-sl: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
-output: [xi_sl, xi_tw]
-type: phenopowerlaw
-xi_0_sl: [10.0e6, 55.0e6, 0, 60.0e6, 0.0, 60.0e6]
-xi_inf_sl: [40.0e6, 135.0e6, 0, 150.0e6, 0.0, 150.0e6]
-xi_0_tw: [40e6, 0.0, 0.0, 60.0e6]
-a_sl: 2.25
-dot_gamma_0_sl: 0.001
-dot_gamma_0_tw: 0.001
-n_sl: 20
-n_tw: 20
-f_sat_sl-tw: 10.0
+N_sl: [3, 3, 0, 6, 0, 6]
+N_tw: [6, 0, 0, 6]
+h_0_tw-tw: 50.0e+6
+h_0_sl-sl: 500.0e+6
+h_0_tw-sl: 150.0e+6
+h_sl-sl: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+h_tw-tw: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+h_sl-tw: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+h_tw-sl: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+output: [xi_sl, xi_tw]
+type: phenopowerlaw
+xi_0_sl: [10.e+6, 55.e+6, 0., 60.e+6, 0., 60.e+6]
+xi_inf_sl: [40.e+6, 135.e+6, 0., 150.e+6, 0., 150.e+6]
+xi_0_tw: [40.e+6, 0., 0., 60.e+6]
+a_sl: 2.25
+dot_gamma_0_sl: 0.001
+dot_gamma_0_tw: 0.001
+n_sl: 20
+n_tw: 20
+f_sat_sl-tw: 10.0
@@ -1,20 +0,0 @@
-# M. Levy, Handbook of Elastic Properties of Solids, Liquids, and Gases (2001)
-# C. Zambaldi, "Orientation informed nanoindentation of a-titanium: Indentation pileup in hexagonal metals deforming by prismatic slip", J. Mater. Res., Vol. 27, No. 1, Jan 14, 2012
-# Better use values from L. Wang, Z. Zheng, H. Phukan, P. Kenesei, J.-S. Park, J. Lind, R.M. Suter, T.R. Bieler, Direct measurement of critical resolved shear stress of prismatic and basal slip in polycrystalline Ti using high energy X-ray diffraction microscopy, Acta Mater 2017
-cpTi:
-lattice: hP
-c/a: 1.587
-mechanical:
-output: [F, P, F_e, F_p, L_p, O]
-elastic: {C_11: 160.0e9, C_12: 90.0e9, C_13: 66.0e9, C_33: 181.7e9, C_44: 46.5e9, type: Hooke}
-plastic:
-N_sl: [3, 3, 0, 6, 12]
-a_sl: 2.0
-dot_gamma_0_sl: 0.001
-h_0_sl-sl: 200e6
-h_sl-sl: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
-n_sl: 20
-output: [gamma_sl]
-type: phenopowerlaw
-xi_0_sl: [0.15e9, 0.09e9, 0, 0.20e9, 0.25e9]
-xi_inf_sl: [0.24e9, 0.5e9, 0, 0.6e9, 0.8e9]
@@ -30,7 +30,7 @@ grid:
 eps_div_rtol: 5.0e-4 # relative tolerance for fulfillment of stress equilibrium
 eps_curl_atol: 1.0e-12 # absolute tolerance for fulfillment of strain compatibility
 eps_curl_rtol: 5.0e-4 # relative tolerance for fulfillment of strain compatibility
-eps_stress_atol: 1.0e3 # absolute tolerance for fulfillment of stress BC
+eps_stress_atol: 1.0e+3 # absolute tolerance for fulfillment of stress BC
 eps_stress_rtol: 0.01 # relative tolerance for fulfillment of stress BC
 eps_damage_atol: 1.0e-2 # absolute tolerance for damage evolution
 eps_damage_rtol: 1.0e-6 # relative tolerance for damage evolution
@@ -0,0 +1,7 @@
+references:
+- D. Tromans,
+International Journal of Recent Research and Applied Studies 6(4)/462-483, 2011,
+www.arpapress.com/Volumes/Vol6Issue4/IJRRAS_6_4_14.pdf
+lattice: hP
+c/a: 1.62350
+rho: 1740.0
@@ -1,11 +1,11 @@
 type: anisobrittle
 N_cl: [3]
-g_crit: [0.50e7]
+g_crit: [0.5e+7]
 s_crit: [0.006666]
-dot_o: 1e-3
+dot_o: 1.e-3
 q: 20

 output: [f_phi]

-D_11: 1.0
-M: 0.001
+K_11: 1.0
+mu: 0.001

@@ -5,5 +5,5 @@ isoBrittle_atol: 0.01

 output: [f_phi]

-D_11: 1.0
-M: 0.001
+K_11: 1.0
+mu: 0.001
@@ -1,5 +1,5 @@
 type: thermalexpansion
 references:
 - en.wikipedia.org/wiki/Thermal_expansion
-A_11: 14e-6
+A_11: 14.e-6
 T_ref: 293.15

@@ -1,5 +1,5 @@
 type: thermalexpansion
 references:
 - en.wikipedia.org/wiki/Thermal_expansion
-A_11: 17e-6
+A_11: 17.e-6
 T_ref: 293.15
@@ -1,8 +1,8 @@
 type: Hooke
 references:
 - J. Vallin et al.,
-Journal of Applied Physics 35(6), 1825-1826, 1964,
+Journal of Applied Physics 35(6)/1825-1826, 1964,
 10.1063/1.1713749
-C_11: 107.3e9
-C_12: 60.8e9
-C_44: 28.3e9
+C_11: 107.3e+9
+C_12: 60.8e+9
+C_44: 28.3e+9

@@ -4,6 +4,6 @@ references:
 Theory of Dislocations, 1982,
 John Wiley & Sons,
 page 837
-C_11: 186e9
-C_12: 157e9
-C_44: 42e9
+C_11: 186.e+9
+C_12: 157.e+9
+C_44: 42.e+9

@@ -1,6 +1,6 @@
 type: Hooke
 references:
 - www.mit.edu/~6.777/matprops/copper.htm, fixed typo
-C_11: 168.3e9
-C_12: 122.1e9
-C_44: 75.7e9
+C_11: 168.3e+9
+C_12: 122.1e+9
+C_44: 75.7e+9

@@ -4,6 +4,6 @@ references:
 Theory of Dislocations, 1982,
 John Wiley & Sons,
 page 837
-C_11: 242e9
-C_12: 146.5e9
-C_44: 112e9
+C_11: 242.e9
+C_12: 146.5e+9
+C_44: 112.e9
@@ -0,0 +1,10 @@
+type: Hooke
+references:
+- D. Tromans,
+International Journal of Recent Research and Applied Studies 6(4)/462-483, 2011,
+www.arpapress.com/Volumes/Vol6Issue4/IJRRAS_6_4_14.pdf
+C_11: 59.3e+9
+C_33: 61.5e+9
+C_44: 16.4e+9
+C_12: 25.7e+9
+C_13: 21.4e+9

@@ -4,6 +4,6 @@ references:
 Theory of Dislocations, 1982,
 John Wiley & Sons,
 page 837
-C_11: 246.5e9
-C_12: 147.3e9
-C_44: 124.7e9
+C_11: 246.5e+9
+C_12: 147.3e+9
+C_44: 124.7e+9

@@ -0,0 +1,11 @@
+type: Hooke
+references:
+- D. Music et al.,
+Applied Physics Letters 99(19)/191904, 2007,
+10.1063/1.2807677
+- S.L. Wong et al.,
+Acta Materialia 118/140-151, 2016,
+10.1016/j.actamat.2016.07.032
+C_11: 175.0e+9
+C_12: 115.0e+9
+C_44: 135.0e+9
@@ -1,10 +1,10 @@
 type: Hooke
 references:
 - L. Wang et al.,
-Acta Materialia 132, 598-610, 2017,
+Acta Materialia 132/598-610, 2017,
 10.1016/j.actamat.2017.05.015
-C_11: 162.4e9
-C_33: 181.6e9
-C_44: 47.2e9
-C_12: 92e9
-C_13: 69e9
+C_11: 162.4e+9
+C_33: 181.6e+9
+C_44: 47.2e+9
+C_12: 92.e+9
+C_13: 69.e+9

@@ -1,8 +1,8 @@
 type: Hooke
 references:
 - D. Cereceda et al.,
-International Journal of Plasticity, 78, 242-265, 2016,
+International Journal of Plasticity 78/242-265, 2016,
 10.1016/j.ijplas.2015.09.002
-C_11: 523.e9
-C_12: 202.e9
-C_44: 161.e9
+C_11: 523.e+9
+C_12: 202.e+9
+C_44: 161.e+9

@@ -0,0 +1,8 @@
+type: Hooke
+references:
+- T. Maiti and P. Eisenlohr,
+Scripta Materialia 145/37-40, 2018,
+10.1016/j.scriptamat.2017.09.047
+C_11: 1.e+8
+C_12: 1.e+6
+C_44: 4.95e+7
@@ -1,24 +1,24 @@
 type: dislotwin
 references:
 - K. Sedighiani et al.,
-International Journal of Plasticity, 134, 102779, 2020,
+International Journal of Plasticity 134/102779, 2020,
 10.1016/j.ijplas.2020.102779
 - K. Sedighiani et al.,
 Mechanics of Materials, submitted
 output: [rho_dip, rho_mob]
 N_sl: [12, 12]
 b_sl: [2.49e-10, 2.49e-10]
-rho_mob_0: [2.81e12, 2.8e12]
+rho_mob_0: [2.81e12, 2.8e+12]
 rho_dip_0: [1.0, 1.0] # not given
-v_0: [1.4e3, 1.4e3]
+v_0: [1.4e+3, 1.4e+3]
 Q_s: [1.57e-19, 1.57e-19] # Delta_F
-tau_0: [454e6, 454e6]
+tau_0: [454.e+6, 454.e+6]
 p_sl: [0.325, 0.325]
 q_sl: [1.55, 1.55]
 i_sl: [23.3, 23.3]
 D_a: 7.4 # C_anni
 B: [0.001, 0.001]
 h_sl-sl: [0.1, 0.72, 0.1, 0.053, 0.053, 0.073, 0.137, 0.72, 0.72, 0.053, 0.053, 0.053, 0.053, 0.073, 0.073, 0.073, 0.073, 0.073, 0.073, 0.137, 0.073, 0.073, 0.137, 0.073]
-D_0: 4.000E-05
-Q_cl: 5.400E-19 # no recovery!
-D: 40e-6 # estimated
+D_0: 4.0e-05
+Q_cl: 5.4e-19 # no recovery!
+D: 40.e-6 # estimated

@@ -0,0 +1,15 @@
+type: isotropic
+references:
+- T. Maiti and P. Eisenlohr,
+Scripta Materialia 145/37-40, 2018,
+10.1016/j.scriptamat.2017.09.047
+output: [xi]
+dot_gamma_0: 0.001
+n: 20.
+xi_0: 0.3e+6
+xi_inf: 0.6e+6
+a: 2.
+h_0: 1.e+6
+M: 1.
+h: 1.
+dilatation: True
@@ -32,7 +32,7 @@ w: 10 # w_k in multiple of b
 p_sl: 1
 q_sl: 1

-nu_a: 50.e9
+nu_a: 50.e+9
 B: 1.e-2
 f_ed: 1.0 # k_3

@@ -46,4 +46,4 @@ sigma_rho_u: 0 # no random distribution


 short_range_stress_correction: false
-rho_significant: 1e6
+rho_significant: 1.e6

@@ -13,10 +13,10 @@ d_sc: [12.e-9]
 i_sl: [45] # k_2
 f_ed_mult: 0.1 # k_1

-rho_u_ed_neg_0: [6.e10] # 2.88e12 / (12*4)
-rho_u_ed_pos_0: [6.e10] # 2.88e12 / (12*4)
-rho_u_sc_neg_0: [6.e10] # 2.88e12 / (12*4)
-rho_u_sc_pos_0: [6.e10] # 2.88e12 / (12*4)
+rho_u_ed_neg_0: [6.e+10] # 2.88e12 / (12*4)
+rho_u_ed_pos_0: [6.e+10] # 2.88e12 / (12*4)
+rho_u_sc_neg_0: [6.e+10] # 2.88e12 / (12*4)
+rho_u_sc_pos_0: [6.e+10] # 2.88e12 / (12*4)
 rho_d_ed_0: [0]
 rho_d_sc_0: [0]

@@ -32,7 +32,7 @@ w: 10 # w_k
 p_sl: 1
 q_sl: 1

-nu_a: 50.e9
+nu_a: 50.e+9
 B: 1.e-3
 f_ed: 0.01 # k_3

@@ -46,4 +46,4 @@ sigma_rho_u: 0 # no random distribution


 short_range_stress_correction: false
-rho_significant: 1e6
+rho_significant: 1.e6
@@ -1,15 +1,15 @@
 type: phenopowerlaw
 references:
 - W.F. Hosford et al.,
-Acta Metallurgica, 8(3), 187-199, 1960,
+Acta Metallurgica 8(3)/187-199, 1960,
 10.1016/0001-6160(60)90127-9,
 fitted from Fig. 5
 output: [xi_sl, gamma_sl]
 N_sl: [12]
 n_sl: 20
 a_sl: 3.1
-h_0_sl-sl: 1.7e8
-xi_0_sl: [5.0e6]
-xi_inf_sl: [37.5e6]
+h_0_sl-sl: 1.7e+8
+xi_0_sl: [5.0e+6]
+xi_inf_sl: [37.5e+6]
 h_sl-sl: [1, 1, 1.4, 1.4, 1.4, 1.4, 1.4]
 dot_gamma_0_sl: 4.5e-3

@@ -1,17 +1,17 @@
 type: phenopowerlaw
 references:
 - D. Ma et al.,
-Acta Materialia, 103, 796-808, 2016,
+Acta Materialia 103/796-808, 2016,
 10.1016/j.actamat.2015.11.016
 - I. Kovács and G.Vörös,
-International Journal of Plasticity, 12, 35-43, 1996,
+International Journal of Plasticity 12/35-43, 1996,
 10.1016/S0749-6419(95)00043-7
 output: [xi_sl, gamma_sl]
 N_sl: [12]
 n_sl: 83.3
 a_sl: 1.0
-h_0_sl-sl: 75.0e6
-xi_0_sl: [26.25e6]
-xi_inf_sl: [53.0e6]
+h_0_sl-sl: 75.0e+6
+xi_0_sl: [26.25e+6]
+xi_inf_sl: [53.0e+6]
 h_sl-sl: [1, 1, 1.4, 1.4, 1.4, 1.4, 1.4]
 dot_gamma_0_sl: 0.001
@@ -1,15 +1,15 @@
 type: phenopowerlaw
 references:
 - T Takeuchi,
-Transactions of the Japan Institute of Metals 16(10), 629-640, 1975,
+Transactions of the Japan Institute of Metals 16(10)/629-640, 1975,
 10.2320/matertrans1960.16.629,
 fitted from Fig. 3b
 output: [xi_sl, gamma_sl]
 N_sl: [12]
 n_sl: 20
 a_sl: 1.0
-h_0_sl-sl: 2.4e8
-xi_0_sl: [1.5e6]
-xi_inf_sl: [112.5e6]
+h_0_sl-sl: 2.4e+8
+xi_0_sl: [1.5e+6]
+xi_inf_sl: [112.5e+6]
 h_sl-sl: [1, 1, 1.4, 1.4, 1.4, 1.4, 1.4]
-dot_gamma_0_sl: 3e-3
+dot_gamma_0_sl: 3.e-3

@@ -1,14 +1,14 @@
 type: phenopowerlaw
 references:
 - C.C. Tasan et al.,
-Acta Materialia, 81, 386-400, 2014,
+Acta Materialia 81/386-400, 2014,
 10.1016/j.actamat.2014.07.071
 output: [xi_sl, gamma_sl]
 N_sl: [12, 12]
 n_sl: 20
 a_sl: 2.25
-h_0_sl-sl: 1.0e9
-xi_0_sl: [95.e6, 96.e6]
-xi_inf_sl: [222.e6, 412.e6]
+h_0_sl-sl: 1.0e+9
+xi_0_sl: [95.e+6, 96.e+6]
+xi_inf_sl: [222.e+6, 412.e+6]
 h_sl-sl: [1, 1.4, 1, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4, 1.4]
 dot_gamma_0_sl: 0.001
@@ -0,0 +1,20 @@
+type: phenopowerlaw
+references:
+- C. Zambaldi et al.,
+Journal of Materials Research 27(1)/356-367, 2021,
+10.1557/jmr.2011.334
+- L. Wang et al.,
+Acta Materialia 132/598-610, 2017,
+10.1016/j.actamat.2017.05.015
+output: [gamma_sl]
+N_sl: [3, 3, 0, 0, 12]
+n_sl: 20
+a_sl: 2.0
+dot_gamma_0_sl: 0.001
+h_0_sl-sl: 200.e+6
+# C. Zambaldi et al.:
+xi_0_sl: [349.e+6, 150.e+6, 0.0, 0.0, 1107.e+6]
+xi_inf_sl: [568.e+6, 150.e+7, 0.0, 0.0, 3420.e+6]
+# L. Wang et al. :
+# xi_0_sl: [127.e+6, 96.e+6, 0.0, 0.0, 240.e+6]
+h_sl-sl: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]

@@ -1,3 +1,3 @@
 C_p: 1
-K_11: 1e30
-K_33: 1e30
+K_11: 1.e+30
+K_33: 1.e+30
@@ -9,19 +9,19 @@ phase:
 lattice: cF
 mechanical:
 output: [F, P, F_e, F_p, L_p, O]
-elastic: {type: Hooke, C_11: 106.75e9, C_12: 60.41e9, C_44: 28.34e9}
+elastic: {type: Hooke, C_11: 106.75e+9, C_12: 60.41e+9, C_44: 28.34e+9}
 plastic:
 type: phenopowerlaw
 N_sl: [12]
 a_sl: 2.25
 atol_xi: 1.0
 dot_gamma_0_sl: 0.001
-h_0_sl-sl: 75e6
+h_0_sl-sl: 75.e+6
 h_sl-sl: [1, 1, 1.4, 1.4, 1.4, 1.4, 1.4]
 n_sl: 20
 output: [xi_sl]
-xi_0_sl: [31e6]
-xi_inf_sl: [63e6]
+xi_0_sl: [31.e+6]
+xi_inf_sl: [63.e+6]

 material:
 - homogenization: SX
@@ -6,7 +6,7 @@ solver:
 loadstep:
 - boundary_conditions:
 mechanical:
-dot_F: [ [0, 0, 0], [1e-3, 0, 0], [0, 0, 0] ]
+dot_F: [ [0, 0, 0], [1.e-3, 0, 0], [0, 0, 0] ]
 discretization:
 t: 60
 N: 120

@@ -6,7 +6,7 @@ solver:
 loadstep:
 - boundary_conditions:
 mechanical:
-dot_F: [[0, 0, 1e-3], [0, 0, 0], [0, 0, 0]]
+dot_F: [[0, 0, 1.e-3], [0, 0, 0], [0, 0, 0]]
 discretization:
 t: 60
 N: 120
@@ -1,37 +0,0 @@
-solver: {mechanical: spectral_basic}
-
-loadstep:
-- boundary_conditions:
-mechanical:
-P:
-- [x, x, x]
-- [x, 0, x]
-- [x, x, 0]
-dot_F:
-- [0.001, 0, 0]
-- [0, x, 0]
-- [0, 0, x]
-discretization: {t: 10.0, N: 40}
-f_out: 4
-- boundary_conditions:
-mechanical:
-dot_P:
-- [0, x, x]
-- [x, x, x]
-- [x, x, x]
-dot_F:
-- [x, 0, 0]
-- [0, 0, 0]
-- [0, 0, 0]
-discretization: {t: 10.0, N: 20}
-- boundary_conditions:
-mechanical:
-P:
-- [0, x, x]
-- [x, 0, x]
-- [x, x, 0]
-dot_F:
-- [x, 0, 0]
-- [0, x, 0]
-- [0, 0, x]
-discretization: {t: 10.0, N: 20}
@@ -8,19 +8,19 @@ phase:
 lattice: cF
 mechanical:
 output: [F, P, F_e, F_p, L_p]
-elastic: {type: Hooke, C_11: 106.75e9, C_12: 60.41e9, C_44: 28.34e9}
+elastic: {type: Hooke, C_11: 106.75e+9, C_12: 60.41e+9, C_44: 28.34e+9}
 plastic:
 type: phenopowerlaw
 N_sl: [12]
 a_sl: 2.25
 atol_xi: 1.0
 dot_gamma_0_sl: 0.001
-h_0_sl-sl: 75e6
+h_0_sl-sl: 75.e+6
 h_sl-sl: [1, 1, 1.4, 1.4, 1.4, 1.4, 1.4]
 n_sl: 20
 output: [xi_sl]
-xi_0_sl: [31e6]
-xi_inf_sl: [63e6]
+xi_0_sl: [31.e+6]
+xi_inf_sl: [63.e+6]

 material:
 - constituents:
@@ -1,13 +1,13 @@
 #initial elastic step
 $Loadcase 1 time 0.0005 incs 1 frequency 5
 Face 1 X 0.01
-Face 2 X 0.00
-Face 2 Y 0.00
-Face 2 Z 0.00
+Face 2 X 0.0
+Face 2 Y 0.0
+Face 2 Z 0.0
 $EndLoadcase
 $Loadcase 2 time 10.0 incs 200 frequency 5
 Face 1 X 0.01
-Face 2 X 0.00
-Face 2 Y 0.00
-Face 2 Z 0.00
+Face 2 X 0.0
+Face 2 Y 0.0
+Face 2 Z 0.0
 $EndLoadcase
@@ -18,8 +18,6 @@ APPENDIX:

 The structure of this directory should be (VERSION = 20XX or 20XX.Y)

-./installation.txt this text
-./apply_DAMASK_modifications.sh script file to apply modifications to the installation
 ./VERSION/Marc_tools/comp_user.original original file from installation
 ./VERSION/Marc_tools/comp_damask_mp modified version using -O1 optimization and OpenMP
 ./VERSION/Marc_tools/comp_damask_lmp modified version using -O0 optimization and OpenMP

@@ -36,6 +34,7 @@ The structure of this directory should be (VERSION = 20XX or 20XX.Y)
 ./VERSION/Mentat_bin/submit4 modified version of original calling run_h_marc
 ./VERSION/Mentat_bin/submit5 modified version of original calling run_marc
 ./VERSION/Mentat_bin/submit6 modified version of original calling run_l_marc
+./VERSION/Mentat_bin/kill1.original original file from installation
 ./VERSION/Mentat_bin/kill4 kill file for submit4, identical to original kill1
 ./VERSION/Mentat_bin/kill5 kill file for submit5, identical to original kill1
 ./VERSION/Mentat_bin/kill6 kill file for submit6, identical to original kill1
@@ -28,6 +28,7 @@ Smoothen interface roughness by simulated curvature flow.
 This is achieved by the diffusion of each initially sharply bounded grain volume within the periodic domain
 up to a given distance 'd' voxels.
 The final geometry is assembled by selecting at each voxel that grain index for which the concentration remains largest.
+References 10.1073/pnas.1111557108 (10.1006/jcph.1994.1105)

 """, version = scriptID)
|
||||||
seeds_p = np.vstack((seeds -np.array([size[0],0.,0.]),seeds, seeds +np.array([size[0],0.,0.])))
|
seeds_p = np.vstack((seeds -np.array([size[0],0.,0.]),seeds, seeds +np.array([size[0],0.,0.])))
|
||||||
seeds_p = np.vstack((seeds_p-np.array([0.,size[1],0.]),seeds_p,seeds_p+np.array([0.,size[1],0.])))
|
seeds_p = np.vstack((seeds_p-np.array([0.,size[1],0.]),seeds_p,seeds_p+np.array([0.,size[1],0.])))
|
||||||
seeds_p = np.vstack((seeds_p-np.array([0.,0.,size[2]]),seeds_p,seeds_p+np.array([0.,0.,size[2]])))
|
seeds_p = np.vstack((seeds_p-np.array([0.,0.,size[2]]),seeds_p,seeds_p+np.array([0.,0.,size[2]])))
|
||||||
coords = grid_filters.coordinates0_point(cells*3,size*3,-size).reshape(-1,3)
|
|
||||||
else:
|
else:
|
||||||
weights_p = weights
|
weights_p = weights
|
||||||
seeds_p = seeds
|
seeds_p = seeds
|
||||||
coords = grid_filters.coordinates0_point(cells,size).reshape(-1,3)
|
|
||||||
|
coords = grid_filters.coordinates0_point(cells,size).reshape(-1,3)
|
||||||
|
|
||||||
pool = mp.Pool(int(os.environ.get('OMP_NUM_THREADS',4)))
|
pool = mp.Pool(int(os.environ.get('OMP_NUM_THREADS',4)))
|
||||||
result = pool.map_async(partial(Grid._find_closest_seed,seeds_p,weights_p), [coord for coord in coords])
|
result = pool.map_async(partial(Grid._find_closest_seed,seeds_p,weights_p), coords)
|
||||||
pool.close()
|
pool.close()
|
||||||
pool.join()
|
pool.join()
|
||||||
material_ = np.array(result.get())
|
material_ = np.array(result.get()).reshape(cells)
|
||||||
|
|
||||||
if periodic:
|
if periodic: material_ %= len(weights)
|
||||||
material_ = material_.reshape(cells*3)
|
|
||||||
material_ = material_[cells[0]:cells[0]*2,cells[1]:cells[1]*2,cells[2]:cells[2]*2]%seeds.shape[0]
|
|
||||||
else:
|
|
||||||
material_ = material_.reshape(cells)
|
|
||||||
|
|
||||||
return Grid(material = material_ if material is None else material[material_],
|
return Grid(material = material_ if material is None else material[material_],
|
||||||
size = size,
|
size = size,
|
||||||
|
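The periodic branch above now keeps the tessellation on the original cell grid: the seeds are tiled into 27 periodic copies and the winning index is folded back with a modulo instead of cropping an enlarged voxel array. A self-contained NumPy sketch of that idea (a hypothetical standalone helper, not the damask API):

    import numpy as np

    def closest_seed_periodic(points, seeds, size):
        """Index of the closest seed for each point under periodic boundary conditions."""
        offsets = np.array(np.meshgrid([-1, 0, 1], [-1, 0, 1], [-1, 0, 1])).T.reshape(-1, 3)
        seeds_p = (seeds[None, :, :] + offsets[:, None, :]*size).reshape(-1, 3)   # 27 periodic copies
        d = np.linalg.norm(points[:, None, :] - seeds_p[None, :, :], axis=2)      # point-to-seed distances
        return np.argmin(d, axis=1) % len(seeds)                                  # fold images back to original seeds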
@@ -661,6 +657,30 @@ class Grid:
 updated : damask.Grid
 Updated grid-based geometry.

+Examples
+--------
+Add a sphere at the center.
+
+>>> import numpy as np
+>>> import damask
+>>> g = damask.Grid(np.zeros([64]*3,int), np.ones(3)*1e-4)
+>>> g.add_primitive(np.ones(3)*5e-5,np.ones(3)*5e-5,1)
+cells a b c: 64 x 64 x 64
+size x y z: 0.0001 x 0.0001 x 0.0001
+origin x y z: 0.0 0.0 0.0
+# materials: 2
+
+Add a cube at the origin.
+
+>>> import numpy as np
+>>> import damask
+>>> g = damask.Grid(np.zeros([64]*3,int), np.ones(3)*1e-4)
+>>> g.add_primitive(np.ones(3,int)*32,np.zeros(3),np.inf)
+cells a b c: 64 x 64 x 64
+size x y z: 0.0001 x 0.0001 x 0.0001
+origin x y z: 0.0 0.0 0.0
+# materials: 2
+
 """
 # radius and center
 r = np.array(dimension)/2.0*self.size/self.cells if np.array(dimension).dtype in np.sctypes['int'] else \

@@ -706,6 +726,19 @@ class Grid:
 updated : damask.Grid
 Updated grid-based geometry.

+Examples
+--------
+Mirror along x- and y-direction.
+
+>>> import numpy as np
+>>> import damask
+>>> g = damask.Grid(np.zeros([32]*3,int), np.ones(3)*1e-4)
+>>> g.mirror('xy',True)
+cells a b c: 64 x 64 x 32
+size x y z: 0.0002 x 0.0002 x 0.0001
+origin x y z: 0.0 0.0 0.0
+# materials: 1
+
 """
 valid = ['x','y','z']
 if not set(directions).issubset(valid):

@@ -773,6 +806,19 @@ class Grid:
 updated : damask.Grid
 Updated grid-based geometry.

+Examples
+--------
+Double resolution.
+
+>>> import numpy as np
+>>> import damask
+>>> g = damask.Grid(np.zeros([32]*3,int),np.ones(3)*1e-4)
+>>> g.scale(g.cells*2)
+cells a b c: 64 x 64 x 64
+size x y z: 0.0001 x 0.0001 x 0.0001
+origin x y z: 0.0 0.0 0.0
+# materials: 1
+
 """
 return Grid(material = ndimage.interpolation.zoom(
 self.material,

@@ -903,6 +949,19 @@ class Grid:
 updated : damask.Grid
 Updated grid-based geometry.

+Examples
+--------
+Remove 1/2 of the microstructure in z-direction.
+
+>>> import numpy as np
+>>> import damask
+>>> g = damask.Grid(np.zeros([32]*3,int),np.ones(3)*1e-4)
+>>> g.canvas(np.array([32,32,16],int))
+cells a b c: 33 x 32 x 16
+size x y z: 0.0001 x 0.0001 x 5e-05
+origin x y z: 0.0 0.0 0.0
+# materials: 1
+
 """
 if offset is None: offset = 0
 if fill is None: fill = np.nanmax(self.material) + 1
@@ -1732,6 +1732,3 @@ class Result:
 if flatten: r = util.dict_flatten(r)

 return None if (type(r) == dict and r == {}) else r
-
-save_VTK = export_VTK
-save_XDMF = export_XDMF
@@ -577,8 +577,8 @@ class _ProgressBar:
 self.total = total
 self.prefix = prefix
 self.bar_length = bar_length
-self.start_time = datetime.datetime.now()
-self.last_fraction = 0.0
+self.time_start = self.time_last_update = datetime.datetime.now()
+self.fraction_last = 0.0

 sys.stderr.write(f"{self.prefix} {'░'*self.bar_length} 0% ETA n/a")
 sys.stderr.flush()

@@ -588,17 +588,17 @@ class _ProgressBar:
 fraction = (iteration+1) / self.total
 filled_length = int(self.bar_length * fraction)

-delta_time = datetime.datetime.now() - self.start_time
-if filled_length > int(self.bar_length * self.last_fraction) or \
-delta_time > datetime.timedelta(minutes=1):
+if filled_length > int(self.bar_length * self.fraction_last) or \
+datetime.datetime.now() - self.time_last_update > datetime.timedelta(seconds=10):
+self.time_last_update = datetime.datetime.now()
 bar = '█' * filled_length + '░' * (self.bar_length - filled_length)
-remaining_time = (self.total - (iteration+1)) * delta_time / (iteration+1)
+remaining_time = (datetime.datetime.now() - self.time_start) \
+* (self.total - (iteration+1)) / (iteration+1)
 remaining_time -= datetime.timedelta(microseconds=remaining_time.microseconds) # remove μs
 sys.stderr.write(f'\r{self.prefix} {bar} {fraction:>4.0%} ETA {remaining_time}')
 sys.stderr.flush()

-self.last_fraction = fraction
+self.fraction_last = fraction

 if iteration == self.total - 1:
 sys.stderr.write('\n')
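The reworked progress bar redraws only when the bar visibly advances or more than 10 s have passed since the last redraw, and the ETA is always computed from the overall start time. A condensed standalone sketch of that throttling logic (hypothetical helper names, not the damask API):

    import datetime
    import sys

    def update(state, iteration, total, bar_length=50):
        """Redraw only when the bar advances or >10 s elapsed, mirroring _ProgressBar above."""
        fraction = (iteration + 1) / total
        filled = int(bar_length * fraction)
        now = datetime.datetime.now()
        if filled > int(bar_length * state['fraction_last']) or \
           now - state['time_last_update'] > datetime.timedelta(seconds=10):
            state['time_last_update'] = now
            eta = (now - state['time_start']) * (total - (iteration + 1)) / (iteration + 1)
            eta -= datetime.timedelta(microseconds=eta.microseconds)       # drop sub-second noise
            bar = '█' * filled + '░' * (bar_length - filled)
            sys.stderr.write(f'\r{bar} {fraction:>4.0%} ETA {eta}')
        state['fraction_last'] = fraction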
@@ -11,8 +11,8 @@ setuptools.setup(
 version=version,
 author='The DAMASK team',
 author_email='damask@mpie.de',
-description='DAMASK library',
-long_description='Python library for managing DAMASK simulations',
+description='DAMASK processing tools',
+long_description='Pre- and post-processing tools for DAMASK',
 url='https://damask.mpie.de',
 packages=setuptools.find_packages(),
 include_package_data=True,
@@ -5,6 +5,7 @@
 !--------------------------------------------------------------------------------------------------
 module CPFEM2
 use prec
+use parallelization
 use config
 use math
 use rotations

@@ -15,15 +16,16 @@ module CPFEM2
 use IO
 use base64
 use DAMASK_interface
-use results
 use discretization
+use HDF5
 use HDF5_utilities
+use results
 use homogenization
 use phase
-#if defined(Mesh)
+#if defined(MESH)
 use FEM_quadrature
 use discretization_mesh
-#elif defined(Grid)
+#elif defined(GRID)
 use discretization_grid
 #endif

@@ -43,7 +45,7 @@ subroutine CPFEM_initAll
 call prec_init
 call IO_init
 call base64_init
-#ifdef Mesh
+#ifdef MESH
 call FEM_quadrature_init
 #endif
 call YAML_types_init

@@ -54,9 +56,9 @@ subroutine CPFEM_initAll
 call lattice_init
 call HDF5_utilities_init
 call results_init(restart=interface_restartInc>0)
-#if defined(Mesh)
+#if defined(MESH)
 call discretization_mesh_init(restart=interface_restartInc>0)
-#elif defined(Grid)
+#elif defined(GRID)
 call discretization_grid_init(restart=interface_restartInc>0)
 #endif
 call material_init(restart=interface_restartInc>0)
@@ -11,15 +11,17 @@
 #define QUOTE(x) #x
 #define PASTE(x,y) x ## y

+#ifdef Marc4DAMASK
+#define MARC4DAMASK Marc4DAMASK
+#endif
+
 #include "prec.f90"

 module DAMASK_interface
 use prec
-#if __INTEL_COMPILER >= 1800
 use, intrinsic :: ISO_fortran_env, only: &
 compiler_version, &
 compiler_options
-#endif
 use ifport, only: &
 CHDIR

@@ -212,8 +214,8 @@ subroutine hypela2(d,g,e,de,s,t,dt,ngens,m,nn,kcus,matus,ndi,nshear,disp, &
 ! Marc common blocks are in fixed format so they have to be reformated to free format (f90)
 ! Beware of changes in newer Marc versions

-#include QUOTE(PASTE(./Marc/include/concom,Marc4DAMASK)) ! concom is needed for inc, lovl
-#include QUOTE(PASTE(./Marc/include/creeps,Marc4DAMASK)) ! creeps is needed for timinc (time increment)
+#include QUOTE(PASTE(./Marc/include/concom,MARC4DAMASK)) ! concom is needed for inc, lovl
+#include QUOTE(PASTE(./Marc/include/creeps,MARC4DAMASK)) ! creeps is needed for timinc (time increment)

 logical :: cutBack
 real(pReal), dimension(6) :: stress

@@ -365,7 +367,7 @@ subroutine uedinc(inc,incsub)
 integer :: n, nqncomp, nqdatatype
 integer, save :: inc_written
 real(pReal), allocatable, dimension(:,:) :: d_n
-#include QUOTE(PASTE(./Marc/include/creeps,Marc4DAMASK)) ! creeps is needed for timinc (time increment)
+#include QUOTE(PASTE(./Marc/include/creeps,MARC4DAMASK)) ! creeps is needed for timinc (time increment)


 if (inc > inc_written) then
@@ -86,6 +86,11 @@ subroutine DAMASK_interface_init
 print*, ' _/ _/ _/_/_/_/ _/ _/ _/ _/_/_/_/ _/_/ _/_/ _/_/'
 print*, ' _/ _/ _/ _/ _/ _/ _/ _/ _/ _/ _/ _/'
 print*, ' _/_/_/ _/ _/ _/ _/ _/ _/ _/_/_/ _/ _/ _/_/_/'
+#if defined(GRID)
+print*, ' Grid solver'
+#elif defined(MESH)
+print*, ' Mesh solver'
+#endif
 #ifdef DEBUG
 print'(/,a)', ' debug version - debug version - debug version - debug version - debug version'
 #endif
@@ -6,15 +6,19 @@
  !--------------------------------------------------------------------------------------------------
  module HDF5_utilities
  use HDF5
- #ifdef PETSc
- use PETSC
+ #ifdef PETSC
+ #include <petsc/finclude/petscsys.h>
+ use PETScSys
+ #if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
+ use MPI
+ #endif
  #endif

  use prec
  use parallelization

  implicit none
- public
+ private

  !--------------------------------------------------------------------------------------------------
  !> @brief reads integer or float data of defined shape from file
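Note: the version guard introduced here reappears in nearly every module below. A minimal, self-contained sketch of the pattern (module name hypothetical, not part of this change), assuming that PETSc releases after 3.14 no longer make the MPI module's names visible through their own Fortran modules unless PETSC_HAVE_MPI_F90MODULE_VISIBILITY is set:

  module sketch_mpi_visibility                 ! hypothetical name, for illustration only
  #include <petsc/finclude/petscsys.h>
    use PETScSys
  #if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
    use MPI                                    ! import MPI_COMM_WORLD, MPI_Bcast, ... explicitly
  #endif
    implicit none
  end module sketch_mpi_visibility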
@@ -71,11 +75,23 @@ module HDF5_utilities
  module procedure HDF5_addAttribute_real_array
  end interface HDF5_addAttribute

- #ifdef PETSc
+ #ifdef PETSC
  logical, parameter, private :: parallel_default = .true.
  #else
  logical, parameter, private :: parallel_default = .false.
  #endif
+ public :: &
+ HDF5_utilities_init, &
+ HDF5_read, &
+ HDF5_write, &
+ HDF5_addAttribute, &
+ HDF5_addGroup, &
+ HDF5_openGroup, &
+ HDF5_closeGroup, &
+ HDF5_openFile, &
+ HDF5_closeFile, &
+ HDF5_objectExists, &
+ HDF5_setLink

  contains

@@ -130,7 +146,7 @@ integer(HID_T) function HDF5_openFile(fileName,mode)
  call h5pcreate_f(H5P_FILE_ACCESS_F, plist_id, hdferr)
  if(hdferr < 0) error stop 'HDF5 error'

- #ifdef PETSc
+ #ifdef PETSC
  call h5pset_fapl_mpio_f(plist_id, PETSC_COMM_WORLD, MPI_INFO_NULL, hdferr)
  if(hdferr < 0) error stop 'HDF5 error'
  #endif
@@ -187,7 +203,7 @@ integer(HID_T) function HDF5_addGroup(fileHandle,groupName)

  !-------------------------------------------------------------------------------------------------
  ! setting I/O mode to collective
- #ifdef PETSc
+ #ifdef PETSC
  call h5pset_all_coll_metadata_ops_f(aplist_id, .true., hdferr)
  if(hdferr < 0) error stop 'HDF5 error'
  #endif
@@ -223,7 +239,7 @@ integer(HID_T) function HDF5_openGroup(fileHandle,groupName)

  !-------------------------------------------------------------------------------------------------
  ! setting I/O mode to collective
- #ifdef PETSc
+ #ifdef PETSC
  call h5pget_all_coll_metadata_ops_f(aplist_id, is_collective, hdferr)
  if(hdferr < 0) error stop 'HDF5 error'
  #endif
@@ -1692,7 +1708,7 @@ subroutine initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_
  !--------------------------------------------------------------------------------------------------
  readSize = 0
  readSize(worldrank+1) = int(localShape(ubound(localShape,1)))
- #ifdef PETSc
+ #ifdef PETSC
  if (parallel) then
  call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr)
  if(hdferr < 0) error stop 'HDF5 error'
@@ -1713,7 +1729,7 @@ subroutine initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_
  ! creating a property list for IO and set it to collective
  call h5pcreate_f(H5P_DATASET_ACCESS_F, aplist_id, hdferr)
  if(hdferr < 0) error stop 'HDF5 error'
- #ifdef PETSc
+ #ifdef PETSC
  call h5pset_all_coll_metadata_ops_f(aplist_id, .true., hdferr)
  if(hdferr < 0) error stop 'HDF5 error'
  #endif
@@ -1782,7 +1798,7 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
  ! creating a property list for transfer properties (is collective when reading in parallel)
  call h5pcreate_f(H5P_DATASET_XFER_F, plist_id, hdferr)
  if(hdferr < 0) error stop 'HDF5 error'
- #ifdef PETSc
+ #ifdef PETSC
  if (parallel) then
  call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr)
  if(hdferr < 0) error stop 'HDF5 error'
@@ -1793,7 +1809,7 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
  ! determine the global data layout among all processes
  writeSize = 0
  writeSize(worldrank+1) = int(myShape(ubound(myShape,1)))
- #ifdef PETSc
+ #ifdef PETSC
  if (parallel) then
  call MPI_allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get total output size over each process
  if (ierr /= 0) error stop 'MPI error'
@@ -8,7 +8,11 @@
  !--------------------------------------------------------------------------------------------------
  program DAMASK_grid
  #include <petsc/finclude/petscsys.h>
- use PETScsys
+ use PETScSys
+ #if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
+ use MPI_f08
+ #endif
+
  use prec
  use parallelization
  use DAMASK_interface
@@ -432,7 +436,7 @@ program DAMASK_grid
  print'(/,a,i0,a)', ' increment ', totalIncsCounter, ' NOT converged'
  endif; flush(IO_STDOUT)

- call MPI_Allreduce(interface_SIGUSR1,signal,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(interface_SIGUSR1,signal,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr)
  if (ierr /= 0) error stop 'MPI error'
  if (mod(inc,loadCases(l)%f_out) == 0 .or. signal) then
  print'(1/,a)', ' ... writing results to file ......................................'
@@ -440,14 +444,14 @@ program DAMASK_grid
  call CPFEM_results(totalIncsCounter,time)
  endif
  if(signal) call interface_setSIGUSR1(.false.)
- call MPI_Allreduce(interface_SIGUSR2,signal,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(interface_SIGUSR2,signal,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr)
  if (ierr /= 0) error stop 'MPI error'
  if (mod(inc,loadCases(l)%f_restart) == 0 .or. signal) then
  call mechanical_restartWrite
  call CPFEM_restartWrite
  endif
  if(signal) call interface_setSIGUSR2(.false.)
- call MPI_Allreduce(interface_SIGTERM,signal,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(interface_SIGTERM,signal,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr)
  if (ierr /= 0) error stop 'MPI error'
  if (signal) exit loadCaseLooping
  endif skipping
@@ -6,7 +6,10 @@
  !--------------------------------------------------------------------------------------------------
  module discretization_grid
  #include <petsc/finclude/petscsys.h>
- use PETScsys
+ use PETScSys
+ #if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
+ use MPI_f08
+ #endif

  use prec
  use parallelization
@@ -75,12 +78,12 @@ subroutine discretization_grid_init(restart)
  endif


- call MPI_Bcast(grid,3,MPI_INTEGER,0,PETSC_COMM_WORLD, ierr)
+ call MPI_Bcast(grid,3,MPI_INTEGER,0,MPI_COMM_WORLD, ierr)
  if (ierr /= 0) error stop 'MPI error'
  if (grid(1) < 2) call IO_error(844, ext_msg='cells(1) must be larger than 1')
- call MPI_Bcast(geomSize,3,MPI_DOUBLE,0,PETSC_COMM_WORLD, ierr)
+ call MPI_Bcast(geomSize,3,MPI_DOUBLE,0,MPI_COMM_WORLD, ierr)
  if (ierr /= 0) error stop 'MPI error'
- call MPI_Bcast(origin,3,MPI_DOUBLE,0,PETSC_COMM_WORLD, ierr)
+ call MPI_Bcast(origin,3,MPI_DOUBLE,0,MPI_COMM_WORLD, ierr)
  if (ierr /= 0) error stop 'MPI error'

  print'(/,a,3(i12 ))', ' cells a b c: ', grid
@@ -105,13 +108,13 @@ subroutine discretization_grid_init(restart)
  myGrid = [grid(1:2),grid3]
  mySize = [geomSize(1:2),size3]

- call MPI_Gather(product(grid(1:2))*grid3Offset,1,MPI_INTEGER,displs, 1,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Gather(product(grid(1:2))*grid3Offset,1,MPI_INTEGER,displs, 1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
  if (ierr /= 0) error stop 'MPI error'
- call MPI_Gather(product(myGrid), 1,MPI_INTEGER,sendcounts,1,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Gather(product(myGrid), 1,MPI_INTEGER,sendcounts,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
  if (ierr /= 0) error stop 'MPI error'

  allocate(materialAt(product(myGrid)))
- call MPI_scatterv(materialAt_global,sendcounts,displs,MPI_INTEGER,materialAt,size(materialAt),MPI_INTEGER,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Scatterv(materialAt_global,sendcounts,displs,MPI_INTEGER,materialAt,size(materialAt),MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
  if (ierr /= 0) error stop 'MPI error'

  call discretization_init(materialAt, &
@@ -7,8 +7,11 @@
  module grid_damage_spectral
  #include <petsc/finclude/petscsnes.h>
  #include <petsc/finclude/petscdmda.h>
- use PETScdmda
- use PETScsnes
+ use PETScDMDA
+ use PETScSNES
+ #if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
+ use MPI_f08
+ #endif

  use prec
  use parallelization
@@ -107,7 +110,7 @@ subroutine grid_damage_spectral_init()
  call SNESSetOptionsPrefix(damage_snes,'damage_',ierr);CHKERRQ(ierr)
  localK = 0
  localK(worldrank) = grid3
- call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr)
  call DMDACreate3D(PETSC_COMM_WORLD, &
  DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary
  DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point
@@ -187,10 +190,10 @@ function grid_damage_spectral_solution(timeinc) result(solution)
  endif
  stagNorm = maxval(abs(phi_current - phi_stagInc))
  solnNorm = maxval(abs(phi_current))
- call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1,MPI_DOUBLE,MPI_MAX,PETSC_COMM_WORLD,ierr)
- call MPI_Allreduce(MPI_IN_PLACE,solnNorm,1,MPI_DOUBLE,MPI_MAX,PETSC_COMM_WORLD,ierr)
- phi_stagInc = phi_current
+ call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD,ierr)
  solution%stagConverged = stagNorm < max(num%eps_damage_atol, num%eps_damage_rtol*solnNorm)
+ call MPI_Allreduce(MPI_IN_PLACE,solution%stagConverged,1,MPI_LOGICAL,MPI_LAND,MPI_COMM_WORLD,ierr)
+ phi_stagInc = phi_current

  !--------------------------------------------------------------------------------------------------
  ! updating damage state
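Note: the added MPI_LAND reduction makes the staggered-convergence decision collective, so every rank leaves (or stays in) the staggered loop together. A minimal, self-contained sketch of that pattern (not DAMASK code; names are invented for the example):

  program land_demo
    use MPI_f08
    implicit none
    logical :: converged
    integer :: rank, ierr
    call MPI_Init(ierr)
    call MPI_Comm_rank(MPI_COMM_WORLD,rank,ierr)
    converged = (rank /= 1)                                      ! pretend rank 1 has not converged yet
    call MPI_Allreduce(MPI_IN_PLACE,converged,1,MPI_LOGICAL,MPI_LAND,MPI_COMM_WORLD,ierr)
    if (rank == 0) print *, 'all ranks converged? ', converged   ! .false. as soon as any rank lags
    call MPI_Finalize(ierr)
  end program land_demo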
@@ -320,9 +323,9 @@ subroutine updateReference()
  enddo

  K_ref = K_ref*wgt
- call MPI_Allreduce(MPI_IN_PLACE,K_ref,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,K_ref,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
  mu_ref = mu_ref*wgt
- call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)

  end subroutine updateReference
@@ -7,13 +7,17 @@
  module grid_mechanical_FEM
  #include <petsc/finclude/petscsnes.h>
  #include <petsc/finclude/petscdmda.h>
- use PETScdmda
- use PETScsnes
+ use PETScDMDA
+ use PETScSNES
+ #if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
+ use MPI_f08
+ #endif

  use prec
  use parallelization
  use DAMASK_interface
  use IO
+ use HDF5
  use HDF5_utilities
  use math
  use rotations
@@ -162,7 +166,7 @@ subroutine grid_mechanical_FEM_init
  CHKERRQ(ierr)
  localK = 0
  localK(worldrank) = grid3
- call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr)
  call DMDACreate3d(PETSC_COMM_WORLD, &
  DM_BOUNDARY_PERIODIC, DM_BOUNDARY_PERIODIC, DM_BOUNDARY_PERIODIC, &
  DMDA_STENCIL_BOX, &
@@ -236,16 +240,16 @@ subroutine grid_mechanical_FEM_init
  groupHandle = HDF5_openGroup(fileHandle,'solver')

  call HDF5_read(P_aim,groupHandle,'P_aim',.false.)
- call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
  if(ierr /=0) error stop 'MPI error'
  call HDF5_read(F_aim,groupHandle,'F_aim',.false.)
- call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
  if(ierr /=0) error stop 'MPI error'
  call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.)
- call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
  if(ierr /=0) error stop 'MPI error'
  call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.)
- call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
  if(ierr /=0) error stop 'MPI error'
  call HDF5_read(F,groupHandle,'F')
  call HDF5_read(F_lastInc,groupHandle,'F_lastInc')
@@ -270,10 +274,10 @@ subroutine grid_mechanical_FEM_init
  restartRead2: if (interface_restartInc > 0) then
  print'(a,i0,a)', ' reading more restart data of increment ', interface_restartInc, ' from file'
  call HDF5_read(C_volAvg,groupHandle,'C_volAvg',.false.)
- call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
  if(ierr /=0) error stop 'MPI error'
  call HDF5_read(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.)
- call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
  if(ierr /=0) error stop 'MPI error'

  call HDF5_closeGroup(groupHandle)
@@ -567,7 +571,7 @@ subroutine formResidual(da_local,x_local, &
  call utilities_constitutiveResponse(P_current,&
  P_av,C_volAvg,devNull, &
  F,params%timeinc,params%rotation_BC)
- call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr)

  !--------------------------------------------------------------------------------------------------
  ! stress BC handling
@@ -7,13 +7,17 @@
  module grid_mechanical_spectral_basic
  #include <petsc/finclude/petscsnes.h>
  #include <petsc/finclude/petscdmda.h>
- use PETScdmda
- use PETScsnes
+ use PETScDMDA
+ use PETScSNES
+ #if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
+ use MPI_f08
+ #endif

  use prec
  use parallelization
  use DAMASK_interface
  use IO
+ use HDF5
  use HDF5_utilities
  use math
  use rotations
@@ -98,7 +102,11 @@ subroutine grid_mechanical_spectral_basic_init
  F ! pointer to solution data
  PetscInt, dimension(0:worldsize-1) :: localK
  integer(HID_T) :: fileHandle, groupHandle
- integer :: fileUnit
+ #if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
+ type(MPI_File) :: fileUnit
+ #else
+ integer :: fileUnit
+ #endif
  class (tNode), pointer :: &
  num_grid, &
  debug_grid
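Note: fileUnit now switches between an integer handle and the mpi_f08 derived type, while the MPI-IO calls later in this routine keep the same spelling. A minimal, self-contained sketch of that call sequence (program and file name are hypothetical; it expects a file holding at least 81 doubles):

  program file_handle_demo
    use MPI_f08
    implicit none
    real(8), dimension(81) :: C_ref                      ! flattened 3x3x3x3 stiffness
    type(MPI_File)         :: fileUnit                   ! with 'use mpi' this would be a plain integer
    integer                :: ierr
    call MPI_Init(ierr)
    call MPI_File_open(MPI_COMM_WORLD,'job.C_ref',MPI_MODE_RDONLY,MPI_INFO_NULL,fileUnit,ierr)
    call MPI_File_read(fileUnit,C_ref,81,MPI_DOUBLE_PRECISION,MPI_STATUS_IGNORE,ierr)
    call MPI_File_close(fileUnit,ierr)
    call MPI_Finalize(ierr)
  end program file_handle_demo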
@@ -153,7 +161,7 @@ subroutine grid_mechanical_spectral_basic_init
  call SNESSetOptionsPrefix(snes,'mechanical_',ierr);CHKERRQ(ierr)
  localK = 0
  localK(worldrank) = grid3
- call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr)
  call DMDACreate3d(PETSC_COMM_WORLD, &
  DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary
  DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point
@@ -184,16 +192,16 @@ subroutine grid_mechanical_spectral_basic_init
  groupHandle = HDF5_openGroup(fileHandle,'solver')

  call HDF5_read(P_aim,groupHandle,'P_aim',.false.)
- call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
  if(ierr /=0) error stop 'MPI error'
  call HDF5_read(F_aim,groupHandle,'F_aim',.false.)
- call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
  if(ierr /=0) error stop 'MPI error'
  call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.)
- call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
  if(ierr /=0) error stop 'MPI error'
  call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.)
- call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
  if(ierr /=0) error stop 'MPI error'
  call HDF5_read(F,groupHandle,'F')
  call HDF5_read(F_lastInc,groupHandle,'F_lastInc')
@@ -213,16 +221,16 @@ subroutine grid_mechanical_spectral_basic_init
  restartRead2: if (interface_restartInc > 0) then
  print'(a,i0,a)', ' reading more restart data of increment ', interface_restartInc, ' from file'
  call HDF5_read(C_volAvg,groupHandle,'C_volAvg',.false.)
- call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
  if(ierr /=0) error stop 'MPI error'
  call HDF5_read(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.)
- call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
  if(ierr /=0) error stop 'MPI error'

  call HDF5_closeGroup(groupHandle)
  call HDF5_closeFile(fileHandle)

- call MPI_File_open(PETSC_COMM_WORLD, trim(getSolverJobName())//'.C_ref', &
+ call MPI_File_open(MPI_COMM_WORLD, trim(getSolverJobName())//'.C_ref', &
  MPI_MODE_RDONLY,MPI_INFO_NULL,fileUnit,ierr)
  call MPI_File_read(fileUnit,C_minMaxAvg,81,MPI_DOUBLE,MPI_STATUS_IGNORE,ierr)
  call MPI_File_close(fileUnit,ierr)
@@ -487,7 +495,7 @@ subroutine formResidual(in, F, &
  call utilities_constitutiveResponse(residuum, & ! "residuum" gets field of first PK stress (to save memory)
  P_av,C_volAvg,C_minMaxAvg, &
  F,params%timeinc,params%rotation_BC)
- call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr)

  !--------------------------------------------------------------------------------------------------
  ! stress BC handling
@@ -7,13 +7,17 @@
  module grid_mechanical_spectral_polarisation
  #include <petsc/finclude/petscsnes.h>
  #include <petsc/finclude/petscdmda.h>
- use PETScdmda
- use PETScsnes
+ use PETScDMDA
+ use PETScSNES
+ #if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
+ use MPI_f08
+ #endif

  use prec
  use parallelization
  use DAMASK_interface
  use IO
+ use HDF5
  use HDF5_utilities
  use math
  use rotations
@@ -111,7 +115,11 @@ subroutine grid_mechanical_spectral_polarisation_init
  F_tau ! specific (sub)pointer
  PetscInt, dimension(0:worldsize-1) :: localK
  integer(HID_T) :: fileHandle, groupHandle
- integer :: fileUnit
+ #if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
+ type(MPI_File) :: fileUnit
+ #else
+ integer :: fileUnit
+ #endif
  class (tNode), pointer :: &
  num_grid, &
  debug_grid
@@ -173,7 +181,7 @@ subroutine grid_mechanical_spectral_polarisation_init
  call SNESSetOptionsPrefix(snes,'mechanical_',ierr);CHKERRQ(ierr)
  localK = 0
  localK(worldrank) = grid3
- call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr)
  call DMDACreate3d(PETSC_COMM_WORLD, &
  DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary
  DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point
@@ -206,16 +214,16 @@ subroutine grid_mechanical_spectral_polarisation_init
  groupHandle = HDF5_openGroup(fileHandle,'solver')

  call HDF5_read(P_aim,groupHandle,'P_aim',.false.)
- call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
  if(ierr /=0) error stop 'MPI error'
  call HDF5_read(F_aim,groupHandle,'F_aim',.false.)
- call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
  if(ierr /=0) error stop 'MPI error'
  call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.)
- call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
  if(ierr /=0) error stop 'MPI error'
  call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.)
- call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
  if(ierr /=0) error stop 'MPI error'
  call HDF5_read(F,groupHandle,'F')
  call HDF5_read(F_lastInc,groupHandle,'F_lastInc')
@@ -239,16 +247,16 @@ subroutine grid_mechanical_spectral_polarisation_init
  restartRead2: if (interface_restartInc > 0) then
  print'(a,i0,a)', ' reading more restart data of increment ', interface_restartInc, ' from file'
  call HDF5_read(C_volAvg,groupHandle,'C_volAvg',.false.)
- call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
  if(ierr /=0) error stop 'MPI error'
  call HDF5_read(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.)
- call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
  if(ierr /=0) error stop 'MPI error'

  call HDF5_closeGroup(groupHandle)
  call HDF5_closeFile(fileHandle)

- call MPI_File_open(PETSC_COMM_WORLD, trim(getSolverJobName())//'.C_ref', &
+ call MPI_File_open(MPI_COMM_WORLD, trim(getSolverJobName())//'.C_ref', &
  MPI_MODE_RDONLY,MPI_INFO_NULL,fileUnit,ierr)
  call MPI_File_read(fileUnit,C_minMaxAvg,81,MPI_DOUBLE,MPI_STATUS_IGNORE,ierr)
  call MPI_File_close(fileUnit,ierr)
@@ -543,7 +551,7 @@ subroutine formResidual(in, FandF_tau, &
  X_RANGE, Y_RANGE, Z_RANGE)

  F_av = sum(sum(sum(F,dim=5),dim=4),dim=3) * wgt
- call MPI_Allreduce(MPI_IN_PLACE,F_av,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,F_av,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)

  call SNESGetNumberFunctionEvals(snes,nfuncs,ierr); CHKERRQ(ierr)
  call SNESGetIterationNumber(snes,PETScIter,ierr); CHKERRQ(ierr)
@@ -587,7 +595,7 @@ subroutine formResidual(in, FandF_tau, &
  call utilities_constitutiveResponse(residual_F, & ! "residuum" gets field of first PK stress (to save memory)
  P_av,C_volAvg,C_minMaxAvg, &
  F - residual_F_tau/num%beta,params%timeinc,params%rotation_BC)
- call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr)

  !--------------------------------------------------------------------------------------------------
  ! stress BC handling
@@ -7,8 +7,11 @@
  module grid_thermal_spectral
  #include <petsc/finclude/petscsnes.h>
  #include <petsc/finclude/petscdmda.h>
- use PETScdmda
- use PETScsnes
+ use PETScDMDA
+ use PETScSNES
+ #if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
+ use MPI_f08
+ #endif

  use prec
  use parallelization
@@ -102,7 +105,7 @@ subroutine grid_thermal_spectral_init(T_0)
  call SNESSetOptionsPrefix(thermal_snes,'thermal_',ierr);CHKERRQ(ierr)
  localK = 0
  localK(worldrank) = grid3
- call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr)
  call DMDACreate3D(PETSC_COMM_WORLD, &
  DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary
  DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point
@@ -182,10 +185,10 @@ function grid_thermal_spectral_solution(timeinc) result(solution)
  endif
  stagNorm = maxval(abs(T_current - T_stagInc))
  solnNorm = maxval(abs(T_current))
- call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1,MPI_DOUBLE,MPI_MAX,PETSC_COMM_WORLD,ierr)
- call MPI_Allreduce(MPI_IN_PLACE,solnNorm,1,MPI_DOUBLE,MPI_MAX,PETSC_COMM_WORLD,ierr)
- T_stagInc = T_current
+ call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD,ierr)
  solution%stagConverged = stagNorm < max(num%eps_thermal_atol, num%eps_thermal_rtol*solnNorm)
+ call MPI_Allreduce(MPI_IN_PLACE,solution%stagConverged,1,MPI_LOGICAL,MPI_LAND,MPI_COMM_WORLD,ierr)
+ T_stagInc = T_current

  !--------------------------------------------------------------------------------------------------
  ! updating thermal state
@@ -310,9 +313,9 @@ subroutine updateReference()
  enddo

  K_ref = K_ref*wgt
- call MPI_Allreduce(MPI_IN_PLACE,K_ref,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,K_ref,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
  mu_ref = mu_ref*wgt
- call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)

  end subroutine updateReference
@@ -8,6 +8,9 @@ module spectral_utilities

  #include <petsc/finclude/petscsys.h>
  use PETScSys
+ #if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
+ use MPI_f08
+ #endif

  use prec
  use DAMASK_interface
@@ -591,7 +594,7 @@ real(pReal) function utilities_divergenceRMS()
  conjg(-xi1st(1:3,grid1Red,j,k))*rescaledGeom))**2.0_pReal)
  enddo; enddo
  if(grid(1) == 1) utilities_divergenceRMS = utilities_divergenceRMS * 0.5_pReal ! counted twice in case of grid(1) == 1
- call MPI_Allreduce(MPI_IN_PLACE,utilities_divergenceRMS,1,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,utilities_divergenceRMS,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
  if(ierr /=0) error stop 'MPI error'
  utilities_divergenceRMS = sqrt(utilities_divergenceRMS) * wgt ! RMS in real space calculated with Parsevals theorem from Fourier space

@@ -651,7 +654,7 @@ real(pReal) function utilities_curlRMS()
  + sum(real(curl_fourier)**2.0_pReal + aimag(curl_fourier)**2.0_pReal) ! this layer (Nyquist) does not have a conjugate complex counterpart (if grid(1) /= 1)
  enddo; enddo

- call MPI_Allreduce(MPI_IN_PLACE,utilities_curlRMS,1,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,utilities_curlRMS,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
  if(ierr /=0) error stop 'MPI error'
  utilities_curlRMS = sqrt(utilities_curlRMS) * wgt
  if(grid(1) == 1) utilities_curlRMS = utilities_curlRMS * 0.5_pReal ! counted twice in case of grid(1) == 1
@@ -816,7 +819,7 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,&

  P = reshape(homogenization_P, [3,3,grid(1),grid(2),grid3])
  P_av = sum(sum(sum(P,dim=5),dim=4),dim=3) * wgt ! average of P
- call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
  if (debugRotation) print'(/,a,/,2(3(2x,f12.4,1x)/),3(2x,f12.4,1x))', &
  ' Piola--Kirchhoff stress (lab) / MPa =', transpose(P_av)*1.e-6_pReal
  if(present(rotation_BC)) P_av = rotation_BC%rotate(P_av)
@@ -840,21 +843,21 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,&
  end do

  valueAndRank = [dPdF_norm_max,real(worldrank,pReal)]
- call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MAXLOC, PETSC_COMM_WORLD, ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MAXLOC, MPI_COMM_WORLD, ierr)
  if (ierr /= 0) error stop 'MPI error'
- call MPI_Bcast(dPdF_max,81,MPI_DOUBLE,int(valueAndRank(2)),PETSC_COMM_WORLD, ierr)
+ call MPI_Bcast(dPdF_max,81,MPI_DOUBLE,int(valueAndRank(2)),MPI_COMM_WORLD, ierr)
  if (ierr /= 0) error stop 'MPI error'

  valueAndRank = [dPdF_norm_min,real(worldrank,pReal)]
- call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MINLOC, PETSC_COMM_WORLD, ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MINLOC, MPI_COMM_WORLD, ierr)
  if (ierr /= 0) error stop 'MPI error'
- call MPI_Bcast(dPdF_min,81,MPI_DOUBLE,int(valueAndRank(2)),PETSC_COMM_WORLD, ierr)
+ call MPI_Bcast(dPdF_min,81,MPI_DOUBLE,int(valueAndRank(2)),MPI_COMM_WORLD, ierr)
  if (ierr /= 0) error stop 'MPI error'

  C_minmaxAvg = 0.5_pReal*(dPdF_max + dPdF_min)

  C_volAvg = sum(homogenization_dPdF,dim=5)
- call MPI_Allreduce(MPI_IN_PLACE,C_volAvg,81,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,C_volAvg,81,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
  if (ierr /= 0) error stop 'MPI error'
  C_volAvg = C_volAvg * wgt

@@ -909,7 +912,7 @@ function utilities_forwardField(timeinc,field_lastInc,rate,aim)
  utilities_forwardField = field_lastInc + rate*timeinc
  if (present(aim)) then !< correct to match average
  fieldDiff = sum(sum(sum(utilities_forwardField,dim=5),dim=4),dim=3)*wgt
- call MPI_Allreduce(MPI_IN_PLACE,fieldDiff,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,fieldDiff,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)
  fieldDiff = fieldDiff - aim
  utilities_forwardField = utilities_forwardField - &
  spread(spread(spread(fieldDiff,3,grid(1)),4,grid(2)),5,grid3)
@@ -982,8 +985,13 @@ subroutine utilities_updateCoords(F)
  rank_t, rank_b, &
  c, &
  ierr
+ #if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
+ type(MPI_Request), dimension(4) :: request
+ type(MPI_Status), dimension(4) :: status
+ #else
  integer, dimension(4) :: request
  integer, dimension(MPI_STATUS_SIZE,4) :: status
+ #endif
  real(pReal), dimension(3) :: step
  real(pReal), dimension(3,3) :: Favg
  integer, dimension(3) :: me
@@ -1018,7 +1026,7 @@ subroutine utilities_updateCoords(F)
  !--------------------------------------------------------------------------------------------------
  ! average F
  if (grid3Offset == 0) Favg = real(tensorField_fourier(1:3,1:3,1,1,1),pReal)*wgt
- call MPI_Bcast(Favg,9,MPI_DOUBLE,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(Favg,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr)
  if(ierr /=0) error stop 'MPI error'

  !--------------------------------------------------------------------------------------------------
@@ -1029,20 +1037,24 @@ subroutine utilities_updateCoords(F)
  rank_b = modulo(worldrank-1,worldsize)

  ! send bottom layer to process below
- call MPI_Isend(IPfluct_padded(:,:,:,2), c,MPI_DOUBLE,rank_b,0,PETSC_COMM_WORLD,request(1),ierr)
+ call MPI_Isend(IPfluct_padded(:,:,:,2), c,MPI_DOUBLE,rank_b,0,MPI_COMM_WORLD,request(1),ierr)
  if(ierr /=0) error stop 'MPI error'
- call MPI_Irecv(IPfluct_padded(:,:,:,grid3+2),c,MPI_DOUBLE,rank_t,0,PETSC_COMM_WORLD,request(2),ierr)
+ call MPI_Irecv(IPfluct_padded(:,:,:,grid3+2),c,MPI_DOUBLE,rank_t,0,MPI_COMM_WORLD,request(2),ierr)
  if(ierr /=0) error stop 'MPI error'

  ! send top layer to process above
- call MPI_Isend(IPfluct_padded(:,:,:,grid3+1),c,MPI_DOUBLE,rank_t,1,PETSC_COMM_WORLD,request(3),ierr)
+ call MPI_Isend(IPfluct_padded(:,:,:,grid3+1),c,MPI_DOUBLE,rank_t,1,MPI_COMM_WORLD,request(3),ierr)
  if(ierr /=0) error stop 'MPI error'
- call MPI_Irecv(IPfluct_padded(:,:,:,1), c,MPI_DOUBLE,rank_b,1,PETSC_COMM_WORLD,request(4),ierr)
+ call MPI_Irecv(IPfluct_padded(:,:,:,1), c,MPI_DOUBLE,rank_b,1,MPI_COMM_WORLD,request(4),ierr)
  if(ierr /=0) error stop 'MPI error'

  call MPI_Waitall(4,request,status,ierr)
  if(ierr /=0) error stop 'MPI error'
+ #if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
+ ! ToDo
+ #else
  if(any(status(MPI_ERROR,:) /= 0)) error stop 'MPI error'
+ #endif

  !--------------------------------------------------------------------------------------------------
  ! calculate nodal displacements
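Note: the '! ToDo' branch above is a placeholder. With the mpi_f08 bindings a status is an array of derived types, so the integer-indexed check status(MPI_ERROR,:) does not compile there; the error field would be reached as a component instead. A minimal sketch of what the two branches could look like (the mpi_f08 line is a suggestion, not part of this change):

  #if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
    type(MPI_Status), dimension(4) :: status
    if (any(status(:)%MPI_ERROR /= 0)) error stop 'MPI error'   ! component access instead of row index
  #else
    integer, dimension(MPI_STATUS_SIZE,4) :: status
    if (any(status(MPI_ERROR,:) /= 0)) error stop 'MPI error'
  #endif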
@@ -12,6 +12,7 @@ module homogenization
  use material
  use phase
  use discretization
+ use HDF5
  use HDF5_utilities
  use results
  use lattice
@@ -6,10 +6,12 @@ module FEM_utilities
  #include <petsc/finclude/petscdmplex.h>
  #include <petsc/finclude/petscdmda.h>
  #include <petsc/finclude/petscis.h>
- use PETScdmplex
- use PETScdmda
- use PETScis
+ use PETScDMplex
+ use PETScDMDA
+ use PETScIS
+ #if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
+ use MPI_f08
+ #endif

  use prec
  use config
@@ -165,7 +167,7 @@ subroutine utilities_constitutiveResponse(timeinc,P_av,forwardData)
  cutBack = .false. ! reset cutBack status

  P_av = sum(homogenization_P,dim=3) * wgt
- call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr)

  end subroutine utilities_constitutiveResponse
@@ -8,9 +8,12 @@ module discretization_mesh
  #include <petsc/finclude/petscdmplex.h>
  #include <petsc/finclude/petscis.h>
  #include <petsc/finclude/petscdmda.h>
- use PETScdmplex
- use PETScdmda
- use PETScis
+ use PETScDMplex
+ use PETScDMDA
+ use PETScIS
+ #if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
+ use MPI_f08
+ #endif

  use DAMASK_interface
  use parallelization
@@ -111,9 +114,9 @@ subroutine discretization_mesh_init(restart)
  ! get number of IDs in face sets (for boundary conditions?)
  call DMGetLabelSize(globalMesh,'Face Sets',mesh_Nboundaries,ierr)
  CHKERRQ(ierr)
- call MPI_Bcast(mesh_Nboundaries,1,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr)
- call MPI_Bcast(mesh_NcpElemsGlobal,1,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr)
- call MPI_Bcast(dimPlex,1,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(mesh_Nboundaries,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
+ call MPI_Bcast(mesh_NcpElemsGlobal,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
+ call MPI_Bcast(dimPlex,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)

  if (worldrank == 0) then
  call DMClone(globalMesh,geomMesh,ierr)
@@ -134,7 +137,7 @@ subroutine discretization_mesh_init(restart)
  CHKERRQ(ierr)
  call ISRestoreIndicesF90(faceSetIS,pFaceSets,ierr)
  endif
- call MPI_Bcast(mesh_boundaries,mesh_Nboundaries,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr)
+ call MPI_Bcast(mesh_boundaries,mesh_Nboundaries,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)

  call DMDestroy(globalMesh,ierr); CHKERRQ(ierr)
@@ -8,11 +8,13 @@ module mesh_mechanical_FEM
  #include <petsc/finclude/petscdmplex.h>
  #include <petsc/finclude/petscdm.h>
  #include <petsc/finclude/petsc.h>
- use PETScsnes
+ use PETScSNES
  use PETScDM
  use PETScDMplex
  use PETScDT
+ #if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
+ use MPI_f08
+ #endif

  use prec
  use FEM_utilities
@@ -396,7 +398,7 @@ subroutine FEM_mechanical_formResidual(dm_local,xx_local,f_local,dummy,ierr)
  !--------------------------------------------------------------------------------------------------
  ! evaluate constitutive response
  call Utilities_constitutiveResponse(params%timeinc,P_av,ForwardData)
- call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr)
  ForwardData = .false.

  !--------------------------------------------------------------------------------------------------
@ -6,11 +6,15 @@ module parallelization
|
||||||
use, intrinsic :: ISO_fortran_env, only: &
|
use, intrinsic :: ISO_fortran_env, only: &
|
||||||
OUTPUT_UNIT
|
OUTPUT_UNIT
|
||||||
|
|
||||||
#ifdef PETSc
|
#ifdef PETSC
|
||||||
#include <petsc/finclude/petscsys.h>
|
#include <petsc/finclude/petscsys.h>
|
||||||
use petscsys
|
use PETScSys
|
||||||
|
#if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
|
||||||
|
use MPI_f08
|
||||||
|
#endif
|
||||||
!$ use OMP_LIB
|
!$ use OMP_LIB
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
use prec
|
use prec
|
||||||
|
|
||||||
implicit none
|
implicit none
|
||||||
|
@@ -20,7 +24,7 @@ module parallelization
     worldrank = 0, & !< MPI worldrank (/=0 for MPI simulations only)
     worldsize = 1 !< MPI worldsize (/=1 for MPI simulations only)

-#ifdef PETSc
+#ifdef PETSC
   public :: &
     parallelization_init
@@ -60,12 +64,12 @@ subroutine parallelization_init
 #endif
   CHKERRQ(petsc_err)

-  call MPI_Comm_rank(PETSC_COMM_WORLD,worldrank,err)
+  call MPI_Comm_rank(MPI_COMM_WORLD,worldrank,err)
   if (err /= 0) error stop 'Could not determine worldrank'

   if (worldrank == 0) print'(/,a)', ' <<<+- parallelization init -+>>>'

-  call MPI_Comm_size(PETSC_COMM_WORLD,worldsize,err)
+  call MPI_Comm_size(MPI_COMM_WORLD,worldsize,err)
   if (err /= 0) error stop 'Could not determine worldsize'
   if (worldrank == 0) print'(a,i3)', ' MPI processes: ',worldsize
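The hunk above queries rank and size on MPI_COMM_WORLD (through the mpi_f08 bindings where available) instead of PETSC_COMM_WORLD. A minimal, self-contained sketch of that query pattern (illustration only, not part of this changeset; it initializes MPI directly, whereas DAMASK relies on PetscInitialize):

program rank_size_sketch
  use MPI_f08                                        ! Fortran 2008 MPI bindings (assumed available)
  implicit none
  integer :: worldrank, worldsize, err

  call MPI_Init(err)
  if (err /= 0) error stop 'Could not initialize MPI'

  call MPI_Comm_rank(MPI_COMM_WORLD,worldrank,err)   ! rank of this process in the world communicator
  if (err /= 0) error stop 'Could not determine worldrank'

  call MPI_Comm_size(MPI_COMM_WORLD,worldsize,err)   ! total number of processes
  if (err /= 0) error stop 'Could not determine worldsize'

  if (worldrank == 0) print'(a,i3)', ' MPI processes: ',worldsize

  call MPI_Finalize(err)
end program rank_size_sketch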
@@ -14,6 +14,7 @@ module phase
   use lattice
   use discretization
   use parallelization
+  use HDF5
   use HDF5_utilities

   implicit none
@@ -74,7 +74,7 @@ contains
 module subroutine damage_init

   integer :: &
-    ph, & !< counter in phase loop
+    ph, &
     Nmembers
   class(tNode), pointer :: &
     phases, &
@@ -105,10 +105,10 @@ module subroutine damage_init
     if (sources%length == 1) then
       damage_active = .true.
       source => sources%get(1)
-      param(ph)%mu = source%get_asFloat('M',defaultVal=0.0_pReal)
-      param(ph)%K(1,1) = source%get_asFloat('D_11',defaultVal=0.0_pReal)
-      param(ph)%K(3,3) = source%get_asFloat('D_33',defaultVal=0.0_pReal)
-      param(ph)%K = lattice_applyLatticeSymmetry33(param(ph)%K,phase%get_asString('lattice'))
+      param(ph)%mu = source%get_asFloat('mu',defaultVal=0.0_pReal) ! ToDo: make mandatory?
+      param(ph)%K(1,1) = source%get_asFloat('K_11',defaultVal=0.0_pReal) ! ToDo: make mandatory?
+      param(ph)%K(3,3) = source%get_asFloat('K_33',defaultVal=0.0_pReal) ! ToDo: depends on symmetry
+      param(ph)%K = lattice_applyLatticeSymmetry33(param(ph)%K,phase_lattice(ph))
     endif

   enddo
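For orientation: K_11 and K_33 populate the diagonal of a 3x3 tensor whose remaining entries follow from the lattice symmetry (hence the "depends on symmetry" note). A rough sketch of that idea for a transversely isotropic (e.g. hexagonal) lattice, using a hypothetical helper rather than the actual lattice_applyLatticeSymmetry33:

! Sketch only: complete a diagonal 3x3 tensor under hexagonal-type symmetry,
! where K_22 = K_11 is assumed; hypothetical stand-in for lattice_applyLatticeSymmetry33.
pure function symmetrize33_hex(K_11,K_33) result(K)
  use prec, only: pReal                              ! DAMASK real kind (assumed)
  real(pReal), intent(in) :: K_11, K_33
  real(pReal), dimension(3,3) :: K

  K = 0.0_pReal
  K(1,1) = K_11
  K(2,2) = K_11                                      ! enforced by the assumed symmetry
  K(3,3) = K_33
end function symmetrize33_hex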
@@ -267,15 +267,11 @@ module subroutine mechanical_init(materials,phases)
   enddo


-! initialize elasticity
   call elastic_init(phases)

-! initialize plasticity
   allocate(plasticState(phases%length))
   allocate(phase_plasticity(phases%length),source = PLASTICITY_undefined_ID)

   call plastic_init()

   do ph = 1,phases%length
     plasticState(ph)%state0 = plasticState(ph)%state
   enddo
@@ -232,6 +232,8 @@ module subroutine plastic_init
   where(plastic_dislotungsten_init()) phase_plasticity = PLASTICITY_DISLOTUNGSTEN_ID
   where(plastic_nonlocal_init()) phase_plasticity = PLASTICITY_NONLOCAL_ID

+  if (any(phase_plasticity == PLASTICITY_undefined_ID)) call IO_error(201)
+
 end subroutine plastic_init

 !--------------------------------------------------------------------------------------------------
@@ -32,11 +32,12 @@ submodule(phase:plastic) dislotwin
       xi_sb = 1.0_pReal, & !< value for shearband resistance
       v_sb = 1.0_pReal, & !< value for shearband velocity_0
       E_sb = 1.0_pReal, & !< activation energy for shear bands
-      Gamma_sf_0K = 1.0_pReal, & !< stacking fault energy at zero K
-      dGamma_sf_dT = 1.0_pReal, & !< temperature dependence of stacking fault energy
       delta_G = 1.0_pReal, & !< Free energy difference between austensite and martensite
       i_tr = 1.0_pReal, & !< adjustment parameter to calculate MFP for transformation
-      h = 1.0_pReal !< Stack height of hex nucleus
+      h = 1.0_pReal, & !< Stack height of hex nucleus
+      T_ref = 0.0_pReal
+    real(pReal), dimension(2) :: &
+      Gamma_sf = 0.0_pReal
     real(pReal), allocatable, dimension(:) :: &
       b_sl, & !< absolute length of Burgers vector [m] for each slip system
       b_tw, & !< absolute length of Burgers vector [m] for each twin system
@@ -220,13 +221,9 @@ module function plastic_dislotwin_init() result(myPlasticity)
     prm%D_a = pl%get_asFloat('D_a')
     prm%D_0 = pl%get_asFloat('D_0')
     prm%Q_cl = pl%get_asFloat('Q_cl')
-    prm%ExtendedDislocations = pl%get_asBool('extend_dislocations',defaultVal = .false.)
-    if (prm%ExtendedDislocations) then
-      prm%Gamma_sf_0K = pl%get_asFloat('Gamma_sf_0K')
-      prm%dGamma_sf_dT = pl%get_asFloat('dGamma_sf_dT')
-    endif

-    prm%omitDipoles = pl%get_asBool('omit_dipoles',defaultVal = .false.)
+    prm%ExtendedDislocations = pl%get_asBool('extend_dislocations',defaultVal = .false.)
+    prm%omitDipoles = pl%get_asBool('omit_dipoles',defaultVal = .false.)

 ! multiplication factor according to crystal structure (nearest neighbors bcc vs fcc/hex)
 ! details: Argon & Moffat, Acta Metallurgica, Vol. 29, pg 293 to 299, 1981
@@ -384,11 +381,14 @@ module function plastic_dislotwin_init() result(myPlasticity)
     if(prm%sum_N_sl + prm%sum_N_tw + prm%sum_N_tw > 0) &
       prm%D = pl%get_asFloat('D')

-    twinOrSlipActive: if (prm%sum_N_tw + prm%sum_N_tr > 0) then
-      prm%Gamma_sf_0K = pl%get_asFloat('Gamma_sf_0K')
-      prm%dGamma_sf_dT = pl%get_asFloat('dGamma_sf_dT')
-      prm%V_cs = pl%get_asFloat('V_cs')
-    endif twinOrSlipActive
+    if (prm%sum_N_tw + prm%sum_N_tr > 0) &
+      prm%V_cs = pl%get_asFloat('V_cs')
+
+    if (prm%sum_N_tw + prm%sum_N_tr > 0 .or. prm%ExtendedDislocations) then
+      prm%T_ref = pl%get_asFloat('T_ref')
+      prm%Gamma_sf(1) = pl%get_asFloat('Gamma_sf')
+      prm%Gamma_sf(2) = pl%get_asFloat('Gamma_sf,T',defaultVal=0.0_pReal)
+    endif

     slipAndTwinActive: if (prm%sum_N_sl * prm%sum_N_tw > 0) then
       prm%h_sl_tw = lattice_interaction_SlipByTwin(N_sl,N_tw,&
@@ -689,7 +689,7 @@ module subroutine dislotwin_dotState(Mp,T,ph,en)
 ! Argon & Moffat, Acta Metallurgica, Vol. 29, pg 293 to 299, 1981
         sigma_cl = dot_product(prm%n0_sl(1:3,i),matmul(Mp,prm%n0_sl(1:3,i)))
         b_d = merge(24.0_pReal*PI*(1.0_pReal - prm%nu)/(2.0_pReal + prm%nu) &
-                    * (prm%Gamma_sf_0K + prm%dGamma_sf_dT * T) / (prm%mu*prm%b_sl(i)), &
+                    * (prm%Gamma_sf(1) + prm%Gamma_sf(2) * T) / (prm%mu*prm%b_sl(i)), &
                     1.0_pReal, &
                     prm%ExtendedDislocations)
         v_cl = 2.0_pReal*prm%omega*b_d**2.0_pReal*exp(-prm%Q_cl/(kB*T)) &
@@ -752,7 +752,7 @@ module subroutine dislotwin_dependentState(T,ph,en)
   sumf_tw = sum(stt%f_tw(1:prm%sum_N_tw,en))
   sumf_tr = sum(stt%f_tr(1:prm%sum_N_tr,en))

-  Gamma = prm%Gamma_sf_0K + prm%dGamma_sf_dT * T
+  Gamma = prm%Gamma_sf(1) + prm%Gamma_sf(2) * T

 !* rescaled volume fraction for topology
   f_over_t_tw = stt%f_tw(1:prm%sum_N_tw,en)/prm%t_tw ! this is per system ...
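Both dotState and dependentState now evaluate the stacking fault energy from the pair Gamma_sf(1), Gamma_sf(2) rather than the former Gamma_sf_0K/dGamma_sf_dT scalars. The parameterization stays a linear law in temperature; a small sketch of it (the helper name is an assumption, the formula is taken from the hunks above):

! Sketch: linear temperature dependence of the stacking fault energy,
! Gamma(T) = Gamma_sf(1) + Gamma_sf(2)*T, as used in dotState/dependentState.
pure function stackingFaultEnergy(Gamma_sf,T) result(Gamma)
  use prec, only: pReal                              ! DAMASK real kind (assumed)
  real(pReal), dimension(2), intent(in) :: Gamma_sf  ! value and temperature slope
  real(pReal), intent(in) :: T                       ! temperature
  real(pReal) :: Gamma

  Gamma = Gamma_sf(1) + Gamma_sf(2)*T
end function stackingFaultEnergy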
@@ -31,8 +31,7 @@ submodule(phase:plastic) isotropic

   type :: tIsotropicState
     real(pReal), pointer, dimension(:) :: &
-      xi, &
-      gamma
+      xi
   end type tIsotropicState

 !--------------------------------------------------------------------------------------------------
@@ -122,7 +121,7 @@ module function plastic_isotropic_init() result(myPlasticity)
 !--------------------------------------------------------------------------------------------------
 ! allocate state arrays
   Nmembers = count(material_phaseID == ph)
-  sizeDotState = size(['xi ','gamma'])
+  sizeDotState = size(['xi'])
   sizeState = sizeDotState

   call phase_allocateState(plasticState(ph),Nmembers,sizeState,sizeDotState,0)
@@ -135,11 +134,6 @@ module function plastic_isotropic_init() result(myPlasticity)
   plasticState(ph)%atol(1) = pl%get_asFloat('atol_xi',defaultVal=1.0_pReal)
   if (plasticState(ph)%atol(1) < 0.0_pReal) extmsg = trim(extmsg)//' atol_xi'

-  stt%gamma => plasticState(ph)%state (2,:)
-  dot%gamma => plasticState(ph)%dotState(2,:)
-  plasticState(ph)%atol(2) = pl%get_asFloat('atol_gamma',defaultVal=1.0e-6_pReal)
-  if (plasticState(ph)%atol(2) < 0.0_pReal) extmsg = trim(extmsg)//' atol_gamma'
-
 end associate

 !--------------------------------------------------------------------------------------------------
@@ -285,8 +279,6 @@ module subroutine isotropic_dotState(Mp,ph,en)
     dot%xi(en) = 0.0_pReal
   endif

-  dot%gamma(en) = dot_gamma ! ToDo: not really used
-
 end associate

 end subroutine isotropic_dotState
@@ -100,10 +100,10 @@ module subroutine thermal_init(phases)
     allocate(current(ph)%dot_T(Nmembers),source=0.0_pReal)
     phase => phases%get(ph)
     thermal => phase%get('thermal',defaultVal=emptyDict)
-    param(ph)%C_p = thermal%get_asFloat('C_p',defaultVal=0.0_pReal)
+    param(ph)%C_p = thermal%get_asFloat('C_p',defaultVal=0.0_pReal) ! ToDo: make mandatory?
     param(ph)%K(1,1) = thermal%get_asFloat('K_11',defaultVal=0.0_pReal) ! ToDo: make mandatory?
     param(ph)%K(3,3) = thermal%get_asFloat('K_33',defaultVal=0.0_pReal) ! ToDo: depends on symmtery
-    param(ph)%K = lattice_applyLatticeSymmetry33(param(ph)%K,phase%get_asString('lattice'))
+    param(ph)%K = lattice_applyLatticeSymmetry33(param(ph)%K,phase_lattice(ph))

     sources => thermal%get('source',defaultVal=emptyList)
     thermal_Nsources(ph) = sources%length
@@ -7,9 +7,6 @@
 subroutine quit(stop_id)
 #include <petsc/finclude/petscsys.h>
   use PetscSys
-#ifdef _OPENMP
-  use MPI
-#endif
   use HDF5

   implicit none
@@ -5,12 +5,18 @@
 !> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH
 !--------------------------------------------------------------------------------------------------
 module results
+  use prec
   use DAMASK_interface
   use parallelization
   use IO
   use HDF5_utilities
-#ifdef PETSc
-  use PETSC
+  use HDF5
+#ifdef PETSC
+#include <petsc/finclude/petscsys.h>
+  use PETScSys
+#if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
+  use MPI_f08
+#endif
 #endif

   implicit none
@@ -451,7 +457,7 @@ subroutine results_mapping_phase(ID,entry,label)
   call h5pcreate_f(H5P_DATASET_XFER_F, plist_id, hdferr)
   if(hdferr < 0) error stop 'HDF5 error'

-#ifndef PETSc
+#ifndef PETSC
   entryGlobal = entry -1 ! 0-based
 #else
 !--------------------------------------------------------------------------------------------------
@@ -459,7 +465,7 @@ subroutine results_mapping_phase(ID,entry,label)
   call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr)
   if(hdferr < 0) error stop 'HDF5 error'

-  call MPI_allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get output at each process
+  call MPI_Allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,MPI_COMM_WORLD,ierr) ! get output at each process
   if(ierr /= 0) error stop 'MPI error'

   entryOffset = 0
@@ -468,7 +474,7 @@ subroutine results_mapping_phase(ID,entry,label)
       entryOffset(ID(co,ce),worldrank) = entryOffset(ID(co,ce),worldrank) +1
     enddo
   enddo
-  call MPI_allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get offset at each process
+  call MPI_Allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INT,MPI_SUM,MPI_COMM_WORLD,ierr) ! get offset at each process
   if(ierr /= 0) error stop 'MPI error'
   entryOffset(:,worldrank) = sum(entryOffset(:,0:worldrank-1),2)
   do co = 1, size(ID,1)
@@ -604,7 +610,7 @@ subroutine results_mapping_homogenization(ID,entry,label)
   call h5pcreate_f(H5P_DATASET_XFER_F, plist_id, hdferr)
   if(hdferr < 0) error stop 'HDF5 error'

-#ifndef PETSc
+#ifndef PETSC
   entryGlobal = entry -1 ! 0-based
 #else
 !--------------------------------------------------------------------------------------------------
@@ -612,14 +618,14 @@ subroutine results_mapping_homogenization(ID,entry,label)
   call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr)
   if(hdferr < 0) error stop 'HDF5 error'

-  call MPI_allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get output at each process
+  call MPI_Allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,MPI_COMM_WORLD,ierr) ! get output at each process
   if(ierr /= 0) error stop 'MPI error'

   entryOffset = 0
   do ce = 1, size(ID,1)
     entryOffset(ID(ce),worldrank) = entryOffset(ID(ce),worldrank) +1
   enddo
-  call MPI_allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get offset at each process
+  call MPI_Allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INT,MPI_SUM,MPI_COMM_WORLD,ierr) ! get offset at each process
   if(ierr /= 0) error stop 'MPI error'
   entryOffset(:,worldrank) = sum(entryOffset(:,0:worldrank-1),2)
   do ce = 1, size(ID,1)
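In both mapping routines the pattern is the same: each rank fills its own column of per-section entry counts, MPI_Allreduce makes all counts visible on every rank, and the exclusive sum over the lower-ranked columns gives this rank's starting offset. A self-contained sketch of that offset computation (illustration only; it uses MPI_INTEGER rather than MPI_INT, and the argument names are made up):

! Sketch: derive this rank's global write offsets from per-rank counts.
subroutine entry_offsets_sketch(localCount,offset,worldrank,worldsize)
  use MPI_f08
  implicit none
  integer, intent(in)  :: localCount(:)                ! entries owned by this rank, per section
  integer, intent(in)  :: worldrank, worldsize
  integer, intent(out) :: offset(size(localCount))     ! first global index (0-based) per section

  integer :: counts(size(localCount),0:worldsize-1), err

  counts = 0
  counts(:,worldrank) = localCount                     ! fill own column only
  call MPI_Allreduce(MPI_IN_PLACE,counts,size(counts),MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err)       ! share all counts
  if (err /= 0) error stop 'MPI error'
  offset = sum(counts(:,0:worldrank-1),2)              ! exclusive prefix sum over lower ranks
end subroutine entry_offsets_sketch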