From d66b9e1896b227f963704e93ff936c820efacf0c Mon Sep 17 00:00:00 2001 From: Sheng Zhang Date: Mon, 6 Dec 2021 16:00:37 +0100 Subject: [PATCH 01/58] subdivision of interaction matrix for hcp --- .../mechanical/plastic/phenopowerlaw_Mg.yaml | 10 ++- .../mechanical/plastic/phenopowerlaw_Ti.yaml | 7 ++- src/lattice.f90 | 63 ++++++++++--------- 3 files changed, 44 insertions(+), 36 deletions(-) diff --git a/examples/config/phase/mechanical/plastic/phenopowerlaw_Mg.yaml b/examples/config/phase/mechanical/plastic/phenopowerlaw_Mg.yaml index 0ae04928d..5bc4a2e03 100644 --- a/examples/config/phase/mechanical/plastic/phenopowerlaw_Mg.yaml +++ b/examples/config/phase/mechanical/plastic/phenopowerlaw_Mg.yaml @@ -23,9 +23,13 @@ f_sat_sl-tw: 10.0 h_0_sl-sl: 500.0e+6 h_0_tw-tw: 50.0e+6 h_0_tw-sl: 150.0e+6 -h_sl-sl: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, - 1.0, 1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, - 1.0, 1.0, 1.0, -1.0, 1.0, 1.0, -1.0, 1.0, +1.0, 1.0] # unused entries are indicated by -1.0 +h_sl-sl: [+1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + +1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, -1.0, + -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, + -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, + -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, 1.0, 1.0, 1.0, 1.0, + +1.0, 1.0, -1.0, -1.0, -1.0, -1.0, 1.0, 1.0, 1.0, 1.0, + -1.0, -1.0, -1.0, -1.0, 1.0, 1.0, 1.0, 1.0, 1.0] # unused entries are indicated by -1.0 h_tw-tw: [+1.0, 1.0, -1.0, -1.0, -1.0, -1.0, 1.0, -1.0, 1.0, 1.0, -1.0, 1.0] # unused entries are indicated by -1.0 h_tw-sl: [1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, diff --git a/examples/config/phase/mechanical/plastic/phenopowerlaw_Ti.yaml b/examples/config/phase/mechanical/plastic/phenopowerlaw_Ti.yaml index 1db6adc6b..c238e6afb 100644 --- a/examples/config/phase/mechanical/plastic/phenopowerlaw_Ti.yaml +++ b/examples/config/phase/mechanical/plastic/phenopowerlaw_Ti.yaml @@ -20,5 +20,8 @@ xi_inf_sl: [568.e+6, 150.e+7, 0.0, 3420.e+6] # L. Wang et al. : # xi_0_sl: [127.e+6, 96.e+6, 0.0, 240.e+6] -h_sl-sl: [+1.0, 1.0, 1.0, 1.0, 1.0, 1.0, -1.0, -1.0, -1.0, -1.0, - -1.0, -1.0, +1.0, 1.0, -1.0, 1.0, 1.0, -1.0, 1.0, 1.0] # unused entries are indicated by -1.0 +h_sl-sl: [+1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, -1.0, -1.0, + -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, 1.0, + +1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + +1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + +1.0, 1.0, 1.0, 1.0, 1.0, 1.0] # unused entries are indicated by -1.0 diff --git a/src/lattice.f90 b/src/lattice.f90 index 50d4b6c51..68240390f 100644 --- a/src/lattice.f90 +++ b/src/lattice.f90 @@ -750,40 +750,41 @@ function lattice_interaction_SlipBySlip(Nslip,interactionValues,lattice) result( integer, dimension(HEX_NSLIP,HEX_NSLIP), parameter :: & HEX_INTERACTIONSLIPSLIP = reshape( [& ! basal prism 1. pyr 1. pyr 2. pyr - 1, 2, 2, 3, 3, 3, 7, 7, 7, 7, 7, 7, 13,13,13,13,13,13,13,13,13,13,13,13, 21,21,21,21,21,21, & ! -----> acting (forest) - 2, 1, 2, 3, 3, 3, 7, 7, 7, 7, 7, 7, 13,13,13,13,13,13,13,13,13,13,13,13, 21,21,21,21,21,21, & ! | basal - 2, 2, 1, 3, 3, 3, 7, 7, 7, 7, 7, 7, 13,13,13,13,13,13,13,13,13,13,13,13, 21,21,21,21,21,21, & ! | - ! v - 6, 6, 6, 4, 5, 5, 8, 8, 8, 8, 8, 8, 14,14,14,14,14,14,14,14,14,14,14,14, 22,22,22,22,22,22, & ! reacting (primary) - 6, 6, 6, 5, 4, 5, 8, 8, 8, 8, 8, 8, 14,14,14,14,14,14,14,14,14,14,14,14, 22,22,22,22,22,22, & ! 
prism - 6, 6, 6, 5, 5, 4, 8, 8, 8, 8, 8, 8, 14,14,14,14,14,14,14,14,14,14,14,14, 22,22,22,22,22,22, & + 1, 2, 2, 3, 4, 4, 9,10,9,9,10,9, 20,21,22,22,21,20,20,21,22,22,21,20, 47,47,48,47,47,48, & ! -----> acting (forest) + 2, 1, 2, 4, 3, 4, 10,9,9,10,9,9, 22,22,21,20,20,21,22,22,21,20,20,21, 47,48,47,47,48,47, & ! | basal + 2, 2, 1, 4, 4, 3, 9,9,10,9,9,10, 21,20,20,21,22,22,21,20,20,21,22,22, 48,47,47,48,47,47, & ! | + ! v + 7, 8, 8, 5, 6, 6, 11,12,11,11,12,11, 23,24,25,25,24,23,23,24,25,25,24,23, 49,49,50,49,49,50, & ! reacting (primary) + 8, 7, 8, 6, 5, 6, 12,11,11,12,11,11, 25,25,24,23,23,24,25,25,24,23,23,24, 49,50,49,49,50,49, & ! prism + 8, 8, 7, 6, 6, 5, 11,11,12,11,11,12, 24,23,23,24,25,25,24,23,23,24,25,25, 50,49,49,50,49,49, & - 12,12,12, 11,11,11, 9,10,10,10,10,10, 15,15,15,15,15,15,15,15,15,15,15,15, 23,23,23,23,23,23, & - 12,12,12, 11,11,11, 10, 9,10,10,10,10, 15,15,15,15,15,15,15,15,15,15,15,15, 23,23,23,23,23,23, & - 12,12,12, 11,11,11, 10,10, 9,10,10,10, 15,15,15,15,15,15,15,15,15,15,15,15, 23,23,23,23,23,23, & - 12,12,12, 11,11,11, 10,10,10, 9,10,10, 15,15,15,15,15,15,15,15,15,15,15,15, 23,23,23,23,23,23, & ! 1. pyr - 12,12,12, 11,11,11, 10,10,10,10, 9,10, 15,15,15,15,15,15,15,15,15,15,15,15, 23,23,23,23,23,23, & - 12,12,12, 11,11,11, 10,10,10,10,10, 9, 15,15,15,15,15,15,15,15,15,15,15,15, 23,23,23,23,23,23, & - 20,20,20, 19,19,19, 18,18,18,18,18,18, 16,17,17,17,17,17,17,17,17,17,17,17, 24,24,24,24,24,24, & - 20,20,20, 19,19,19, 18,18,18,18,18,18, 17,16,17,17,17,17,17,17,17,17,17,17, 24,24,24,24,24,24, & - 20,20,20, 19,19,19, 18,18,18,18,18,18, 17,17,16,17,17,17,17,17,17,17,17,17, 24,24,24,24,24,24, & - 20,20,20, 19,19,19, 18,18,18,18,18,18, 17,17,17,16,17,17,17,17,17,17,17,17, 24,24,24,24,24,24, & - 20,20,20, 19,19,19, 18,18,18,18,18,18, 17,17,17,17,16,17,17,17,17,17,17,17, 24,24,24,24,24,24, & - 20,20,20, 19,19,19, 18,18,18,18,18,18, 17,17,17,17,17,16,17,17,17,17,17,17, 24,24,24,24,24,24, & - 20,20,20, 19,19,19, 18,18,18,18,18,18, 17,17,17,17,17,17,16,17,17,17,17,17, 24,24,24,24,24,24, & ! 1. pyr - 20,20,20, 19,19,19, 18,18,18,18,18,18, 17,17,17,17,17,17,17,16,17,17,17,17, 24,24,24,24,24,24, & - 20,20,20, 19,19,19, 18,18,18,18,18,18, 17,17,17,17,17,17,17,17,16,17,17,17, 24,24,24,24,24,24, & - 20,20,20, 19,19,19, 18,18,18,18,18,18, 17,17,17,17,17,17,17,17,17,16,17,17, 24,24,24,24,24,24, & - 20,20,20, 19,19,19, 18,18,18,18,18,18, 17,17,17,17,17,17,17,17,17,17,16,17, 24,24,24,24,24,24, & - 20,20,20, 19,19,19, 18,18,18,18,18,18, 17,17,17,17,17,17,17,17,17,17,17,16, 24,24,24,24,24,24, & + 18,19,18, 16,17,16, 13,14,14,15,14,14, 26,26,27,28,28,27,29,29,27,28,28,27, 51,52,51,51,52,51, & + 19,18,18, 17,16,16, 14,13,14,14,15,14, 28,27,26,26,27,28,28,27,29,29,27,28, 51,51,52,51,51,52, & + 18,18,19, 16,16,17, 14,14,13,14,14,15, 27,28,28,27,26,26,27,28,28,27,29,29, 52,51,51,52,51,51, & + 18,19,18, 16,17,16, 15,14,14,13,14,14, 29,29,27,28,28,27,26,26,27,28,28,27, 51,52,51,51,52,51, & ! 1. pyr + 19,18,18, 17,16,16, 14,15,14,14,13,14, 28,27,29,29,27,28,28,27,26,26,27,28, 51,51,52,51,51,52, & + 18,18,19, 16,16,17, 14,14,15,14,14,13, 27,28,28,27,29,29,27,28,28,27,26,26, 52,51,51,52,51,51, & - 30,30,30, 29,29,29, 28,28,28,28,28,28, 27,27,27,27,27,27,27,27,27,27,27,27, 25,26,26,26,26,26, & - 30,30,30, 29,29,29, 28,28,28,28,28,28, 27,27,27,27,27,27,27,27,27,27,27,27, 26,25,26,26,26,26, & - 30,30,30, 29,29,29, 28,28,28,28,28,28, 27,27,27,27,27,27,27,27,27,27,27,27, 26,26,25,26,26,26, & - 30,30,30, 29,29,29, 28,28,28,28,28,28, 27,27,27,27,27,27,27,27,27,27,27,27, 26,26,26,25,26,26, & ! 2. 
pyr - 30,30,30, 29,29,29, 28,28,28,28,28,28, 27,27,27,27,27,27,27,27,27,27,27,27, 26,26,26,26,25,26, & - 30,30,30, 29,29,29, 28,28,28,28,28,28, 27,27,27,27,27,27,27,27,27,27,27,27, 26,26,26,26,26,25 & + 44,45,46, 41,42,43, 37,38,39,40,38,39, 30,31,32,32,32,33,34,35,32,32,32,36, 53,54,55,53,54,56, & + 46,45,44, 43,42,41, 37,39,38,40,39,38, 31,30,36,32,32,32,35,34,33,32,32,32, 56,54,53,55,54,53, & + 45,46,44, 42,43,41, 39,37,38,39,40,38, 32,36,30,31,32,32,32,33,34,35,32,32, 56,53,54,55,53,54, & + 45,44,46, 42,41,43, 38,37,39,38,40,39, 32,32,31,30,36,32,32,32,35,34,33,32, 53,56,54,53,55,54, & + 46,44,45, 43,41,42, 38,39,37,38,39,40, 32,32,32,36,30,31,32,32,32,33,34,35, 54,56,53,54,55,53, & + 44,46,45, 41,43,42, 39,38,37,39,38,40, 33,32,32,32,31,30,36,32,32,32,35,34, 54,53,56,54,53,55, & + 44,45,46, 41,42,43, 40,38,39,37,38,39, 34,35,32,32,32,36,30,31,32,32,32,33, 53,54,56,53,54,55, & ! 1. pyr + 46,45,44, 43,42,41, 40,39,38,37,39,38, 35,34,33,32,32,32,31,30,36,32,32,32, 55,54,53,56,54,53, & + 45,46,44, 42,43,41, 39,40,38,39,37,38, 32,33,34,35,32,32,32,36,30,31,32,32, 55,53,54,56,53,54, & + 45,44,46, 42,41,43, 38,40,39,38,37,39, 32,32,35,34,33,32,32,32,31,30,36,32, 53,55,54,53,56,54, & + 46,44,45, 43,41,42, 38,39,40,38,39,37, 32,32,32,33,34,35,32,32,32,36,30,31, 54,55,53,54,56,53, & + 44,46,45, 41,43,42, 39,38,40,39,38,37, 36,32,32,32,35,34,33,32,32,32,31,30, 54,53,55,54,53,56, & + + 68,68,69, 66,66,67, 64,64,65,64,65,65, 60,61,61,60,62,62,60,63,63,60,62,62, 57,58,58,59,58,58, & + 68,69,68, 66,67,66, 65,64,64,65,64,64, 62,62,60,61,61,60,62,62,60,63,63,60, 58,57,58,58,59,58, & + 69,68,68, 67,66,66, 64,65,64,64,65,64, 63,60,62,62,60,61,61,60,62,62,60,63, 58,58,57,58,58,59, & + 68,68,69, 66,66,67, 64,64,65,64,64,65, 60,63,63,60,62,62,60,61,61,60,62,62, 59,58,58,57,58,58, & ! 2. pyr + 68,69,68, 66,67,66, 65,64,64,65,64,64, 62,62,60,63,63,60,62,62,60,61,61,60, 58,59,58,58,57,58, & + 69,68,68, 67,66,66, 64,65,64,64,65,64, 61,60,62,62,60,63,63,60,62,62,60,61, 58,58,59,58,58,57 & ],shape(HEX_INTERACTIONSLIPSLIP)) !< Slip-slip interaction types for hex (onion peel naming scheme) integer, dimension(BCT_NSLIP,BCT_NSLIP), parameter :: & From ea25c8ff40c82764788279793102230a8f2442cc Mon Sep 17 00:00:00 2001 From: Sheng Zhang Date: Mon, 6 Dec 2021 16:55:24 +0100 Subject: [PATCH 02/58] updated tests to have new # of interactions --- PRIVATE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/PRIVATE b/PRIVATE index e6e1f93a3..fb4dd2478 160000 --- a/PRIVATE +++ b/PRIVATE @@ -1 +1 @@ -Subproject commit e6e1f93a36d63348359a81d7c373083a39977694 +Subproject commit fb4dd2478c00eedf2b5e977b4c2c00dfab523632 From a23a714bd0c8f1ee9547035534eb1824ae234f4c Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Tue, 7 Dec 2021 19:10:24 +0100 Subject: [PATCH 03/58] polishing --- src/lattice.f90 | 63 ++++++++++++++++++++++++------------------------- 1 file changed, 31 insertions(+), 32 deletions(-) diff --git a/src/lattice.f90 b/src/lattice.f90 index 68240390f..a93144f42 100644 --- a/src/lattice.f90 +++ b/src/lattice.f90 @@ -750,41 +750,40 @@ function lattice_interaction_SlipBySlip(Nslip,interactionValues,lattice) result( integer, dimension(HEX_NSLIP,HEX_NSLIP), parameter :: & HEX_INTERACTIONSLIPSLIP = reshape( [& ! basal prism 1. pyr 1. pyr 2. pyr - 1, 2, 2, 3, 4, 4, 9,10,9,9,10,9, 20,21,22,22,21,20,20,21,22,22,21,20, 47,47,48,47,47,48, & ! -----> acting (forest) - 2, 1, 2, 4, 3, 4, 10,9,9,10,9,9, 22,22,21,20,20,21,22,22,21,20,20,21, 47,48,47,47,48,47, & ! 
| basal - 2, 2, 1, 4, 4, 3, 9,9,10,9,9,10, 21,20,20,21,22,22,21,20,20,21,22,22, 48,47,47,48,47,47, & ! | - ! v - 7, 8, 8, 5, 6, 6, 11,12,11,11,12,11, 23,24,25,25,24,23,23,24,25,25,24,23, 49,49,50,49,49,50, & ! reacting (primary) - 8, 7, 8, 6, 5, 6, 12,11,11,12,11,11, 25,25,24,23,23,24,25,25,24,23,23,24, 49,50,49,49,50,49, & ! prism - 8, 8, 7, 6, 6, 5, 11,11,12,11,11,12, 24,23,23,24,25,25,24,23,23,24,25,25, 50,49,49,50,49,49, & + 1, 2, 2, 3, 4, 4, 9,10, 9, 9,10, 9, 20,21,22,22,21,20,20,21,22,22,21,20, 47,47,48,47,47,48, & ! -----> acting (forest) + 2, 1, 2, 4, 3, 4, 10, 9, 9,10, 9, 9, 22,22,21,20,20,21,22,22,21,20,20,21, 47,48,47,47,48,47, & ! | basal + 2, 2, 1, 4, 4, 3, 9, 9,10, 9, 9,10, 21,20,20,21,22,22,21,20,20,21,22,22, 48,47,47,48,47,47, & ! | + ! v + 7, 8, 8, 5, 6, 6, 11,12,11,11,12,11, 23,24,25,25,24,23,23,24,25,25,24,23, 49,49,50,49,49,50, & ! reacting (primary) + 8, 7, 8, 6, 5, 6, 12,11,11,12,11,11, 25,25,24,23,23,24,25,25,24,23,23,24, 49,50,49,49,50,49, & ! prism + 8, 8, 7, 6, 6, 5, 11,11,12,11,11,12, 24,23,23,24,25,25,24,23,23,24,25,25, 50,49,49,50,49,49, & + 18,19,18, 16,17,16, 13,14,14,15,14,14, 26,26,27,28,28,27,29,29,27,28,28,27, 51,52,51,51,52,51, & + 19,18,18, 17,16,16, 14,13,14,14,15,14, 28,27,26,26,27,28,28,27,29,29,27,28, 51,51,52,51,51,52, & + 18,18,19, 16,16,17, 14,14,13,14,14,15, 27,28,28,27,26,26,27,28,28,27,29,29, 52,51,51,52,51,51, & + 18,19,18, 16,17,16, 15,14,14,13,14,14, 29,29,27,28,28,27,26,26,27,28,28,27, 51,52,51,51,52,51, & ! 1. pyr + 19,18,18, 17,16,16, 14,15,14,14,13,14, 28,27,29,29,27,28,28,27,26,26,27,28, 51,51,52,51,51,52, & + 18,18,19, 16,16,17, 14,14,15,14,14,13, 27,28,28,27,29,29,27,28,28,27,26,26, 52,51,51,52,51,51, & - 18,19,18, 16,17,16, 13,14,14,15,14,14, 26,26,27,28,28,27,29,29,27,28,28,27, 51,52,51,51,52,51, & - 19,18,18, 17,16,16, 14,13,14,14,15,14, 28,27,26,26,27,28,28,27,29,29,27,28, 51,51,52,51,51,52, & - 18,18,19, 16,16,17, 14,14,13,14,14,15, 27,28,28,27,26,26,27,28,28,27,29,29, 52,51,51,52,51,51, & - 18,19,18, 16,17,16, 15,14,14,13,14,14, 29,29,27,28,28,27,26,26,27,28,28,27, 51,52,51,51,52,51, & ! 1. pyr - 19,18,18, 17,16,16, 14,15,14,14,13,14, 28,27,29,29,27,28,28,27,26,26,27,28, 51,51,52,51,51,52, & - 18,18,19, 16,16,17, 14,14,15,14,14,13, 27,28,28,27,29,29,27,28,28,27,26,26, 52,51,51,52,51,51, & + 44,45,46, 41,42,43, 37,38,39,40,38,39, 30,31,32,32,32,33,34,35,32,32,32,36, 53,54,55,53,54,56, & + 46,45,44, 43,42,41, 37,39,38,40,39,38, 31,30,36,32,32,32,35,34,33,32,32,32, 56,54,53,55,54,53, & + 45,46,44, 42,43,41, 39,37,38,39,40,38, 32,36,30,31,32,32,32,33,34,35,32,32, 56,53,54,55,53,54, & + 45,44,46, 42,41,43, 38,37,39,38,40,39, 32,32,31,30,36,32,32,32,35,34,33,32, 53,56,54,53,55,54, & + 46,44,45, 43,41,42, 38,39,37,38,39,40, 32,32,32,36,30,31,32,32,32,33,34,35, 54,56,53,54,55,53, & + 44,46,45, 41,43,42, 39,38,37,39,38,40, 33,32,32,32,31,30,36,32,32,32,35,34, 54,53,56,54,53,55, & + 44,45,46, 41,42,43, 40,38,39,37,38,39, 34,35,32,32,32,36,30,31,32,32,32,33, 53,54,56,53,54,55, & ! 1. 
pyr + 46,45,44, 43,42,41, 40,39,38,37,39,38, 35,34,33,32,32,32,31,30,36,32,32,32, 55,54,53,56,54,53, & + 45,46,44, 42,43,41, 39,40,38,39,37,38, 32,33,34,35,32,32,32,36,30,31,32,32, 55,53,54,56,53,54, & + 45,44,46, 42,41,43, 38,40,39,38,37,39, 32,32,35,34,33,32,32,32,31,30,36,32, 53,55,54,53,56,54, & + 46,44,45, 43,41,42, 38,39,40,38,39,37, 32,32,32,33,34,35,32,32,32,36,30,31, 54,55,53,54,56,53, & + 44,46,45, 41,43,42, 39,38,40,39,38,37, 36,32,32,32,35,34,33,32,32,32,31,30, 54,53,55,54,53,56, & - 44,45,46, 41,42,43, 37,38,39,40,38,39, 30,31,32,32,32,33,34,35,32,32,32,36, 53,54,55,53,54,56, & - 46,45,44, 43,42,41, 37,39,38,40,39,38, 31,30,36,32,32,32,35,34,33,32,32,32, 56,54,53,55,54,53, & - 45,46,44, 42,43,41, 39,37,38,39,40,38, 32,36,30,31,32,32,32,33,34,35,32,32, 56,53,54,55,53,54, & - 45,44,46, 42,41,43, 38,37,39,38,40,39, 32,32,31,30,36,32,32,32,35,34,33,32, 53,56,54,53,55,54, & - 46,44,45, 43,41,42, 38,39,37,38,39,40, 32,32,32,36,30,31,32,32,32,33,34,35, 54,56,53,54,55,53, & - 44,46,45, 41,43,42, 39,38,37,39,38,40, 33,32,32,32,31,30,36,32,32,32,35,34, 54,53,56,54,53,55, & - 44,45,46, 41,42,43, 40,38,39,37,38,39, 34,35,32,32,32,36,30,31,32,32,32,33, 53,54,56,53,54,55, & ! 1. pyr - 46,45,44, 43,42,41, 40,39,38,37,39,38, 35,34,33,32,32,32,31,30,36,32,32,32, 55,54,53,56,54,53, & - 45,46,44, 42,43,41, 39,40,38,39,37,38, 32,33,34,35,32,32,32,36,30,31,32,32, 55,53,54,56,53,54, & - 45,44,46, 42,41,43, 38,40,39,38,37,39, 32,32,35,34,33,32,32,32,31,30,36,32, 53,55,54,53,56,54, & - 46,44,45, 43,41,42, 38,39,40,38,39,37, 32,32,32,33,34,35,32,32,32,36,30,31, 54,55,53,54,56,53, & - 44,46,45, 41,43,42, 39,38,40,39,38,37, 36,32,32,32,35,34,33,32,32,32,31,30, 54,53,55,54,53,56, & - - 68,68,69, 66,66,67, 64,64,65,64,65,65, 60,61,61,60,62,62,60,63,63,60,62,62, 57,58,58,59,58,58, & - 68,69,68, 66,67,66, 65,64,64,65,64,64, 62,62,60,61,61,60,62,62,60,63,63,60, 58,57,58,58,59,58, & - 69,68,68, 67,66,66, 64,65,64,64,65,64, 63,60,62,62,60,61,61,60,62,62,60,63, 58,58,57,58,58,59, & - 68,68,69, 66,66,67, 64,64,65,64,64,65, 60,63,63,60,62,62,60,61,61,60,62,62, 59,58,58,57,58,58, & ! 2. pyr - 68,69,68, 66,67,66, 65,64,64,65,64,64, 62,62,60,63,63,60,62,62,60,61,61,60, 58,59,58,58,57,58, & - 69,68,68, 67,66,66, 64,65,64,64,65,64, 61,60,62,62,60,63,63,60,62,62,60,61, 58,58,59,58,58,57 & + 68,68,69, 66,66,67, 64,64,65,64,65,65, 60,61,61,60,62,62,60,63,63,60,62,62, 57,58,58,59,58,58, & + 68,69,68, 66,67,66, 65,64,64,65,64,64, 62,62,60,61,61,60,62,62,60,63,63,60, 58,57,58,58,59,58, & + 69,68,68, 67,66,66, 64,65,64,64,65,64, 63,60,62,62,60,61,61,60,62,62,60,63, 58,58,57,58,58,59, & + 68,68,69, 66,66,67, 64,64,65,64,64,65, 60,63,63,60,62,62,60,61,61,60,62,62, 59,58,58,57,58,58, & ! 2. 
pyr + 68,69,68, 66,67,66, 65,64,64,65,64,64, 62,62,60,63,63,60,62,62,60,61,61,60, 58,59,58,58,57,58, & + 69,68,68, 67,66,66, 64,65,64,64,65,64, 61,60,62,62,60,63,63,60,62,62,60,61, 58,58,59,58,58,57 & ],shape(HEX_INTERACTIONSLIPSLIP)) !< Slip-slip interaction types for hex (onion peel naming scheme) integer, dimension(BCT_NSLIP,BCT_NSLIP), parameter :: & From 1ddf1e569499829bba4ccbff288dad3711cd62cf Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Tue, 21 Dec 2021 23:01:00 +0100 Subject: [PATCH 04/58] support for PETSc with 64bit integers compiles, but untested --- src/grid/grid_damage_spectral.f90 | 12 ++++++------ src/grid/grid_mech_FEM.f90 | 18 +++++++++--------- src/grid/grid_mech_spectral_basic.f90 | 12 ++++++------ src/grid/grid_mech_spectral_polarisation.f90 | 12 ++++++------ src/grid/grid_thermal_spectral.f90 | 12 ++++++------ src/prec.f90 | 9 +++++++++ 6 files changed, 42 insertions(+), 33 deletions(-) diff --git a/src/grid/grid_damage_spectral.f90 b/src/grid/grid_damage_spectral.f90 index ef229368e..69370b211 100644 --- a/src/grid/grid_damage_spectral.f90 +++ b/src/grid/grid_damage_spectral.f90 @@ -108,16 +108,16 @@ subroutine grid_damage_spectral_init() ! initialize solver specific parts of PETSc call SNESCreate(PETSC_COMM_WORLD,damage_snes,ierr); CHKERRQ(ierr) call SNESSetOptionsPrefix(damage_snes,'damage_',ierr);CHKERRQ(ierr) - localK = 0 - localK(worldrank) = grid3 + localK = 0_pPetscInt + localK(worldrank) = int(grid3,pPetscInt) call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr) call DMDACreate3D(PETSC_COMM_WORLD, & DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point - grid(1),grid(2),grid(3), & ! global grid - 1, 1, worldsize, & - 1, 0, & ! #dof (damage phase field), ghost boundary width (domain overlap) - [grid(1)],[grid(2)],localK, & ! local grid + int(grid(1),pPetscInt),int(grid(2),pPetscInt),int(grid(3),pPetscInt), & ! global grid + 1_pPetscInt, 1_pPetscInt, int(worldsize,pPetscInt), & + 1_pPetscInt, 0_pPetscInt, & ! #dof (phi, scalar), ghost boundary width (domain overlap) + [int(grid(1),pPetscInt)],[int(grid(2),pPetscInt)],localK, & ! local grid damage_grid,ierr) ! handle, error CHKERRQ(ierr) call SNESSetDM(damage_snes,damage_grid,ierr); CHKERRQ(ierr) ! connect snes to da diff --git a/src/grid/grid_mech_FEM.f90 b/src/grid/grid_mech_FEM.f90 index e1dc3dabe..c4a8588d6 100644 --- a/src/grid/grid_mech_FEM.f90 +++ b/src/grid/grid_mech_FEM.f90 @@ -164,16 +164,16 @@ subroutine grid_mechanical_FEM_init CHKERRQ(ierr) call SNESSetOptionsPrefix(mechanical_snes,'mechanical_',ierr) CHKERRQ(ierr) - localK = 0 - localK(worldrank) = grid3 + localK = 0_pPetscInt + localK(worldrank) = int(grid3,pPetscInt) call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr) call DMDACreate3d(PETSC_COMM_WORLD, & DM_BOUNDARY_PERIODIC, DM_BOUNDARY_PERIODIC, DM_BOUNDARY_PERIODIC, & DMDA_STENCIL_BOX, & - grid(1),grid(2),grid(3), & - 1, 1, worldsize, & - 3, 1, & - [grid(1)],[grid(2)],localK, & + int(grid(1),pPetscInt),int(grid(2),pPetscInt),int(grid(3),pPetscInt), & ! global grid + 1_pPetscInt, 1_pPetscInt, int(worldsize,pPetscInt), & + 3_pPetscInt, 1_pPetscInt, & ! #dof (u, vector), ghost boundary width (domain overlap) + [int(grid(1),pPetscInt)],[int(grid(2),pPetscInt)],localK, & ! 
local grid mechanical_grid,ierr) CHKERRQ(ierr) call SNESSetDM(mechanical_snes,mechanical_grid,ierr) @@ -196,7 +196,7 @@ subroutine grid_mechanical_FEM_init CHKERRQ(ierr) call SNESSetConvergenceTest(mechanical_snes,converged,PETSC_NULL_SNES,PETSC_NULL_FUNCTION,ierr) ! specify custom convergence check function "_converged" CHKERRQ(ierr) - call SNESSetMaxLinearSolveFailures(mechanical_snes, huge(1), ierr) ! ignore linear solve failures + call SNESSetMaxLinearSolveFailures(mechanical_snes, huge(1_pPetscInt), ierr) ! ignore linear solve failures CHKERRQ(ierr) call SNESSetFromOptions(mechanical_snes,ierr) ! pull it all together with additional cli arguments CHKERRQ(ierr) @@ -687,7 +687,7 @@ subroutine formJacobian(da_local,x_local,Jac_pre,Jac,dummy,ierr) matmul(transpose(BMatFull), & matmul(reshape(reshape(homogenization_dPdF(1:3,1:3,1:3,1:3,ele), & shape=[3,3,3,3], order=[2,1,4,3]),shape=[9,9]),BMatFull))*detJ - call MatSetValuesStencil(Jac,24,row,24,col,K_ele,ADD_VALUES,ierr) + call MatSetValuesStencil(Jac,24_pPETScInt,row,24_pPetscInt,col,K_ele,ADD_VALUES,ierr) CHKERRQ(ierr) enddo; enddo; enddo call MatAssemblyBegin(Jac,MAT_FINAL_ASSEMBLY,ierr); CHKERRQ(ierr) @@ -700,7 +700,7 @@ subroutine formJacobian(da_local,x_local,Jac_pre,Jac,dummy,ierr) diag = (C_volAvg(1,1,1,1)/delta(1)**2 + & C_volAvg(2,2,2,2)/delta(2)**2 + & C_volAvg(3,3,3,3)/delta(3)**2)*detJ - call MatZeroRowsColumns(Jac,size(rows),rows,diag,PETSC_NULL_VEC,PETSC_NULL_VEC,ierr) + call MatZeroRowsColumns(Jac,size(rows,kind=pPetscInt),rows,diag,PETSC_NULL_VEC,PETSC_NULL_VEC,ierr) CHKERRQ(ierr) call DMGetGlobalVector(da_local,coordinates,ierr); CHKERRQ(ierr) call DMDAVecGetArrayF90(da_local,coordinates,x_scal,ierr); CHKERRQ(ierr) diff --git a/src/grid/grid_mech_spectral_basic.f90 b/src/grid/grid_mech_spectral_basic.f90 index 66b703ed6..0a267ecca 100644 --- a/src/grid/grid_mech_spectral_basic.f90 +++ b/src/grid/grid_mech_spectral_basic.f90 @@ -159,16 +159,16 @@ subroutine grid_mechanical_spectral_basic_init ! initialize solver specific parts of PETSc call SNESCreate(PETSC_COMM_WORLD,snes,ierr); CHKERRQ(ierr) call SNESSetOptionsPrefix(snes,'mechanical_',ierr);CHKERRQ(ierr) - localK = 0 - localK(worldrank) = grid3 + localK = 0_pPetscInt + localK(worldrank) = int(grid3,pPetscInt) call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr) call DMDACreate3d(PETSC_COMM_WORLD, & DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point - grid(1),grid(2),grid(3), & ! global grid - 1 , 1, worldsize, & - 9, 0, & ! #dof (F tensor), ghost boundary width (domain overlap) - [grid(1)],[grid(2)],localK, & ! local grid + int(grid(1),pPetscInt),int(grid(2),pPetscInt),int(grid(3),pPetscInt), & ! global grid + 1_pPetscInt, 1_pPetscInt, int(worldsize,pPetscInt), & + 9_pPetscInt, 0_pPetscInt, & ! #dof (F, tensor), ghost boundary width (domain overlap) + [int(grid(1),pPetscInt)],[int(grid(2),pPetscInt)],localK, & ! local grid da,ierr) ! handle, error CHKERRQ(ierr) call SNESSetDM(snes,da,ierr); CHKERRQ(ierr) ! connect snes to da diff --git a/src/grid/grid_mech_spectral_polarisation.f90 b/src/grid/grid_mech_spectral_polarisation.f90 index 3d524029c..5bfce5e3f 100644 --- a/src/grid/grid_mech_spectral_polarisation.f90 +++ b/src/grid/grid_mech_spectral_polarisation.f90 @@ -179,16 +179,16 @@ subroutine grid_mechanical_spectral_polarisation_init ! 
initialize solver specific parts of PETSc call SNESCreate(PETSC_COMM_WORLD,snes,ierr); CHKERRQ(ierr) call SNESSetOptionsPrefix(snes,'mechanical_',ierr);CHKERRQ(ierr) - localK = 0 - localK(worldrank) = grid3 + localK = 0_pPetscInt + localK(worldrank) = int(grid3,pPetscInt) call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr) call DMDACreate3d(PETSC_COMM_WORLD, & DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point - grid(1),grid(2),grid(3), & ! global grid - 1 , 1, worldsize, & - 18, 0, & ! #dof (F tensor), ghost boundary width (domain overlap) - [grid(1)],[grid(2)],localK, & ! local grid + int(grid(1),pPetscInt),int(grid(2),pPetscInt),int(grid(3),pPetscInt), & ! global grid + 1_pPetscInt, 1_pPetscInt, int(worldsize,pPetscInt), & + 18_pPetscInt, 0_pPetscInt, & ! #dof (2xtensor), ghost boundary width (domain overlap) + [int(grid(1),pPetscInt)],[int(grid(2),pPetscInt)],localK, & ! local grid da,ierr) ! handle, error CHKERRQ(ierr) call SNESSetDM(snes,da,ierr); CHKERRQ(ierr) ! connect snes to da diff --git a/src/grid/grid_thermal_spectral.f90 b/src/grid/grid_thermal_spectral.f90 index 67c7ba1c3..548fa4b3c 100644 --- a/src/grid/grid_thermal_spectral.f90 +++ b/src/grid/grid_thermal_spectral.f90 @@ -103,16 +103,16 @@ subroutine grid_thermal_spectral_init(T_0) ! initialize solver specific parts of PETSc call SNESCreate(PETSC_COMM_WORLD,thermal_snes,ierr); CHKERRQ(ierr) call SNESSetOptionsPrefix(thermal_snes,'thermal_',ierr);CHKERRQ(ierr) - localK = 0 - localK(worldrank) = grid3 + localK = 0_pPetscInt + localK(worldrank) = int(grid3,pPetscInt) call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr) call DMDACreate3D(PETSC_COMM_WORLD, & DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point - grid(1),grid(2),grid(3), & ! global grid - 1, 1, worldsize, & - 1, 0, & ! #dof (T field), ghost boundary width (domain overlap) - [grid(1)],[grid(2)],localK, & ! local grid + int(grid(1),pPetscInt),int(grid(2),pPetscInt),int(grid(3),pPetscInt), & ! global grid + 1_pPetscInt, 1_pPetscInt, int(worldsize,pPetscInt), & + 1_pPetscInt, 0_pPetscInt, & ! #dof (T, scalar), ghost boundary width (domain overlap) + [int(grid(1),pPetscInt)],[int(grid(2),pPetscInt)],localK, & ! local grid thermal_grid,ierr) ! handle, error CHKERRQ(ierr) call SNESSetDM(thermal_snes,thermal_grid,ierr); CHKERRQ(ierr) ! 
connect snes to da diff --git a/src/prec.f90 b/src/prec.f90 index 843033219..fe3e7fc20 100644 --- a/src/prec.f90 +++ b/src/prec.f90 @@ -10,6 +10,11 @@ module prec use, intrinsic :: IEEE_arithmetic use, intrinsic :: ISO_C_binding +#ifdef PETSC +#include + use PETScSys +#endif + implicit none public @@ -21,6 +26,10 @@ module prec integer, parameter :: pInt = pI64 #else integer, parameter :: pInt = pI32 +#endif +#ifdef PETSC + PetscInt, private :: dummy + integer, parameter :: pPetscInt = kind(dummy) #endif integer, parameter :: pStringLen = 256 !< default string length integer, parameter :: pPathLen = 4096 !< maximum length of a path name on linux From e0e13ec659c286d7d1ad43ea385ef8f5a89b4028 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Mon, 27 Dec 2021 12:41:55 +0100 Subject: [PATCH 05/58] testing 64bit integer compilation --- .gitlab-ci.yml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 6f18e3f1e..9fe88bba0 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -93,6 +93,13 @@ test_mesh_GNU: - cd PRIVATE/testing/pytest - pytest -k 'compile and mesh' --basetemp ${TESTROOT}/compile_mesh_GNU +test_grid_GNU-64bit: + stage: compile + script: + - module load Compiler/GNU/10 Libraries/PETSc/3.16.2/64bit + - cd PRIVATE/testing/pytest + - pytest -k 'compile and grid' --basetemp ${TESTROOT}/compile_grid_GNU-64bit + test_mesh_IntelLLVM: stage: compile script: From 1e965c42b7bfea0ec8957ac1ad4c5e0f478a7423 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Mon, 27 Dec 2021 17:44:22 +0100 Subject: [PATCH 06/58] don't rely on ML avoid dependencies to external packages as much as possible --- src/grid/grid_mech_FEM.f90 | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/grid/grid_mech_FEM.f90 b/src/grid/grid_mech_FEM.f90 index c4a8588d6..6030e9a74 100644 --- a/src/grid/grid_mech_FEM.f90 +++ b/src/grid/grid_mech_FEM.f90 @@ -145,8 +145,7 @@ subroutine grid_mechanical_FEM_init ! set default and user defined options for PETSc call PetscOptionsInsertString(PETSC_NULL_OPTIONS, & '-mechanical_snes_type newtonls -mechanical_ksp_type fgmres & - &-mechanical_ksp_max_it 25 -mechanical_pc_type ml & - &-mechanical_mg_levels_ksp_type chebyshev', & + &-mechanical_ksp_max_it 25 -mechanical_mg_levels_ksp_type chebyshev', & ierr) CHKERRQ(ierr) call PetscOptionsInsertString(PETSC_NULL_OPTIONS,num_grid%get_asString('petsc_options',defaultVal=''),ierr) From 56c22a91a560955005fe03562f993dccced2243b Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Mon, 27 Dec 2021 18:59:33 +0100 Subject: [PATCH 07/58] testing grid with Intel LLVM works now ML is no longer the default preconditioner for FEM. Theoretically, installation of ML with all standard conforming compilers should be possible, but --download-ml failed for unknown reasons. 
--- .gitlab-ci.yml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 9fe88bba0..055a40815 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -100,6 +100,13 @@ test_grid_GNU-64bit: - cd PRIVATE/testing/pytest - pytest -k 'compile and grid' --basetemp ${TESTROOT}/compile_grid_GNU-64bit +test_grid_IntelLLVM: + stage: compile + script: + - module load ${COMPILER_INTELLLVM} ${MPI_INTELLLVM} ${PETSC_INTELLLVM} + - cd PRIVATE/testing/pytest + - pytest -k 'compile and grid' --basetemp ${TESTROOT}/compile_grid_IntelLLVM + test_mesh_IntelLLVM: stage: compile script: @@ -128,7 +135,6 @@ test_Marc: - cd PRIVATE/testing/pytest - pytest -k 'compile and Marc' --basetemp ${TESTROOT}/compile_Marc - setup_grid: stage: compile script: From 95c64f7a0aefaebcfa6c82802a6205bba5a19a22 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Thu, 30 Dec 2021 21:53:00 +0100 Subject: [PATCH 08/58] avoid failing self test increase number of samples to have less corner cases. Needs to be allocatable to avoid stack/heap issue on ifort --- src/math.f90 | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/math.f90 b/src/math.f90 index 34cb6eecb..3ea4d6a08 100644 --- a/src/math.f90 +++ b/src/math.f90 @@ -1432,9 +1432,11 @@ subroutine selfTest error stop 'math_LeviCivita' normal_distribution: block - real(pReal), dimension(500000) :: r + integer, parameter :: N = 1000000 + real(pReal), dimension(:), allocatable :: r real(pReal) :: mu, sigma + allocate(r(N)) call random_number(mu) call random_number(sigma) @@ -1443,11 +1445,11 @@ subroutine selfTest call math_normal(r,mu,sigma) - if (abs(mu -sum(r)/real(size(r),pReal))>5.0e-2_pReal) & + if (abs(mu -sum(r)/real(N,pReal))>5.0e-2_pReal) & error stop 'math_normal(mu)' - mu = sum(r)/real(size(r),pReal) - if (abs(sigma**2 -1.0_pReal/real(size(r)-1,pReal) * sum((r-mu)**2))/sigma > 5.0e-2_pReal) & + mu = sum(r)/real(N,pReal) + if (abs(sigma**2 -1.0_pReal/real(N-1,pReal) * sum((r-mu)**2))/sigma > 5.0e-2_pReal) & error stop 'math_normal(sigma)' end block normal_distribution From de9183af4e78d4663c062d2b0174ab2dc2db0910 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Thu, 30 Dec 2021 21:55:49 +0100 Subject: [PATCH 09/58] functions without side-effects are 'pure' basically all 'getter' functions should be pure --- src/phase_mechanical.f90 | 6 +++--- src/phase_mechanical_elastic.f90 | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/phase_mechanical.f90 b/src/phase_mechanical.f90 index 37e6fe7bf..1917d81c9 100644 --- a/src/phase_mechanical.f90 +++ b/src/phase_mechanical.f90 @@ -168,17 +168,17 @@ submodule(phase) mechanical integer, intent(in) :: ph,en end function plastic_dislotwin_homogenizedC - module function elastic_C66(ph,en) result(C66) + pure module function elastic_C66(ph,en) result(C66) real(pReal), dimension(6,6) :: C66 integer, intent(in) :: ph, en end function elastic_C66 - module function elastic_mu(ph,en) result(mu) + pure module function elastic_mu(ph,en) result(mu) real(pReal) :: mu integer, intent(in) :: ph, en end function elastic_mu - module function elastic_nu(ph,en) result(nu) + pure module function elastic_nu(ph,en) result(nu) real(pReal) :: nu integer, intent(in) :: ph, en end function elastic_nu diff --git a/src/phase_mechanical_elastic.f90 b/src/phase_mechanical_elastic.f90 index acbbac236..24797a880 100644 --- a/src/phase_mechanical_elastic.f90 +++ b/src/phase_mechanical_elastic.f90 @@ -86,7 +86,7 @@ end subroutine elastic_init 
!-------------------------------------------------------------------------------------------------- !> @brief return 6x6 elasticity tensor !-------------------------------------------------------------------------------------------------- -module function elastic_C66(ph,en) result(C66) +pure module function elastic_C66(ph,en) result(C66) integer, intent(in) :: & ph, & @@ -140,7 +140,7 @@ end function elastic_C66 !-------------------------------------------------------------------------------------------------- !> @brief return shear modulus !-------------------------------------------------------------------------------------------------- -module function elastic_mu(ph,en) result(mu) +pure module function elastic_mu(ph,en) result(mu) integer, intent(in) :: & ph, & @@ -157,7 +157,7 @@ end function elastic_mu !-------------------------------------------------------------------------------------------------- !> @brief return Poisson ratio !-------------------------------------------------------------------------------------------------- -module function elastic_nu(ph,en) result(nu) +pure module function elastic_nu(ph,en) result(nu) integer, intent(in) :: & ph, & From 1661b815b21db384545f5476294ce3b4332c3c43 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Thu, 30 Dec 2021 22:02:29 +0100 Subject: [PATCH 10/58] correct calculation of temperature dependent stacking fault energy --- src/phase_mechanical_plastic_dislotwin.f90 | 13 +++++++------ src/phase_mechanical_plastic_nonlocal.f90 | 2 +- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/src/phase_mechanical_plastic_dislotwin.f90 b/src/phase_mechanical_plastic_dislotwin.f90 index 78681fa24..805b3cc43 100644 --- a/src/phase_mechanical_plastic_dislotwin.f90 +++ b/src/phase_mechanical_plastic_dislotwin.f90 @@ -153,10 +153,10 @@ module function plastic_dislotwin_init() result(myPlasticity) print'(/,a,i0)', ' # phases: ',count(myPlasticity); flush(IO_STDOUT) print'(/,1x,a)', 'A. Ma and F. Roters, Acta Materialia 52(12):3603–3612, 2004' - print'( 1x,a)', 'https://doi.org/10.1016/j.actamat.2004.04.012'//IO_EOL + print'( 1x,a)', 'https://doi.org/10.1016/j.actamat.2004.04.012' print'(/,1x,a)', 'F. Roters et al., Computational Materials Science 39:91–95, 2007' - print'( 1x,a)', 'https://doi.org/10.1016/j.commatsci.2006.04.014'//IO_EOL + print'( 1x,a)', 'https://doi.org/10.1016/j.commatsci.2006.04.014' print'(/,1x,a)', 'S.L. Wong et al., Acta Materialia 118:140–151, 2016' print'( 1x,a)', 'https://doi.org/10.1016/j.actamat.2016.07.032' @@ -656,12 +656,14 @@ module subroutine dislotwin_dotState(Mp,T,ph,en) dot_gamma_tr real(pReal) :: & mu, & - nu + nu, & + Gamma associate(prm => param(ph), stt => state(ph), dot => dotState(ph), dst => dependentState(ph)) mu = elastic_mu(ph,en) nu = elastic_nu(ph,en) + Gamma = prm%Gamma_sf(1) + prm%Gamma_sf(2) * (T-prm%T_ref) f_matrix = 1.0_pReal & - sum(stt%f_tw(1:prm%sum_N_tw,en)) & @@ -689,8 +691,7 @@ module subroutine dislotwin_dotState(Mp,T,ph,en) else ! Argon & Moffat, Acta Metallurgica, Vol. 
29, pg 293 to 299, 1981 sigma_cl = dot_product(prm%n0_sl(1:3,i),matmul(Mp,prm%n0_sl(1:3,i))) - b_d = merge(24.0_pReal*PI*(1.0_pReal - nu)/(2.0_pReal + nu) & - * (prm%Gamma_sf(1) + prm%Gamma_sf(2) * T) / (mu*prm%b_sl(i)), & + b_d = merge(24.0_pReal*PI*(1.0_pReal - nu)/(2.0_pReal + nu) * Gamma / (mu*prm%b_sl(i)), & 1.0_pReal, & prm%ExtendedDislocations) v_cl = 2.0_pReal*prm%omega*b_d**2*exp(-prm%Q_cl/(K_B*T)) & @@ -756,7 +757,7 @@ module subroutine dislotwin_dependentState(T,ph,en) sumf_tw = sum(stt%f_tw(1:prm%sum_N_tw,en)) sumf_tr = sum(stt%f_tr(1:prm%sum_N_tr,en)) - Gamma = prm%Gamma_sf(1) + prm%Gamma_sf(2) * T + Gamma = prm%Gamma_sf(1) + prm%Gamma_sf(2) * (T-prm%T_ref) !* rescaled volume fraction for topology f_over_t_tw = stt%f_tw(1:prm%sum_N_tw,en)/prm%t_tw ! this is per system ... diff --git a/src/phase_mechanical_plastic_nonlocal.f90 b/src/phase_mechanical_plastic_nonlocal.f90 index 7118055fe..9d88fb5fc 100644 --- a/src/phase_mechanical_plastic_nonlocal.f90 +++ b/src/phase_mechanical_plastic_nonlocal.f90 @@ -203,7 +203,7 @@ module function plastic_nonlocal_init() result(myPlasticity) print'(/,a,i0)', ' # phases: ',Ninstances; flush(IO_STDOUT) print'(/,1x,a)', 'C. Reuber et al., Acta Materialia 71:333–348, 2014' - print'( 1x,a)', 'https://doi.org/10.1016/j.actamat.2014.03.012'//IO_EOL + print'( 1x,a)', 'https://doi.org/10.1016/j.actamat.2014.03.012' print'(/,1x,a)', 'C. Kords, Dissertation RWTH Aachen, 2014' print'( 1x,a)', 'http://publications.rwth-aachen.de/record/229993' From da00f33487f5020310788273797abd425762713a Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Thu, 30 Dec 2021 22:09:53 +0100 Subject: [PATCH 11/58] transformation is only for fcc --- src/phase_mechanical_plastic_dislotwin.f90 | 48 ++++++++-------------- 1 file changed, 18 insertions(+), 30 deletions(-) diff --git a/src/phase_mechanical_plastic_dislotwin.f90 b/src/phase_mechanical_plastic_dislotwin.f90 index 805b3cc43..b94a135f1 100644 --- a/src/phase_mechanical_plastic_dislotwin.f90 +++ b/src/phase_mechanical_plastic_dislotwin.f90 @@ -25,9 +25,9 @@ submodule(phase:plastic) dislotwin xi_sb = 1.0_pReal, & !< value for shearband resistance v_sb = 1.0_pReal, & !< value for shearband velocity_0 E_sb = 1.0_pReal, & !< activation energy for shear bands - delta_G = 1.0_pReal, & !< Free energy difference between austensite and martensite + delta_G = 1.0_pReal, & !< free energy difference between austensite and martensite i_tr = 1.0_pReal, & !< adjustment parameter to calculate MFP for transformation - h = 1.0_pReal, & !< Stack height of hex nucleus + h = 1.0_pReal, & !< stack height of hex nucleus T_ref = T_ROOM, & a_cI = 1.0_pReal, & a_cF = 1.0_pReal @@ -40,14 +40,13 @@ submodule(phase:plastic) dislotwin Q_sl,& !< activation energy for glide [J] for each slip system v_0, & !< dislocation velocity prefactor [m/s] for each slip system dot_N_0_tw, & !< twin nucleation rate [1/m³s] for each twin system - dot_N_0_tr, & !< trans nucleation rate [1/m³s] for each trans system t_tw, & !< twin thickness [m] for each twin system i_sl, & !< Adj. 
parameter for distance between 2 forest dislocations for each slip system t_tr, & !< martensite lamellar thickness [m] for each trans system p, & !< p-exponent in glide velocity q, & !< q-exponent in glide velocity - r, & !< r-exponent in twin nucleation rate - s, & !< s-exponent in trans nucleation rate + r, & !< exponent in twin nucleation rate + s, & !< exponent in trans nucleation rate tau_0, & !< strength due to elements in solid solution gamma_char, & !< characteristic shear for twins B, & !< drag coefficient @@ -306,10 +305,10 @@ module function plastic_dislotwin_init() result(myPlasticity) prm%b_tr = pl%get_as1dFloat('b_tr') prm%b_tr = math_expand(prm%b_tr,prm%N_tr) - prm%h = pl%get_asFloat('h', defaultVal=0.0_pReal) ! ToDo: How to handle that??? - prm%i_tr = pl%get_asFloat('i_tr', defaultVal=0.0_pReal) ! ToDo: How to handle that??? + prm%h = pl%get_asFloat('h', defaultVal=0.0_pReal) ! ToDo: This is not optional! + prm%i_tr = pl%get_asFloat('i_tr', defaultVal=0.0_pReal) ! ToDo: This is not optional! prm%delta_G = pl%get_asFloat('delta_G') - prm%x_c_tr = pl%get_asFloat('x_c_tr', defaultVal=0.0_pReal) ! ToDo: How to handle that??? + prm%x_c_tr = pl%get_asFloat('x_c_tr', defaultVal=0.0_pReal) ! ToDo: This is not optional! prm%L_tr = pl%get_asFloat('L_tr') prm%a_cI = pl%get_asFloat('a_cI', defaultVal=0.0_pReal) prm%a_cF = pl%get_asFloat('a_cF', defaultVal=0.0_pReal) @@ -324,10 +323,6 @@ module function plastic_dislotwin_init() result(myPlasticity) prm%a_cI, & prm%a_cF) - if (phase_lattice(ph) /= 'cF') then - prm%dot_N_0_tr = pl%get_as1dFloat('dot_N_0_tr') - prm%dot_N_0_tr = math_expand(prm%dot_N_0_tr,prm%N_tr) - endif prm%t_tr = pl%get_as1dFloat('t_tr') prm%t_tr = math_expand(prm%t_tr,prm%N_tr) prm%s = pl%get_as1dFloat('p_tr',defaultVal=[0.0_pReal]) @@ -339,11 +334,8 @@ module function plastic_dislotwin_init() result(myPlasticity) if ( prm%i_tr < 0.0_pReal) extmsg = trim(extmsg)//' i_tr' if (any(prm%t_tr < 0.0_pReal)) extmsg = trim(extmsg)//' t_tr' if (any(prm%s < 0.0_pReal)) extmsg = trim(extmsg)//' p_tr' - if (phase_lattice(ph) /= 'cF') then - if (any(prm%dot_N_0_tr < 0.0_pReal)) extmsg = trim(extmsg)//' dot_N_0_tr' - end if else transActive - allocate(prm%s,prm%b_tr,prm%t_tr,prm%dot_N_0_tr,source=emptyRealArray) + allocate(prm%s,prm%b_tr,prm%t_tr,source=emptyRealArray) allocate(prm%h_tr_tr(0,0)) end if transActive @@ -1042,20 +1034,16 @@ pure subroutine kinetics_tr(Mp,T,dot_gamma_sl,ph,en,& do i = 1, prm%sum_N_tr tau(i) = math_tensordot(Mp,prm%P_tr(1:3,1:3,i)) - isFCC: if (prm%fccTwinTransNucleation) then - s1=prm%fcc_twinNucleationSlipPair(1,i) - s2=prm%fcc_twinNucleationSlipPair(2,i) - if (tau(i) < dst%tau_r_tr(i,en)) then ! ToDo: correct? - Ndot0=(abs(dot_gamma_sl(s1))*(stt%rho_mob(s2,en)+stt%rho_dip(s2,en))+& - abs(dot_gamma_sl(s2))*(stt%rho_mob(s1,en)+stt%rho_dip(s1,en)))/& - (prm%L_tr*prm%b_sl(i))*& - (1.0_pReal-exp(-prm%V_cs/(K_B*T)*(dst%tau_r_tr(i,en)-tau(i)))) - else - Ndot0=0.0_pReal - end if - else isFCC - Ndot0=prm%dot_N_0_tr(i) - end if isFCC + s1=prm%fcc_twinNucleationSlipPair(1,i) + s2=prm%fcc_twinNucleationSlipPair(2,i) + if (tau(i) < dst%tau_r_tr(i,en)) then ! ToDo: correct? 
+ Ndot0=(abs(dot_gamma_sl(s1))*(stt%rho_mob(s2,en)+stt%rho_dip(s2,en))+& + abs(dot_gamma_sl(s2))*(stt%rho_mob(s1,en)+stt%rho_dip(s1,en)))/& + (prm%L_tr*prm%b_sl(i))*& + (1.0_pReal-exp(-prm%V_cs/(K_B*T)*(dst%tau_r_tr(i,en)-tau(i)))) + else + Ndot0=0.0_pReal + end if end do significantStress: where(tau > tol_math_check) From d130225c9f90939676bb4ff3cfd66420574b2c23 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Thu, 30 Dec 2021 22:13:48 +0100 Subject: [PATCH 12/58] polishing --- src/phase_mechanical_plastic_dislotwin.f90 | 42 +++++++++++----------- src/phase_mechanical_plastic_nonlocal.f90 | 1 - 2 files changed, 21 insertions(+), 22 deletions(-) diff --git a/src/phase_mechanical_plastic_dislotwin.f90 b/src/phase_mechanical_plastic_dislotwin.f90 index b94a135f1..09c7ca35c 100644 --- a/src/phase_mechanical_plastic_dislotwin.f90 +++ b/src/phase_mechanical_plastic_dislotwin.f90 @@ -779,9 +779,8 @@ module subroutine dislotwin_dependentState(T,ph,en) + 3.0_pReal*prm%b_tr*mu/(prm%L_tr*prm%b_tr) & + prm%h*prm%delta_G/(3.0_pReal*prm%b_tr) - dst%V_tw(:,en) = (PI/4.0_pReal)*prm%t_tw*dst%Lambda_tw(:,en)**2 - dst%V_tr(:,en) = (PI/4.0_pReal)*prm%t_tr*dst%Lambda_tr(:,en)**2 - + dst%V_tw(:,en) = PI/4.0_pReal*dst%Lambda_tw(:,en)**2*prm%t_tw + dst%V_tr(:,en) = PI/4.0_pReal*dst%Lambda_tr(:,en)**2*prm%t_tr x0 = mu*prm%b_tw**2/(Gamma*8.0_pReal*PI)*(2.0_pReal+nu)/(1.0_pReal-nu) ! ToDo: In the paper, this is the Burgers vector for slip dst%tau_r_tw(:,en) = mu*prm%b_tw/(2.0_pReal*PI)*(1.0_pReal/(x0+prm%x_c_tw)+cos(pi/3.0_pReal)/x0) @@ -954,11 +953,12 @@ pure subroutine kinetics_tw(Mp,T,dot_gamma_sl,ph,en,& real, dimension(param(ph)%sum_N_tw) :: & tau, & - Ndot0, & + dot_N_0, & stressRatio_r, & ddot_gamma_dtau - - integer :: i,s1,s2 + integer, dimension(2) :: & + s + integer :: i associate(prm => param(ph), stt => state(ph), dst => dependentState(ph)) @@ -966,24 +966,23 @@ pure subroutine kinetics_tw(Mp,T,dot_gamma_sl,ph,en,& do i = 1, prm%sum_N_tw tau(i) = math_tensordot(Mp,prm%P_tw(1:3,1:3,i)) isFCC: if (prm%fccTwinTransNucleation) then - s1=prm%fcc_twinNucleationSlipPair(1,i) - s2=prm%fcc_twinNucleationSlipPair(2,i) if (tau(i) < dst%tau_r_tw(i,en)) then ! ToDo: correct? 
- Ndot0=(abs(dot_gamma_sl(s1))*(stt%rho_mob(s2,en)+stt%rho_dip(s2,en))+& - abs(dot_gamma_sl(s2))*(stt%rho_mob(s1,en)+stt%rho_dip(s1,en)))/& + s=prm%fcc_twinNucleationSlipPair(1:2,i) + dot_N_0=(abs(dot_gamma_sl(s(1)))*(stt%rho_mob(s(2),en)+stt%rho_dip(s(2),en))+& + abs(dot_gamma_sl(s(2)))*(stt%rho_mob(s(1),en)+stt%rho_dip(s(1),en)))/& (prm%L_tw*prm%b_sl(i))*& (1.0_pReal-exp(-prm%V_cs/(K_B*T)*(dst%tau_r_tw(i,en)-tau(i)))) else - Ndot0=0.0_pReal + dot_N_0=0.0_pReal end if else isFCC - Ndot0=prm%dot_N_0_tw(i) + dot_N_0=prm%dot_N_0_tw(i) end if isFCC end do significantStress: where(tau > tol_math_check) StressRatio_r = (dst%tau_hat_tw(:,en)/tau)**prm%r - dot_gamma_tw = prm%gamma_char * dst%V_tw(:,en) * Ndot0*exp(-StressRatio_r) + dot_gamma_tw = prm%gamma_char * dst%V_tw(:,en) * dot_N_0*exp(-StressRatio_r) ddot_gamma_dtau = (dot_gamma_tw*prm%r/tau)*StressRatio_r else where significantStress dot_gamma_tw = 0.0_pReal @@ -1024,31 +1023,32 @@ pure subroutine kinetics_tr(Mp,T,dot_gamma_sl,ph,en,& real, dimension(param(ph)%sum_N_tr) :: & tau, & - Ndot0, & + dot_N_0, & stressRatio_s, & ddot_gamma_dtau - integer :: i,s1,s2 + integer, dimension(2) :: & + s + integer :: i associate(prm => param(ph), stt => state(ph), dst => dependentState(ph)) do i = 1, prm%sum_N_tr tau(i) = math_tensordot(Mp,prm%P_tr(1:3,1:3,i)) - s1=prm%fcc_twinNucleationSlipPair(1,i) - s2=prm%fcc_twinNucleationSlipPair(2,i) if (tau(i) < dst%tau_r_tr(i,en)) then ! ToDo: correct? - Ndot0=(abs(dot_gamma_sl(s1))*(stt%rho_mob(s2,en)+stt%rho_dip(s2,en))+& - abs(dot_gamma_sl(s2))*(stt%rho_mob(s1,en)+stt%rho_dip(s1,en)))/& + s=prm%fcc_twinNucleationSlipPair(1:2,i) + dot_N_0=(abs(dot_gamma_sl(s(1)))*(stt%rho_mob(s(2),en)+stt%rho_dip(s(2),en))+& + abs(dot_gamma_sl(s(2)))*(stt%rho_mob(s(1),en)+stt%rho_dip(s(1),en)))/& (prm%L_tr*prm%b_sl(i))*& (1.0_pReal-exp(-prm%V_cs/(K_B*T)*(dst%tau_r_tr(i,en)-tau(i)))) else - Ndot0=0.0_pReal + dot_N_0=0.0_pReal end if end do significantStress: where(tau > tol_math_check) StressRatio_s = (dst%tau_hat_tr(:,en)/tau)**prm%s - dot_gamma_tr = dst%V_tr(:,en) * Ndot0*exp(-StressRatio_s) + dot_gamma_tr = dst%V_tr(:,en) * dot_N_0*exp(-StressRatio_s) ddot_gamma_dtau = (dot_gamma_tr*prm%s/tau)*StressRatio_s else where significantStress dot_gamma_tr = 0.0_pReal diff --git a/src/phase_mechanical_plastic_nonlocal.f90 b/src/phase_mechanical_plastic_nonlocal.f90 index 9d88fb5fc..1715af2d9 100644 --- a/src/phase_mechanical_plastic_nonlocal.f90 +++ b/src/phase_mechanical_plastic_nonlocal.f90 @@ -1570,7 +1570,6 @@ subroutine stateInit(ini,phase,Nentries) upto, & s real(pReal), dimension(2) :: & - noise, & rnd real(pReal) :: & meanDensity, & From f2b6ddece1f44ffb23e587a362cef195e6c890a4 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Thu, 30 Dec 2021 22:36:06 +0100 Subject: [PATCH 13/58] reduce memory footprint --- src/phase_mechanical_plastic_dislotwin.f90 | 42 ++++++++++++++-------- 1 file changed, 27 insertions(+), 15 deletions(-) diff --git a/src/phase_mechanical_plastic_dislotwin.f90 b/src/phase_mechanical_plastic_dislotwin.f90 index 09c7ca35c..e96c0dbbf 100644 --- a/src/phase_mechanical_plastic_dislotwin.f90 +++ b/src/phase_mechanical_plastic_dislotwin.f90 @@ -103,9 +103,7 @@ submodule(phase:plastic) dislotwin tau_hat_tw, & !< threshold stress for twinning tau_hat_tr, & !< threshold stress for transformation V_tw, & !< volume of a new twin - V_tr, & !< volume of a new martensite disc - tau_r_tw, & !< stress to bring partials close together (twin) - tau_r_tr !< stress to bring partials close together (trans) + V_tr !< volume 
of a new martensite disc end type tDislotwinDependentState !-------------------------------------------------------------------------------------------------- @@ -435,12 +433,10 @@ module function plastic_dislotwin_init() result(myPlasticity) allocate(dst%Lambda_tw (prm%sum_N_tw,Nmembers),source=0.0_pReal) allocate(dst%tau_hat_tw (prm%sum_N_tw,Nmembers),source=0.0_pReal) - allocate(dst%tau_r_tw (prm%sum_N_tw,Nmembers),source=0.0_pReal) allocate(dst%V_tw (prm%sum_N_tw,Nmembers),source=0.0_pReal) allocate(dst%Lambda_tr (prm%sum_N_tr,Nmembers),source=0.0_pReal) allocate(dst%tau_hat_tr (prm%sum_N_tr,Nmembers),source=0.0_pReal) - allocate(dst%tau_r_tr (prm%sum_N_tr,Nmembers),source=0.0_pReal) allocate(dst%V_tr (prm%sum_N_tr,Nmembers),source=0.0_pReal) end associate @@ -782,12 +778,6 @@ module subroutine dislotwin_dependentState(T,ph,en) dst%V_tw(:,en) = PI/4.0_pReal*dst%Lambda_tw(:,en)**2*prm%t_tw dst%V_tr(:,en) = PI/4.0_pReal*dst%Lambda_tr(:,en)**2*prm%t_tr - x0 = mu*prm%b_tw**2/(Gamma*8.0_pReal*PI)*(2.0_pReal+nu)/(1.0_pReal-nu) ! ToDo: In the paper, this is the Burgers vector for slip - dst%tau_r_tw(:,en) = mu*prm%b_tw/(2.0_pReal*PI)*(1.0_pReal/(x0+prm%x_c_tw)+cos(pi/3.0_pReal)/x0) - - x0 = mu*prm%b_tr**2/(Gamma*8.0_pReal*PI)*(2.0_pReal+nu)/(1.0_pReal-nu) ! ToDo: In the paper, this is the Burgers vector for slip - dst%tau_r_tr(:,en) = mu*prm%b_tr/(2.0_pReal*PI)*(1.0_pReal/(x0+prm%x_c_tr)+cos(pi/3.0_pReal)/x0) - end associate end subroutine dislotwin_dependentState @@ -956,6 +946,11 @@ pure subroutine kinetics_tw(Mp,T,dot_gamma_sl,ph,en,& dot_N_0, & stressRatio_r, & ddot_gamma_dtau + real :: & + x0, & + tau_r, & + Gamma, & + mu, nu integer, dimension(2) :: & s integer :: i @@ -963,15 +958,21 @@ pure subroutine kinetics_tw(Mp,T,dot_gamma_sl,ph,en,& associate(prm => param(ph), stt => state(ph), dst => dependentState(ph)) + mu = elastic_mu(ph,en) + nu = elastic_nu(ph,en) + Gamma = prm%Gamma_sf(1) + prm%Gamma_sf(2) * (T-prm%T_ref) + do i = 1, prm%sum_N_tw tau(i) = math_tensordot(Mp,prm%P_tw(1:3,1:3,i)) isFCC: if (prm%fccTwinTransNucleation) then - if (tau(i) < dst%tau_r_tw(i,en)) then ! ToDo: correct? + x0 = mu*prm%b_tw(i)**2/(Gamma*8.0_pReal*PI)*(2.0_pReal+nu)/(1.0_pReal-nu) ! ToDo: In the paper, this is the Burgers vector for slip + tau_r = mu*prm%b_tw(i)/(2.0_pReal*PI)*(1.0_pReal/(x0+prm%x_c_tw)+cos(pi/3.0_pReal)/x0) + if (tau(i) < tau_r) then ! ToDo: correct? s=prm%fcc_twinNucleationSlipPair(1:2,i) dot_N_0=(abs(dot_gamma_sl(s(1)))*(stt%rho_mob(s(2),en)+stt%rho_dip(s(2),en))+& abs(dot_gamma_sl(s(2)))*(stt%rho_mob(s(1),en)+stt%rho_dip(s(1),en)))/& (prm%L_tw*prm%b_sl(i))*& - (1.0_pReal-exp(-prm%V_cs/(K_B*T)*(dst%tau_r_tw(i,en)-tau(i)))) + (1.0_pReal-exp(-prm%V_cs/(K_B*T)*(tau_r-tau(i)))) else dot_N_0=0.0_pReal end if @@ -1026,6 +1027,11 @@ pure subroutine kinetics_tr(Mp,T,dot_gamma_sl,ph,en,& dot_N_0, & stressRatio_s, & ddot_gamma_dtau + real :: & + x0, & + tau_r, & + Gamma, & + mu, nu integer, dimension(2) :: & s integer :: i @@ -1033,14 +1039,20 @@ pure subroutine kinetics_tr(Mp,T,dot_gamma_sl,ph,en,& associate(prm => param(ph), stt => state(ph), dst => dependentState(ph)) + mu = elastic_mu(ph,en) + nu = elastic_nu(ph,en) + Gamma = prm%Gamma_sf(1) + prm%Gamma_sf(2) * (T-prm%T_ref) + do i = 1, prm%sum_N_tr tau(i) = math_tensordot(Mp,prm%P_tr(1:3,1:3,i)) - if (tau(i) < dst%tau_r_tr(i,en)) then ! ToDo: correct? + x0 = mu*prm%b_tr(i)**2/(Gamma*8.0_pReal*PI)*(2.0_pReal+nu)/(1.0_pReal-nu) ! ToDo: In the paper, this is the Burgers vector for slip ! 
ToDo: In the paper, this is the Burgers vector for slip + tau_r = mu*prm%b_tr(i)/(2.0_pReal*PI)*(1.0_pReal/(x0+prm%x_c_tr)+cos(pi/3.0_pReal)/x0) + if (tau(i) < tau_r) then ! ToDo: correct? s=prm%fcc_twinNucleationSlipPair(1:2,i) dot_N_0=(abs(dot_gamma_sl(s(1)))*(stt%rho_mob(s(2),en)+stt%rho_dip(s(2),en))+& abs(dot_gamma_sl(s(2)))*(stt%rho_mob(s(1),en)+stt%rho_dip(s(1),en)))/& (prm%L_tr*prm%b_sl(i))*& - (1.0_pReal-exp(-prm%V_cs/(K_B*T)*(dst%tau_r_tr(i,en)-tau(i)))) + (1.0_pReal-exp(-prm%V_cs/(K_B*T)*(tau_r-tau(i)))) else dot_N_0=0.0_pReal end if From 0116e2dae605a062d0f6586a9a18239b7cf208df Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Thu, 30 Dec 2021 22:38:39 +0100 Subject: [PATCH 14/58] bugfix: write only to active twin/trans system --- src/phase_mechanical_plastic_dislotwin.f90 | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/src/phase_mechanical_plastic_dislotwin.f90 b/src/phase_mechanical_plastic_dislotwin.f90 index e96c0dbbf..b8f3516ed 100644 --- a/src/phase_mechanical_plastic_dislotwin.f90 +++ b/src/phase_mechanical_plastic_dislotwin.f90 @@ -969,15 +969,14 @@ pure subroutine kinetics_tw(Mp,T,dot_gamma_sl,ph,en,& tau_r = mu*prm%b_tw(i)/(2.0_pReal*PI)*(1.0_pReal/(x0+prm%x_c_tw)+cos(pi/3.0_pReal)/x0) if (tau(i) < tau_r) then ! ToDo: correct? s=prm%fcc_twinNucleationSlipPair(1:2,i) - dot_N_0=(abs(dot_gamma_sl(s(1)))*(stt%rho_mob(s(2),en)+stt%rho_dip(s(2),en))+& - abs(dot_gamma_sl(s(2)))*(stt%rho_mob(s(1),en)+stt%rho_dip(s(1),en)))/& - (prm%L_tw*prm%b_sl(i))*& - (1.0_pReal-exp(-prm%V_cs/(K_B*T)*(tau_r-tau(i)))) + dot_N_0(i)=(abs(dot_gamma_sl(s(1)))*(stt%rho_mob(s(2),en)+stt%rho_dip(s(2),en))+& + abs(dot_gamma_sl(s(2)))*(stt%rho_mob(s(1),en)+stt%rho_dip(s(1),en)))/& + (prm%L_tw*prm%b_sl(i))*(1.0_pReal-exp(-prm%V_cs/(K_B*T)*(tau_r-tau(i)))) else - dot_N_0=0.0_pReal + dot_N_0(i)=0.0_pReal end if else isFCC - dot_N_0=prm%dot_N_0_tw(i) + dot_N_0(i)=prm%dot_N_0_tw(i) end if isFCC end do @@ -1045,16 +1044,15 @@ pure subroutine kinetics_tr(Mp,T,dot_gamma_sl,ph,en,& do i = 1, prm%sum_N_tr tau(i) = math_tensordot(Mp,prm%P_tr(1:3,1:3,i)) - x0 = mu*prm%b_tr(i)**2/(Gamma*8.0_pReal*PI)*(2.0_pReal+nu)/(1.0_pReal-nu) ! ToDo: In the paper, this is the Burgers vector for slip ! ToDo: In the paper, this is the Burgers vector for slip + x0 = mu*prm%b_tr(i)**2/(Gamma*8.0_pReal*PI)*(2.0_pReal+nu)/(1.0_pReal-nu) ! ToDo: In the paper, this is the Burgers vector for slip tau_r = mu*prm%b_tr(i)/(2.0_pReal*PI)*(1.0_pReal/(x0+prm%x_c_tr)+cos(pi/3.0_pReal)/x0) if (tau(i) < tau_r) then ! ToDo: correct? 
s=prm%fcc_twinNucleationSlipPair(1:2,i) - dot_N_0=(abs(dot_gamma_sl(s(1)))*(stt%rho_mob(s(2),en)+stt%rho_dip(s(2),en))+& - abs(dot_gamma_sl(s(2)))*(stt%rho_mob(s(1),en)+stt%rho_dip(s(1),en)))/& - (prm%L_tr*prm%b_sl(i))*& - (1.0_pReal-exp(-prm%V_cs/(K_B*T)*(tau_r-tau(i)))) + dot_N_0(i)=(abs(dot_gamma_sl(s(1)))*(stt%rho_mob(s(2),en)+stt%rho_dip(s(2),en))+& + abs(dot_gamma_sl(s(2)))*(stt%rho_mob(s(1),en)+stt%rho_dip(s(1),en)))/& + (prm%L_tr*prm%b_sl(i))*(1.0_pReal-exp(-prm%V_cs/(K_B*T)*(tau_r-tau(i)))) else - dot_N_0=0.0_pReal + dot_N_0(i)=0.0_pReal end if end do From 017c182640a079b2b4cfdfc37d35506d95d0a0f7 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Fri, 31 Dec 2021 08:45:55 +0100 Subject: [PATCH 15/58] branch only once --- src/phase_mechanical_plastic_dislotwin.f90 | 36 +++++++++------------- 1 file changed, 15 insertions(+), 21 deletions(-) diff --git a/src/phase_mechanical_plastic_dislotwin.f90 b/src/phase_mechanical_plastic_dislotwin.f90 index b8f3516ed..5e4b757c7 100644 --- a/src/phase_mechanical_plastic_dislotwin.f90 +++ b/src/phase_mechanical_plastic_dislotwin.f90 @@ -966,7 +966,7 @@ pure subroutine kinetics_tw(Mp,T,dot_gamma_sl,ph,en,& tau(i) = math_tensordot(Mp,prm%P_tw(1:3,1:3,i)) isFCC: if (prm%fccTwinTransNucleation) then x0 = mu*prm%b_tw(i)**2/(Gamma*8.0_pReal*PI)*(2.0_pReal+nu)/(1.0_pReal-nu) ! ToDo: In the paper, this is the Burgers vector for slip - tau_r = mu*prm%b_tw(i)/(2.0_pReal*PI)*(1.0_pReal/(x0+prm%x_c_tw)+cos(pi/3.0_pReal)/x0) + tau_r = mu*prm%b_tw(i)/(2.0_pReal*PI)*(1.0_pReal/(x0+prm%x_c_tw)+cos(PI/3.0_pReal)/x0) if (tau(i) < tau_r) then ! ToDo: correct? s=prm%fcc_twinNucleationSlipPair(1:2,i) dot_N_0(i)=(abs(dot_gamma_sl(s(1)))*(stt%rho_mob(s(2),en)+stt%rho_dip(s(2),en))+& @@ -1022,13 +1022,12 @@ pure subroutine kinetics_tr(Mp,T,dot_gamma_sl,ph,en,& ddot_gamma_dtau_tr real, dimension(param(ph)%sum_N_tr) :: & - tau, & - dot_N_0, & - stressRatio_s, & ddot_gamma_dtau real :: & + stressRatio_s, & + tau, tau_r, & + dot_N_0, & x0, & - tau_r, & Gamma, & mu, nu integer, dimension(2) :: & @@ -1043,28 +1042,23 @@ pure subroutine kinetics_tr(Mp,T,dot_gamma_sl,ph,en,& Gamma = prm%Gamma_sf(1) + prm%Gamma_sf(2) * (T-prm%T_ref) do i = 1, prm%sum_N_tr - tau(i) = math_tensordot(Mp,prm%P_tr(1:3,1:3,i)) + tau = math_tensordot(Mp,prm%P_tr(1:3,1:3,i)) x0 = mu*prm%b_tr(i)**2/(Gamma*8.0_pReal*PI)*(2.0_pReal+nu)/(1.0_pReal-nu) ! ToDo: In the paper, this is the Burgers vector for slip - tau_r = mu*prm%b_tr(i)/(2.0_pReal*PI)*(1.0_pReal/(x0+prm%x_c_tr)+cos(pi/3.0_pReal)/x0) - if (tau(i) < tau_r) then ! ToDo: correct? + tau_r = mu*prm%b_tr(i)/(2.0_pReal*PI)*(1.0_pReal/(x0+prm%x_c_tr)+cos(PI/3.0_pReal)/x0) + if (tau > tol_math_check .and. 
tau < tau_r) then s=prm%fcc_twinNucleationSlipPair(1:2,i) - dot_N_0(i)=(abs(dot_gamma_sl(s(1)))*(stt%rho_mob(s(2),en)+stt%rho_dip(s(2),en))+& - abs(dot_gamma_sl(s(2)))*(stt%rho_mob(s(1),en)+stt%rho_dip(s(1),en)))/& - (prm%L_tr*prm%b_sl(i))*(1.0_pReal-exp(-prm%V_cs/(K_B*T)*(tau_r-tau(i)))) + dot_N_0=(abs(dot_gamma_sl(s(1)))*(stt%rho_mob(s(2),en)+stt%rho_dip(s(2),en))+& + abs(dot_gamma_sl(s(2)))*(stt%rho_mob(s(1),en)+stt%rho_dip(s(1),en)))/& + (prm%L_tr*prm%b_sl(i))*(1.0_pReal-exp(-prm%V_cs/(K_B*T)*(tau_r-tau))) + StressRatio_s = (dst%tau_hat_tr(i,en)/tau)**prm%s(i) + dot_gamma_tr(i) = dst%V_tr(i,en) * dot_N_0*exp(-StressRatio_s) + ddot_gamma_dtau(i) = (dot_gamma_tr(i)*prm%s(i)/tau)*StressRatio_s else - dot_N_0(i)=0.0_pReal + dot_gamma_tr(i) = 0.0_pReal + ddot_gamma_dtau(i) = 0.0_pReal end if end do - significantStress: where(tau > tol_math_check) - StressRatio_s = (dst%tau_hat_tr(:,en)/tau)**prm%s - dot_gamma_tr = dst%V_tr(:,en) * dot_N_0*exp(-StressRatio_s) - ddot_gamma_dtau = (dot_gamma_tr*prm%s/tau)*StressRatio_s - else where significantStress - dot_gamma_tr = 0.0_pReal - ddot_gamma_dtau = 0.0_pReal - end where significantStress - end associate if (present(ddot_gamma_dtau_tr)) ddot_gamma_dtau_tr = ddot_gamma_dtau From e678b231d99859f098e12f65f97b5214b2ee3608 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Fri, 31 Dec 2021 09:01:23 +0100 Subject: [PATCH 16/58] following naming convention --- src/phase_mechanical_plastic_dislotwin.f90 | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/src/phase_mechanical_plastic_dislotwin.f90 b/src/phase_mechanical_plastic_dislotwin.f90 index 5e4b757c7..3c666934a 100644 --- a/src/phase_mechanical_plastic_dislotwin.f90 +++ b/src/phase_mechanical_plastic_dislotwin.f90 @@ -944,7 +944,7 @@ pure subroutine kinetics_tw(Mp,T,dot_gamma_sl,ph,en,& real, dimension(param(ph)%sum_N_tw) :: & tau, & dot_N_0, & - stressRatio_r, & + ratio_tau_r, & ddot_gamma_dtau real :: & x0, & @@ -981,11 +981,11 @@ pure subroutine kinetics_tw(Mp,T,dot_gamma_sl,ph,en,& end do significantStress: where(tau > tol_math_check) - StressRatio_r = (dst%tau_hat_tw(:,en)/tau)**prm%r - dot_gamma_tw = prm%gamma_char * dst%V_tw(:,en) * dot_N_0*exp(-StressRatio_r) - ddot_gamma_dtau = (dot_gamma_tw*prm%r/tau)*StressRatio_r + ratio_tau_r = (dst%tau_hat_tw(:,en)/tau)**prm%r + dot_gamma_tw = prm%gamma_char * dst%V_tw(:,en) * dot_N_0*exp(-ratio_tau_r) + ddot_gamma_dtau = (dot_gamma_tw*prm%r/tau)*ratio_tau_r else where significantStress - dot_gamma_tw = 0.0_pReal + dot_gamma_tw = 0.0_pReal ddot_gamma_dtau = 0.0_pReal end where significantStress @@ -1024,7 +1024,7 @@ pure subroutine kinetics_tr(Mp,T,dot_gamma_sl,ph,en,& real, dimension(param(ph)%sum_N_tr) :: & ddot_gamma_dtau real :: & - stressRatio_s, & + ratio_tau_s, & tau, tau_r, & dot_N_0, & x0, & @@ -1046,13 +1046,16 @@ pure subroutine kinetics_tr(Mp,T,dot_gamma_sl,ph,en,& x0 = mu*prm%b_tr(i)**2/(Gamma*8.0_pReal*PI)*(2.0_pReal+nu)/(1.0_pReal-nu) ! ToDo: In the paper, this is the Burgers vector for slip tau_r = mu*prm%b_tr(i)/(2.0_pReal*PI)*(1.0_pReal/(x0+prm%x_c_tr)+cos(PI/3.0_pReal)/x0) if (tau > tol_math_check .and. 
tau < tau_r) then + + ratio_tau_s = (dst%tau_hat_tr(i,en)/tau)**prm%s(i) + s=prm%fcc_twinNucleationSlipPair(1:2,i) dot_N_0=(abs(dot_gamma_sl(s(1)))*(stt%rho_mob(s(2),en)+stt%rho_dip(s(2),en))+& abs(dot_gamma_sl(s(2)))*(stt%rho_mob(s(1),en)+stt%rho_dip(s(1),en)))/& (prm%L_tr*prm%b_sl(i))*(1.0_pReal-exp(-prm%V_cs/(K_B*T)*(tau_r-tau))) - StressRatio_s = (dst%tau_hat_tr(i,en)/tau)**prm%s(i) - dot_gamma_tr(i) = dst%V_tr(i,en) * dot_N_0*exp(-StressRatio_s) - ddot_gamma_dtau(i) = (dot_gamma_tr(i)*prm%s(i)/tau)*StressRatio_s + + dot_gamma_tr(i) = dst%V_tr(i,en) * dot_N_0*exp(-ratio_tau_s) + ddot_gamma_dtau(i) = (dot_gamma_tr(i)*prm%s(i)/tau)*ratio_tau_s else dot_gamma_tr(i) = 0.0_pReal ddot_gamma_dtau(i) = 0.0_pReal From 770cf33667739b80daa68b170f666d2f16d9b66f Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Sat, 1 Jan 2022 11:37:04 +0100 Subject: [PATCH 17/58] correct calculation of tangent. thanks to Seyedamirhossein Motaman (RWTH Aachen) for reporting --- src/phase_mechanical_plastic_dislotwin.f90 | 116 ++++++++++++--------- 1 file changed, 64 insertions(+), 52 deletions(-) diff --git a/src/phase_mechanical_plastic_dislotwin.f90 b/src/phase_mechanical_plastic_dislotwin.f90 index 3c666934a..5d2191726 100644 --- a/src/phase_mechanical_plastic_dislotwin.f90 +++ b/src/phase_mechanical_plastic_dislotwin.f90 @@ -17,8 +17,8 @@ submodule(phase:plastic) dislotwin p_sb = 1.0_pReal, & !< p-exponent in shear band velocity q_sb = 1.0_pReal, & !< q-exponent in shear band velocity i_tw = 1.0_pReal, & !< adjustment parameter to calculate MFP for twinning - L_tw = 1.0_pReal, & !< Length of twin nuclei in Burgers vectors - L_tr = 1.0_pReal, & !< Length of trans nuclei in Burgers vectors + L_tw = 1.0_pReal, & !< length of twin nuclei in Burgers vectors: TODO unit should be meters + L_tr = 1.0_pReal, & !< length of trans nuclei in Burgers vectors: TODO unit should be meters x_c_tw = 1.0_pReal, & !< critical distance for formation of twin nucleus x_c_tr = 1.0_pReal, & !< critical distance for formation of trans nucleus V_cs = 1.0_pReal, & !< cross slip volume @@ -731,8 +731,6 @@ module subroutine dislotwin_dependentState(T,ph,en) real(pReal), dimension(param(ph)%sum_N_tr) :: & inv_lambda_tr_tr, & !< 1/mean free distance between 2 martensite stacks from different systems seen by a growing martensite f_over_t_tr - real(pReal), dimension(:), allocatable :: & - x0 real(pReal) :: & mu, & nu @@ -941,16 +939,15 @@ pure subroutine kinetics_tw(Mp,T,dot_gamma_sl,ph,en,& real(pReal), dimension(param(ph)%sum_N_tw), optional, intent(out) :: & ddot_gamma_dtau_tw - real, dimension(param(ph)%sum_N_tw) :: & - tau, & - dot_N_0, & - ratio_tau_r, & - ddot_gamma_dtau real :: & + ratio_tau_s, & + tau, tau_r, & + dot_N_0, & x0, & - tau_r, & Gamma, & - mu, nu + mu, nu, & + P_ncs, dP_ncs_dtau, & + P, dP_dtau integer, dimension(2) :: & s integer :: i @@ -958,41 +955,53 @@ pure subroutine kinetics_tw(Mp,T,dot_gamma_sl,ph,en,& associate(prm => param(ph), stt => state(ph), dst => dependentState(ph)) - mu = elastic_mu(ph,en) - nu = elastic_nu(ph,en) - Gamma = prm%Gamma_sf(1) + prm%Gamma_sf(2) * (T-prm%T_ref) + isFCC: if (prm%fccTwinTransNucleation) then + mu = elastic_mu(ph,en) + nu = elastic_nu(ph,en) + Gamma = prm%Gamma_sf(1) + prm%Gamma_sf(2) * (T-prm%T_ref) - do i = 1, prm%sum_N_tw - tau(i) = math_tensordot(Mp,prm%P_tw(1:3,1:3,i)) - isFCC: if (prm%fccTwinTransNucleation) then - x0 = mu*prm%b_tw(i)**2/(Gamma*8.0_pReal*PI)*(2.0_pReal+nu)/(1.0_pReal-nu) ! 
ToDo: In the paper, this is the Burgers vector for slip + do i = 1, prm%sum_N_tw + tau = math_tensordot(Mp,prm%P_tw(1:3,1:3,i)) + x0 = mu*prm%b_tw(i)**2/(Gamma*8.0_pReal*PI)*(2.0_pReal+nu)/(1.0_pReal-nu) ! ToDo: In the paper, the Burgers vector for slip is used tau_r = mu*prm%b_tw(i)/(2.0_pReal*PI)*(1.0_pReal/(x0+prm%x_c_tw)+cos(PI/3.0_pReal)/x0) - if (tau(i) < tau_r) then ! ToDo: correct? - s=prm%fcc_twinNucleationSlipPair(1:2,i) - dot_N_0(i)=(abs(dot_gamma_sl(s(1)))*(stt%rho_mob(s(2),en)+stt%rho_dip(s(2),en))+& - abs(dot_gamma_sl(s(2)))*(stt%rho_mob(s(1),en)+stt%rho_dip(s(1),en)))/& - (prm%L_tw*prm%b_sl(i))*(1.0_pReal-exp(-prm%V_cs/(K_B*T)*(tau_r-tau(i)))) - else - dot_N_0(i)=0.0_pReal - end if - else isFCC - dot_N_0(i)=prm%dot_N_0_tw(i) - end if isFCC - end do - significantStress: where(tau > tol_math_check) - ratio_tau_r = (dst%tau_hat_tw(:,en)/tau)**prm%r - dot_gamma_tw = prm%gamma_char * dst%V_tw(:,en) * dot_N_0*exp(-ratio_tau_r) - ddot_gamma_dtau = (dot_gamma_tw*prm%r/tau)*ratio_tau_r - else where significantStress - dot_gamma_tw = 0.0_pReal - ddot_gamma_dtau = 0.0_pReal - end where significantStress + if (tau > tol_math_check .and. tau < tau_r) then + ratio_tau_s = (dst%tau_hat_tw(i,en)/tau)**prm%r(i) + P = exp(-ratio_tau_s) + dP_dTau = prm%r(i) * ratio_tau_s/tau * P + + s=prm%fcc_twinNucleationSlipPair(1:2,i) + dot_N_0=(abs(dot_gamma_sl(s(1)))*(stt%rho_mob(s(2),en)+stt%rho_dip(s(2),en))+& + abs(dot_gamma_sl(s(2)))*(stt%rho_mob(s(1),en)+stt%rho_dip(s(1),en)))/(prm%L_tw*prm%b_sl(i)) + + P_ncs = 1.0_pReal-exp(-prm%V_cs/(K_B*T)*(tau_r-tau)) + dP_ncs_dtau = prm%V_cs / (K_B * T) * (P_ncs - 1.0_pReal) + + dot_gamma_tw(i) = dst%V_tw(i,en)*dot_N_0*P_ncs*P + if (present(ddot_gamma_dtau_tw)) & + ddot_gamma_dtau_tw(i) = dst%V_tw(i,en)*dot_N_0*(P*dP_ncs_dtau + P_ncs*dP_dtau) + else + dot_gamma_tw(i) = 0.0_pReal + if (present(ddot_gamma_dtau_tw)) ddot_gamma_dtau_tw(i) = 0.0_pReal + end if + end do + + else isFCC + do i = 1, prm%sum_N_tw + error stop 'not implemented' + tau = math_tensordot(Mp,prm%P_tw(1:3,1:3,i)) + if (tau > tol_math_check) then + dot_gamma_tw(i) = 0.0_pReal + if (present(ddot_gamma_dtau_tw)) ddot_gamma_dtau_tw(i) = 0.0_pReal + else + dot_gamma_tw(i) = 0.0_pReal + if (present(ddot_gamma_dtau_tw)) ddot_gamma_dtau_tw(i) = 0.0_pReal + end if + end do + end if isFCC end associate - if (present(ddot_gamma_dtau_tw)) ddot_gamma_dtau_tw = ddot_gamma_dtau - end subroutine kinetics_tw @@ -1021,15 +1030,15 @@ pure subroutine kinetics_tr(Mp,T,dot_gamma_sl,ph,en,& real(pReal), dimension(param(ph)%sum_N_tr), optional, intent(out) :: & ddot_gamma_dtau_tr - real, dimension(param(ph)%sum_N_tr) :: & - ddot_gamma_dtau real :: & ratio_tau_s, & tau, tau_r, & dot_N_0, & x0, & Gamma, & - mu, nu + mu, nu, & + P_ncs, dP_ncs_dtau, & + P, dP_dtau integer, dimension(2) :: & s integer :: i @@ -1043,29 +1052,32 @@ pure subroutine kinetics_tr(Mp,T,dot_gamma_sl,ph,en,& do i = 1, prm%sum_N_tr tau = math_tensordot(Mp,prm%P_tr(1:3,1:3,i)) - x0 = mu*prm%b_tr(i)**2/(Gamma*8.0_pReal*PI)*(2.0_pReal+nu)/(1.0_pReal-nu) ! ToDo: In the paper, this is the Burgers vector for slip + x0 = mu*prm%b_tr(i)**2/(Gamma*8.0_pReal*PI)*(2.0_pReal+nu)/(1.0_pReal-nu) ! ToDo: In the paper, the Burgers vector for slip is used tau_r = mu*prm%b_tr(i)/(2.0_pReal*PI)*(1.0_pReal/(x0+prm%x_c_tr)+cos(PI/3.0_pReal)/x0) - if (tau > tol_math_check .and. tau < tau_r) then + if (tau > tol_math_check .and. 
tau < tau_r) then ratio_tau_s = (dst%tau_hat_tr(i,en)/tau)**prm%s(i) + P = exp(-ratio_tau_s) + dP_dTau = prm%s(i) * ratio_tau_s/tau * P s=prm%fcc_twinNucleationSlipPair(1:2,i) dot_N_0=(abs(dot_gamma_sl(s(1)))*(stt%rho_mob(s(2),en)+stt%rho_dip(s(2),en))+& - abs(dot_gamma_sl(s(2)))*(stt%rho_mob(s(1),en)+stt%rho_dip(s(1),en)))/& - (prm%L_tr*prm%b_sl(i))*(1.0_pReal-exp(-prm%V_cs/(K_B*T)*(tau_r-tau))) + abs(dot_gamma_sl(s(2)))*(stt%rho_mob(s(1),en)+stt%rho_dip(s(1),en)))/(prm%L_tr*prm%b_sl(i)) - dot_gamma_tr(i) = dst%V_tr(i,en) * dot_N_0*exp(-ratio_tau_s) - ddot_gamma_dtau(i) = (dot_gamma_tr(i)*prm%s(i)/tau)*ratio_tau_s + P_ncs = 1.0_pReal-exp(-prm%V_cs/(K_B*T)*(tau_r-tau)) + dP_ncs_dtau = prm%V_cs / (K_B * T) * (P_ncs - 1.0_pReal) + + dot_gamma_tr(i) = dst%V_tr(i,en)*dot_N_0*P_ncs*P + if (present(ddot_gamma_dtau_tr)) & + ddot_gamma_dtau_tr(i) = dst%V_tr(i,en)*dot_N_0*(P*dP_ncs_dtau + P_ncs*dP_dtau) else dot_gamma_tr(i) = 0.0_pReal - ddot_gamma_dtau(i) = 0.0_pReal + if (present(ddot_gamma_dtau_tr)) ddot_gamma_dtau_tr(i) = 0.0_pReal end if end do end associate - if (present(ddot_gamma_dtau_tr)) ddot_gamma_dtau_tr = ddot_gamma_dtau - end subroutine kinetics_tr end submodule dislotwin From d181b988c14ba48469658e115424c0b5bc62970d Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Sat, 1 Jan 2022 10:47:15 +0100 Subject: [PATCH 18/58] using vector access --- src/phase_mechanical_plastic_dislotwin.f90 | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/phase_mechanical_plastic_dislotwin.f90 b/src/phase_mechanical_plastic_dislotwin.f90 index 5d2191726..a2f0ce732 100644 --- a/src/phase_mechanical_plastic_dislotwin.f90 +++ b/src/phase_mechanical_plastic_dislotwin.f90 @@ -970,9 +970,9 @@ pure subroutine kinetics_tw(Mp,T,dot_gamma_sl,ph,en,& P = exp(-ratio_tau_s) dP_dTau = prm%r(i) * ratio_tau_s/tau * P - s=prm%fcc_twinNucleationSlipPair(1:2,i) - dot_N_0=(abs(dot_gamma_sl(s(1)))*(stt%rho_mob(s(2),en)+stt%rho_dip(s(2),en))+& - abs(dot_gamma_sl(s(2)))*(stt%rho_mob(s(1),en)+stt%rho_dip(s(1),en)))/(prm%L_tw*prm%b_sl(i)) + s = prm%fcc_twinNucleationSlipPair(1:2,i) + dot_N_0 = sum(abs(dot_gamma_sl(s(2:1:-1)))*(stt%rho_mob(s,en)+stt%rho_dip(s,en))) & + / (prm%L_tw*prm%b_sl(i)) P_ncs = 1.0_pReal-exp(-prm%V_cs/(K_B*T)*(tau_r-tau)) dP_ncs_dtau = prm%V_cs / (K_B * T) * (P_ncs - 1.0_pReal) @@ -1060,9 +1060,9 @@ pure subroutine kinetics_tr(Mp,T,dot_gamma_sl,ph,en,& P = exp(-ratio_tau_s) dP_dTau = prm%s(i) * ratio_tau_s/tau * P - s=prm%fcc_twinNucleationSlipPair(1:2,i) - dot_N_0=(abs(dot_gamma_sl(s(1)))*(stt%rho_mob(s(2),en)+stt%rho_dip(s(2),en))+& - abs(dot_gamma_sl(s(2)))*(stt%rho_mob(s(1),en)+stt%rho_dip(s(1),en)))/(prm%L_tr*prm%b_sl(i)) + s = prm%fcc_twinNucleationSlipPair(1:2,i) + dot_N_0 = sum(abs(dot_gamma_sl(s(2:1:-1)))*(stt%rho_mob(s,en)+stt%rho_dip(s,en))) & + / (prm%L_tr*prm%b_sl(i)) P_ncs = 1.0_pReal-exp(-prm%V_cs/(K_B*T)*(tau_r-tau)) dP_ncs_dtau = prm%V_cs / (K_B * T) * (P_ncs - 1.0_pReal) From 03c6708629ce603ff560555aaaa0c5389b1456bd Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Sat, 1 Jan 2022 11:26:31 +0100 Subject: [PATCH 19/58] polishing --- src/phase_mechanical_plastic_dislotwin.f90 | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/phase_mechanical_plastic_dislotwin.f90 b/src/phase_mechanical_plastic_dislotwin.f90 index a2f0ce732..6a9043cde 100644 --- a/src/phase_mechanical_plastic_dislotwin.f90 +++ b/src/phase_mechanical_plastic_dislotwin.f90 @@ -940,7 +940,7 @@ pure subroutine kinetics_tw(Mp,T,dot_gamma_sl,ph,en,& 
ddot_gamma_dtau_tw real :: & - ratio_tau_s, & + ratio_tau_r, & tau, tau_r, & dot_N_0, & x0, & @@ -962,13 +962,13 @@ pure subroutine kinetics_tw(Mp,T,dot_gamma_sl,ph,en,& do i = 1, prm%sum_N_tw tau = math_tensordot(Mp,prm%P_tw(1:3,1:3,i)) - x0 = mu*prm%b_tw(i)**2/(Gamma*8.0_pReal*PI)*(2.0_pReal+nu)/(1.0_pReal-nu) ! ToDo: In the paper, the Burgers vector for slip is used - tau_r = mu*prm%b_tw(i)/(2.0_pReal*PI)*(1.0_pReal/(x0+prm%x_c_tw)+cos(PI/3.0_pReal)/x0) + x0 = mu*prm%b_tr(i)**2*(2.0_pReal+nu)/(Gamma*8.0_pReal*PI*(1.0_pReal-nu)) ! ToDo: In the paper, the Burgers vector for slip is used + tau_r = mu*prm%b_tw(i)/(2.0_pReal*PI)*(1.0_pReal/(x0+prm%x_c_tw)+cos(PI/3.0_pReal)/x0) ! ToDo: In the paper, the Burgers vector for slip is used if (tau > tol_math_check .and. tau < tau_r) then - ratio_tau_s = (dst%tau_hat_tw(i,en)/tau)**prm%r(i) - P = exp(-ratio_tau_s) - dP_dTau = prm%r(i) * ratio_tau_s/tau * P + ratio_tau_r = (dst%tau_hat_tw(i,en)/tau)**prm%r(i) + P = exp(-ratio_tau_r) + dP_dTau = prm%r(i) * ratio_tau_r/tau * P s = prm%fcc_twinNucleationSlipPair(1:2,i) dot_N_0 = sum(abs(dot_gamma_sl(s(2:1:-1)))*(stt%rho_mob(s,en)+stt%rho_dip(s,en))) & @@ -1052,8 +1052,8 @@ pure subroutine kinetics_tr(Mp,T,dot_gamma_sl,ph,en,& do i = 1, prm%sum_N_tr tau = math_tensordot(Mp,prm%P_tr(1:3,1:3,i)) - x0 = mu*prm%b_tr(i)**2/(Gamma*8.0_pReal*PI)*(2.0_pReal+nu)/(1.0_pReal-nu) ! ToDo: In the paper, the Burgers vector for slip is used - tau_r = mu*prm%b_tr(i)/(2.0_pReal*PI)*(1.0_pReal/(x0+prm%x_c_tr)+cos(PI/3.0_pReal)/x0) + x0 = mu*prm%b_tr(i)**2*(2.0_pReal+nu)/(Gamma*8.0_pReal*PI*(1.0_pReal-nu)) ! ToDo: In the paper, the Burgers vector for slip is used + tau_r = mu*prm%b_tr(i)/(2.0_pReal*PI)*(1.0_pReal/(x0+prm%x_c_tr)+cos(PI/3.0_pReal)/x0) ! ToDo: In the paper, the Burgers vector for slip is used if (tau > tol_math_check .and. tau < tau_r) then ratio_tau_s = (dst%tau_hat_tr(i,en)/tau)**prm%s(i) From 4a7f23069c87fe0c4a04970ab36d92312e6dfeff Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Tue, 4 Jan 2022 07:32:56 +0100 Subject: [PATCH 20/58] avoid misleading variable name --- src/phase_mechanical_plastic_dislotwin.f90 | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/src/phase_mechanical_plastic_dislotwin.f90 b/src/phase_mechanical_plastic_dislotwin.f90 index 6a9043cde..e1ccf3f5f 100644 --- a/src/phase_mechanical_plastic_dislotwin.f90 +++ b/src/phase_mechanical_plastic_dislotwin.f90 @@ -940,7 +940,6 @@ pure subroutine kinetics_tw(Mp,T,dot_gamma_sl,ph,en,& ddot_gamma_dtau_tw real :: & - ratio_tau_r, & tau, tau_r, & dot_N_0, & x0, & @@ -966,9 +965,8 @@ pure subroutine kinetics_tw(Mp,T,dot_gamma_sl,ph,en,& tau_r = mu*prm%b_tw(i)/(2.0_pReal*PI)*(1.0_pReal/(x0+prm%x_c_tw)+cos(PI/3.0_pReal)/x0) ! ToDo: In the paper, the Burgers vector for slip is used if (tau > tol_math_check .and. tau < tau_r) then - ratio_tau_r = (dst%tau_hat_tw(i,en)/tau)**prm%r(i) - P = exp(-ratio_tau_r) - dP_dTau = prm%r(i) * ratio_tau_r/tau * P + P = exp(-(dst%tau_hat_tw(i,en)/tau)**prm%r(i)) + dP_dTau = prm%r(i) * (dst%tau_hat_tw(i,en)/tau)**prm%r(i)/tau * P s = prm%fcc_twinNucleationSlipPair(1:2,i) dot_N_0 = sum(abs(dot_gamma_sl(s(2:1:-1)))*(stt%rho_mob(s,en)+stt%rho_dip(s,en))) & @@ -1031,7 +1029,6 @@ pure subroutine kinetics_tr(Mp,T,dot_gamma_sl,ph,en,& ddot_gamma_dtau_tr real :: & - ratio_tau_s, & tau, tau_r, & dot_N_0, & x0, & @@ -1056,9 +1053,8 @@ pure subroutine kinetics_tr(Mp,T,dot_gamma_sl,ph,en,& tau_r = mu*prm%b_tr(i)/(2.0_pReal*PI)*(1.0_pReal/(x0+prm%x_c_tr)+cos(PI/3.0_pReal)/x0) ! 
ToDo: In the paper, the Burgers vector for slip is used if (tau > tol_math_check .and. tau < tau_r) then - ratio_tau_s = (dst%tau_hat_tr(i,en)/tau)**prm%s(i) - P = exp(-ratio_tau_s) - dP_dTau = prm%s(i) * ratio_tau_s/tau * P + P = exp(-(dst%tau_hat_tr(i,en)/tau)**prm%s(i)) + dP_dTau = prm%s(i) * (dst%tau_hat_tr(i,en)/tau)**prm%s(i)/tau * P s = prm%fcc_twinNucleationSlipPair(1:2,i) dot_N_0 = sum(abs(dot_gamma_sl(s(2:1:-1)))*(stt%rho_mob(s,en)+stt%rho_dip(s,en))) & From 03e3fbd98f0b55d492c434a33cf0decde7f511c4 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Tue, 4 Jan 2022 07:40:58 +0100 Subject: [PATCH 21/58] compute only when needed --- src/phase_mechanical_plastic_dislotwin.f90 | 25 +++++++++------------- 1 file changed, 10 insertions(+), 15 deletions(-) diff --git a/src/phase_mechanical_plastic_dislotwin.f90 b/src/phase_mechanical_plastic_dislotwin.f90 index e1ccf3f5f..41b4854a7 100644 --- a/src/phase_mechanical_plastic_dislotwin.f90 +++ b/src/phase_mechanical_plastic_dislotwin.f90 @@ -101,9 +101,7 @@ submodule(phase:plastic) dislotwin Lambda_tr, & !< mean free path between 2 obstacles seen by a growing martensite tau_pass, & !< threshold stress for slip tau_hat_tw, & !< threshold stress for twinning - tau_hat_tr, & !< threshold stress for transformation - V_tw, & !< volume of a new twin - V_tr !< volume of a new martensite disc + tau_hat_tr !< threshold stress for transformation end type tDislotwinDependentState !-------------------------------------------------------------------------------------------------- @@ -433,11 +431,9 @@ module function plastic_dislotwin_init() result(myPlasticity) allocate(dst%Lambda_tw (prm%sum_N_tw,Nmembers),source=0.0_pReal) allocate(dst%tau_hat_tw (prm%sum_N_tw,Nmembers),source=0.0_pReal) - allocate(dst%V_tw (prm%sum_N_tw,Nmembers),source=0.0_pReal) allocate(dst%Lambda_tr (prm%sum_N_tr,Nmembers),source=0.0_pReal) allocate(dst%tau_hat_tr (prm%sum_N_tr,Nmembers),source=0.0_pReal) - allocate(dst%V_tr (prm%sum_N_tr,Nmembers),source=0.0_pReal) end associate @@ -773,9 +769,6 @@ module subroutine dislotwin_dependentState(T,ph,en) + 3.0_pReal*prm%b_tr*mu/(prm%L_tr*prm%b_tr) & + prm%h*prm%delta_G/(3.0_pReal*prm%b_tr) - dst%V_tw(:,en) = PI/4.0_pReal*dst%Lambda_tw(:,en)**2*prm%t_tw - dst%V_tr(:,en) = PI/4.0_pReal*dst%Lambda_tr(:,en)**2*prm%t_tr - end associate end subroutine dislotwin_dependentState @@ -942,7 +935,7 @@ pure subroutine kinetics_tw(Mp,T,dot_gamma_sl,ph,en,& real :: & tau, tau_r, & dot_N_0, & - x0, & + x0, V, & Gamma, & mu, nu, & P_ncs, dP_ncs_dtau, & @@ -975,9 +968,10 @@ pure subroutine kinetics_tw(Mp,T,dot_gamma_sl,ph,en,& P_ncs = 1.0_pReal-exp(-prm%V_cs/(K_B*T)*(tau_r-tau)) dP_ncs_dtau = prm%V_cs / (K_B * T) * (P_ncs - 1.0_pReal) - dot_gamma_tw(i) = dst%V_tw(i,en)*dot_N_0*P_ncs*P + V = PI/4.0_pReal*dst%Lambda_tw(i,en)**2*prm%t_tw(i) + dot_gamma_tw(i) = V*dot_N_0*P_ncs*P if (present(ddot_gamma_dtau_tw)) & - ddot_gamma_dtau_tw(i) = dst%V_tw(i,en)*dot_N_0*(P*dP_ncs_dtau + P_ncs*dP_dtau) + ddot_gamma_dtau_tw(i) = V*dot_N_0*(P*dP_ncs_dtau + P_ncs*dP_dtau) else dot_gamma_tw(i) = 0.0_pReal if (present(ddot_gamma_dtau_tw)) ddot_gamma_dtau_tw(i) = 0.0_pReal @@ -1031,7 +1025,7 @@ pure subroutine kinetics_tr(Mp,T,dot_gamma_sl,ph,en,& real :: & tau, tau_r, & dot_N_0, & - x0, & + x0, V, & Gamma, & mu, nu, & P_ncs, dP_ncs_dtau, & @@ -1062,10 +1056,11 @@ pure subroutine kinetics_tr(Mp,T,dot_gamma_sl,ph,en,& P_ncs = 1.0_pReal-exp(-prm%V_cs/(K_B*T)*(tau_r-tau)) dP_ncs_dtau = prm%V_cs / (K_B * T) * (P_ncs - 1.0_pReal) - - dot_gamma_tr(i) = 
dst%V_tr(i,en)*dot_N_0*P_ncs*P + + V = PI/4.0_pReal*dst%Lambda_tr(i,en)**2*prm%t_tr(i) + dot_gamma_tr(i) = V*dot_N_0*P_ncs*P if (present(ddot_gamma_dtau_tr)) & - ddot_gamma_dtau_tr(i) = dst%V_tr(i,en)*dot_N_0*(P*dP_ncs_dtau + P_ncs*dP_dtau) + ddot_gamma_dtau_tr(i) = V*dot_N_0*(P*dP_ncs_dtau + P_ncs*dP_dtau) else dot_gamma_tr(i) = 0.0_pReal if (present(ddot_gamma_dtau_tr)) ddot_gamma_dtau_tr(i) = 0.0_pReal From 1140625b1272754102adc6f8a276563cb241eb32 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Tue, 4 Jan 2022 17:29:19 +0100 Subject: [PATCH 22/58] copy and paste error --- src/phase_mechanical_plastic_dislotwin.f90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/phase_mechanical_plastic_dislotwin.f90 b/src/phase_mechanical_plastic_dislotwin.f90 index 41b4854a7..bc6fb0ee6 100644 --- a/src/phase_mechanical_plastic_dislotwin.f90 +++ b/src/phase_mechanical_plastic_dislotwin.f90 @@ -954,7 +954,7 @@ pure subroutine kinetics_tw(Mp,T,dot_gamma_sl,ph,en,& do i = 1, prm%sum_N_tw tau = math_tensordot(Mp,prm%P_tw(1:3,1:3,i)) - x0 = mu*prm%b_tr(i)**2*(2.0_pReal+nu)/(Gamma*8.0_pReal*PI*(1.0_pReal-nu)) ! ToDo: In the paper, the Burgers vector for slip is used + x0 = mu*prm%b_tw(i)**2*(2.0_pReal+nu)/(Gamma*8.0_pReal*PI*(1.0_pReal-nu)) ! ToDo: In the paper, the Burgers vector for slip is used tau_r = mu*prm%b_tw(i)/(2.0_pReal*PI)*(1.0_pReal/(x0+prm%x_c_tw)+cos(PI/3.0_pReal)/x0) ! ToDo: In the paper, the Burgers vector for slip is used if (tau > tol_math_check .and. tau < tau_r) then From 1026d56842662ca8397c4b7f0038dcc3380644f2 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Sun, 9 Jan 2022 08:05:42 +0100 Subject: [PATCH 23/58] credit where credit is due referencing DOI gives faster access to actual data --- src/lattice.f90 | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/lattice.f90 b/src/lattice.f90 index c4b26c679..469bfa556 100644 --- a/src/lattice.f90 +++ b/src/lattice.f90 @@ -587,8 +587,8 @@ function lattice_C66_trans(Ntrans,C_parent66,lattice_target, & !-------------------------------------------------------------------------------------------------- !> @brief Non-schmid projections for bcc with up to 6 coefficients -! Koester et al. 2012, Acta Materialia 60 (2012) 3894–3901, eq. (17) -! Gröger et al. 2008, Acta Materialia 56 (2008) 5412–5425, table 1 +! https://doi.org/10.1016/j.actamat.2012.03.053, eq. (17) +! 
https://doi.org/10.1016/j.actamat.2008.07.037, table 1 !-------------------------------------------------------------------------------------------------- function lattice_nonSchmidMatrix(Nslip,nonSchmidCoefficients,sense) result(nonSchmidMatrix) @@ -602,6 +602,7 @@ function lattice_nonSchmidMatrix(Nslip,nonSchmidCoefficients,sense) result(nonSc type(rotation) :: R integer :: i + if (abs(sense) /= 1) error stop 'Sense in lattice_nonSchmidMatrix' coordinateSystem = buildCoordinateSystem(Nslip,BCC_NSLIPSYSTEM,BCC_SYSTEMSLIP,'cI',0.0_pReal) @@ -634,7 +635,9 @@ end function lattice_nonSchmidMatrix !-------------------------------------------------------------------------------------------------- !> @brief Slip-slip interaction matrix -!> details only active slip systems are considered +!> @details only active slip systems are considered +!> @details https://doi.org/10.1016/j.actamat.2016.12.040 (fcc: Tab S4-1, bcc: Tab S5-1) +!> @details https://doi.org/10.1016/j.ijplas.2014.06.010 (hex: Tab 3b) !-------------------------------------------------------------------------------------------------- function lattice_interaction_SlipBySlip(Nslip,interactionValues,lattice) result(interactionMatrix) @@ -646,6 +649,7 @@ function lattice_interaction_SlipBySlip(Nslip,interactionValues,lattice) result( integer, dimension(:), allocatable :: NslipMax integer, dimension(:,:), allocatable :: interactionTypes + integer, dimension(FCC_NSLIP,FCC_NSLIP), parameter :: & FCC_INTERACTIONSLIPSLIP = reshape( [& 1, 2, 2, 4, 7, 5, 3, 5, 5, 4, 6, 7, 10,11,10,11,12,13, & ! -----> acting (forest) From 0f0108f60330f1470c93879a1003937809180f90 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Sun, 9 Jan 2022 08:14:31 +0100 Subject: [PATCH 24/58] polishing --- examples/config/phase/mechanical/plastic/phenopowerlaw_Mg.yaml | 2 +- examples/config/phase/mechanical/plastic/phenopowerlaw_Ti.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/config/phase/mechanical/plastic/phenopowerlaw_Mg.yaml b/examples/config/phase/mechanical/plastic/phenopowerlaw_Mg.yaml index 5bc4a2e03..8a3147604 100644 --- a/examples/config/phase/mechanical/plastic/phenopowerlaw_Mg.yaml +++ b/examples/config/phase/mechanical/plastic/phenopowerlaw_Mg.yaml @@ -6,7 +6,7 @@ references: output: [xi_sl, xi_tw] -N_sl: [3, 3, 6, 0, 6] # basal, prism, -, 1. pyr, -, 2. pyr +N_sl: [3, 3, 6, 0, 6] # basal, prism, 1. pyr, -, 2. pyr N_tw: [6, 0, 6] # tension, -, compression xi_0_sl: [10.e+6, 55.e+6, 60.e+6, 0., 60.e+6] diff --git a/examples/config/phase/mechanical/plastic/phenopowerlaw_Ti.yaml b/examples/config/phase/mechanical/plastic/phenopowerlaw_Ti.yaml index c238e6afb..890f580cc 100644 --- a/examples/config/phase/mechanical/plastic/phenopowerlaw_Ti.yaml +++ b/examples/config/phase/mechanical/plastic/phenopowerlaw_Ti.yaml @@ -8,7 +8,7 @@ references: https://doi.org/10.1016/j.actamat.2017.05.015 output: [gamma_sl] -N_sl: [3, 3, 0, 12] # basal, 1. prism, -, 1. pyr +N_sl: [3, 3, 0, 12] # basal, prism, -, 1. 
pyr n_sl: 20 a_sl: 2.0 dot_gamma_0_sl: 0.001 From a87db2ba0b683f090e33b2801ba8dc5afa3d15af Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Wed, 12 Jan 2022 16:56:24 +0100 Subject: [PATCH 25/58] test for long long integer --- src/parallelization.f90 | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/parallelization.f90 b/src/parallelization.f90 index ebb9573d4..17f9698a5 100644 --- a/src/parallelization.f90 +++ b/src/parallelization.f90 @@ -86,6 +86,10 @@ subroutine parallelization_init if (err /= 0) error stop 'Could not determine MPI integer size' if (typeSize*8 /= bit_size(0)) error stop 'Mismatch between MPI and DAMASK integer' + call MPI_Type_size(MPI_INTEGER8,typeSize,err) + if (err /= 0) error stop 'Could not determine MPI integer size' + if (typeSize*8 /= bit_size(0_pI64)) error stop 'Mismatch between MPI and DAMASK integer (long long)' + call MPI_Type_size(MPI_DOUBLE,typeSize,err) if (err /= 0) error stop 'Could not determine MPI real size' if (typeSize*8 /= storage_size(0.0_pReal)) error stop 'Mismatch between MPI and DAMASK real' From 18913bb94ea6af197d714d6688a12c6093449673 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Wed, 12 Jan 2022 17:33:14 +0100 Subject: [PATCH 26/58] autodetect datatype --- src/results.f90 | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/src/results.f90 b/src/results.f90 index 7c2346095..4028d0307 100644 --- a/src/results.f90 +++ b/src/results.f90 @@ -507,6 +507,7 @@ subroutine results_mapping_phase(ID,entry,label) totalShape !< shape of the dataset (all processes) integer(HID_T) :: & + pI64_t, & ! NOT YET 64 bit! loc_id, & !< identifier of group in file dtype_id, & !< identifier of compound data type label_id, & !< identifier of label (string) in compound data type @@ -567,14 +568,15 @@ subroutine results_mapping_phase(ID,entry,label) call h5tget_size_f(dt_id, type_size_string, hdferr) if(hdferr < 0) error stop 'HDF5 error' - call h5tget_size_f(H5T_NATIVE_INTEGER, type_size_int, hdferr) + pI64_t = h5kind_to_type(kind(entryGlobal),H5_INTEGER_KIND) + call h5tget_size_f(pI64_t, type_size_int, hdferr) if(hdferr < 0) error stop 'HDF5 error' call h5tcreate_f(H5T_COMPOUND_F, type_size_string + type_size_int, dtype_id, hdferr) if(hdferr < 0) error stop 'HDF5 error' call h5tinsert_f(dtype_id, 'label', 0_SIZE_T, dt_id,hdferr) if(hdferr < 0) error stop 'HDF5 error' - call h5tinsert_f(dtype_id, 'entry', type_size_string, H5T_NATIVE_INTEGER, hdferr) + call h5tinsert_f(dtype_id, 'entry', type_size_string, pI64_t, hdferr) if(hdferr < 0) error stop 'HDF5 error' !-------------------------------------------------------------------------------------------------- @@ -586,7 +588,7 @@ subroutine results_mapping_phase(ID,entry,label) call h5tcreate_f(H5T_COMPOUND_F, type_size_int, entry_id, hdferr) if(hdferr < 0) error stop 'HDF5 error' - call h5tinsert_f(entry_id, 'entry', 0_SIZE_T, H5T_NATIVE_INTEGER, hdferr) + call h5tinsert_f(entry_id, 'entry', 0_SIZE_T, pI64_t, hdferr) if(hdferr < 0) error stop 'HDF5 error' call h5tclose_f(dt_id, hdferr) @@ -660,6 +662,7 @@ subroutine results_mapping_homogenization(ID,entry,label) totalShape !< shape of the dataset (all processes) integer(HID_T) :: & + pI64_t, & ! NOT YET 64 bit! 
loc_id, & !< identifier of group in file dtype_id, & !< identifier of compound data type label_id, & !< identifier of label (string) in compound data type @@ -716,14 +719,15 @@ subroutine results_mapping_homogenization(ID,entry,label) call h5tget_size_f(dt_id, type_size_string, hdferr) if(hdferr < 0) error stop 'HDF5 error' - call h5tget_size_f(H5T_NATIVE_INTEGER, type_size_int, hdferr) + pI64_t = h5kind_to_type(kind(entryGlobal),H5_INTEGER_KIND) + call h5tget_size_f(pI64_t, type_size_int, hdferr) if(hdferr < 0) error stop 'HDF5 error' call h5tcreate_f(H5T_COMPOUND_F, type_size_string + type_size_int, dtype_id, hdferr) if(hdferr < 0) error stop 'HDF5 error' call h5tinsert_f(dtype_id, 'label', 0_SIZE_T, dt_id,hdferr) if(hdferr < 0) error stop 'HDF5 error' - call h5tinsert_f(dtype_id, 'entry', type_size_string, H5T_NATIVE_INTEGER, hdferr) + call h5tinsert_f(dtype_id, 'entry', type_size_string, pI64_t, hdferr) if(hdferr < 0) error stop 'HDF5 error' !-------------------------------------------------------------------------------------------------- @@ -735,7 +739,7 @@ subroutine results_mapping_homogenization(ID,entry,label) call h5tcreate_f(H5T_COMPOUND_F, type_size_int, entry_id, hdferr) if(hdferr < 0) error stop 'HDF5 error' - call h5tinsert_f(entry_id, 'entry', 0_SIZE_T, H5T_NATIVE_INTEGER, hdferr) + call h5tinsert_f(entry_id, 'entry', 0_SIZE_T, pI64_t, hdferr) if(hdferr < 0) error stop 'HDF5 error' call h5tclose_f(dt_id, hdferr) From ae0eead748409ce5557dacc476b88efa8f920a60 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Wed, 12 Jan 2022 17:44:07 +0100 Subject: [PATCH 27/58] write out mapping as 64 bit integer --- src/results.f90 | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/results.f90 b/src/results.f90 index 4028d0307..31f3bb173 100644 --- a/src/results.f90 +++ b/src/results.f90 @@ -497,9 +497,9 @@ subroutine results_mapping_phase(ID,entry,label) integer, dimension(:,:), intent(in) :: entry !< phase entry at (co,ce) character(len=*), dimension(:), intent(in) :: label !< label of each phase section - integer, dimension(size(entry,1),size(entry,2)) :: & + integer(pI64), dimension(size(entry,1),size(entry,2)) :: & entryGlobal - integer, dimension(size(label),0:worldsize-1) :: entryOffset !< offset in entry counting per process + integer, dimension(size(label),0:worldsize-1) :: entryOffset !< offset in entry counting per process integer, dimension(0:worldsize-1) :: writeSize !< amount of data written per process integer(HSIZE_T), dimension(2) :: & myShape, & !< shape of the dataset (this process) @@ -507,7 +507,7 @@ subroutine results_mapping_phase(ID,entry,label) totalShape !< shape of the dataset (all processes) integer(HID_T) :: & - pI64_t, & ! NOT YET 64 bit! + pI64_t, & !< HDF5 type for pI64 (8 bit integer) loc_id, & !< identifier of group in file dtype_id, & !< identifier of compound data type label_id, & !< identifier of label (string) in compound data type @@ -529,7 +529,7 @@ subroutine results_mapping_phase(ID,entry,label) if(hdferr < 0) error stop 'HDF5 error' #ifndef PETSC - entryGlobal = entry -1 ! 0-based + entryGlobal = int(entry -1,pI64) ! 0-based #else !-------------------------------------------------------------------------------------------------- ! 
MPI settings and communication @@ -550,7 +550,7 @@ subroutine results_mapping_phase(ID,entry,label) entryOffset(:,worldrank) = sum(entryOffset(:,0:worldrank-1),2) do co = 1, size(ID,1) do ce = 1, size(ID,2) - entryGlobal(co,ce) = entry(co,ce) -1 + entryOffset(ID(co,ce),worldrank) + entryGlobal(co,ce) = int(entry(co,ce) -1 + entryOffset(ID(co,ce),worldrank),pI64) end do end do #endif @@ -652,7 +652,7 @@ subroutine results_mapping_homogenization(ID,entry,label) integer, dimension(:), intent(in) :: entry !< homogenization entry at (ce) character(len=*), dimension(:), intent(in) :: label !< label of each homogenization section - integer, dimension(size(entry,1)) :: & + integer(pI64), dimension(size(entry,1)) :: & entryGlobal integer, dimension(size(label),0:worldsize-1) :: entryOffset !< offset in entry counting per process integer, dimension(0:worldsize-1) :: writeSize !< amount of data written per process @@ -662,7 +662,7 @@ subroutine results_mapping_homogenization(ID,entry,label) totalShape !< shape of the dataset (all processes) integer(HID_T) :: & - pI64_t, & ! NOT YET 64 bit! + pI64_t, & !< HDF5 type for pI64 (8 bit integer) loc_id, & !< identifier of group in file dtype_id, & !< identifier of compound data type label_id, & !< identifier of label (string) in compound data type @@ -684,7 +684,7 @@ subroutine results_mapping_homogenization(ID,entry,label) if(hdferr < 0) error stop 'HDF5 error' #ifndef PETSC - entryGlobal = entry -1 ! 0-based + entryGlobal = int(entry -1,pI64) ! 0-based #else !-------------------------------------------------------------------------------------------------- ! MPI settings and communication @@ -702,7 +702,7 @@ subroutine results_mapping_homogenization(ID,entry,label) if(ierr /= 0) error stop 'MPI error' entryOffset(:,worldrank) = sum(entryOffset(:,0:worldrank-1),2) do ce = 1, size(ID,1) - entryGlobal(ce) = entry(ce) -1 + entryOffset(ID(ce),worldrank) + entryGlobal(ce) = int(entry(ce) -1 + entryOffset(ID(ce),worldrank),pI64) end do #endif From fd3c18ea4db4e252031f9422f2c888d9b1754ed7 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Wed, 12 Jan 2022 17:58:44 +0100 Subject: [PATCH 28/58] calculate global entry in 64 bit --- src/HDF5_utilities.f90 | 4 ++-- src/parallelization.f90 | 3 ++- src/results.f90 | 28 ++++++++++++++-------------- 3 files changed, 18 insertions(+), 17 deletions(-) diff --git a/src/HDF5_utilities.f90 b/src/HDF5_utilities.f90 index e064997e5..7645189c6 100644 --- a/src/HDF5_utilities.f90 +++ b/src/HDF5_utilities.f90 @@ -1877,7 +1877,7 @@ subroutine initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_ if (parallel) then call H5Pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr) if(hdferr < 0) error stop 'HDF5 error' - call MPI_allreduce(MPI_IN_PLACE,readSize,worldsize,MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get total output size over each process + call MPI_allreduce(MPI_IN_PLACE,readSize,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get total output size over each process if (ierr /= 0) error stop 'MPI error' end if #endif @@ -1977,7 +1977,7 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, & writeSize(worldrank+1) = int(myShape(ubound(myShape,1))) #ifdef PETSC if (parallel) then - call MPI_allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get total output size over each process + call MPI_allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr) ! 
get total output size over each process if (ierr /= 0) error stop 'MPI error' end if #endif diff --git a/src/parallelization.f90 b/src/parallelization.f90 index 17f9698a5..a4f7e5ae1 100644 --- a/src/parallelization.f90 +++ b/src/parallelization.f90 @@ -88,7 +88,8 @@ subroutine parallelization_init call MPI_Type_size(MPI_INTEGER8,typeSize,err) if (err /= 0) error stop 'Could not determine MPI integer size' - if (typeSize*8 /= bit_size(0_pI64)) error stop 'Mismatch between MPI and DAMASK integer (long long)' + if (int(typeSize,pI64)*8_pI64 /= bit_size(0_pI64)) & + error stop 'Mismatch between MPI and DAMASK integer (long long)' call MPI_Type_size(MPI_DOUBLE,typeSize,err) if (err /= 0) error stop 'Could not determine MPI real size' diff --git a/src/results.f90 b/src/results.f90 index 31f3bb173..48cce82f9 100644 --- a/src/results.f90 +++ b/src/results.f90 @@ -499,8 +499,8 @@ subroutine results_mapping_phase(ID,entry,label) integer(pI64), dimension(size(entry,1),size(entry,2)) :: & entryGlobal - integer, dimension(size(label),0:worldsize-1) :: entryOffset !< offset in entry counting per process - integer, dimension(0:worldsize-1) :: writeSize !< amount of data written per process + integer(pI64), dimension(size(label),0:worldsize-1) :: entryOffset !< offset in entry counting per process + integer, dimension(0:worldsize-1) :: writeSize !< amount of data written per process integer(HSIZE_T), dimension(2) :: & myShape, & !< shape of the dataset (this process) myOffset, & @@ -536,21 +536,21 @@ subroutine results_mapping_phase(ID,entry,label) call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr) if(hdferr < 0) error stop 'HDF5 error' - call MPI_Allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,MPI_COMM_WORLD,ierr) ! get output at each process + call MPI_Allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr) ! get output at each process if(ierr /= 0) error stop 'MPI error' - entryOffset = 0 + entryOffset = 0_pI64 do co = 1, size(ID,1) do ce = 1, size(ID,2) - entryOffset(ID(co,ce),worldrank) = entryOffset(ID(co,ce),worldrank) +1 + entryOffset(ID(co,ce),worldrank) = entryOffset(ID(co,ce),worldrank) +1_pI64 end do end do - call MPI_Allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INT,MPI_SUM,MPI_COMM_WORLD,ierr)! get offset at each process + call MPI_Allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INTEGER8,MPI_SUM,MPI_COMM_WORLD,ierr)! 
get offset at each process if(ierr /= 0) error stop 'MPI error' entryOffset(:,worldrank) = sum(entryOffset(:,0:worldrank-1),2) do co = 1, size(ID,1) do ce = 1, size(ID,2) - entryGlobal(co,ce) = int(entry(co,ce) -1 + entryOffset(ID(co,ce),worldrank),pI64) + entryGlobal(co,ce) = int(entry(co,ce),pI64) -1_pI64 + entryOffset(ID(co,ce),worldrank) end do end do #endif @@ -654,8 +654,8 @@ subroutine results_mapping_homogenization(ID,entry,label) integer(pI64), dimension(size(entry,1)) :: & entryGlobal - integer, dimension(size(label),0:worldsize-1) :: entryOffset !< offset in entry counting per process - integer, dimension(0:worldsize-1) :: writeSize !< amount of data written per process + integer(pI64), dimension(size(label),0:worldsize-1) :: entryOffset !< offset in entry counting per process + integer, dimension(0:worldsize-1) :: writeSize !< amount of data written per process integer(HSIZE_T), dimension(1) :: & myShape, & !< shape of the dataset (this process) myOffset, & @@ -691,18 +691,18 @@ subroutine results_mapping_homogenization(ID,entry,label) call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr) if(hdferr < 0) error stop 'HDF5 error' - call MPI_Allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INT,MPI_SUM,MPI_COMM_WORLD,ierr) ! get output at each process + call MPI_Allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr) ! get output at each process if(ierr /= 0) error stop 'MPI error' - entryOffset = 0 + entryOffset = 0_pI64 do ce = 1, size(ID,1) - entryOffset(ID(ce),worldrank) = entryOffset(ID(ce),worldrank) +1 + entryOffset(ID(ce),worldrank) = entryOffset(ID(ce),worldrank) +1_pI64 end do - call MPI_Allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INT,MPI_SUM,MPI_COMM_WORLD,ierr)! get offset at each process + call MPI_Allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INTEGER8,MPI_SUM,MPI_COMM_WORLD,ierr)! 
get offset at each process if(ierr /= 0) error stop 'MPI error' entryOffset(:,worldrank) = sum(entryOffset(:,0:worldrank-1),2) do ce = 1, size(ID,1) - entryGlobal(ce) = int(entry(ce) -1 + entryOffset(ID(ce),worldrank),pI64) + entryGlobal(ce) = int(entry(ce),pI64) -1_pI64 + entryOffset(ID(ce),worldrank) end do #endif From 4727652856c8062791ebe92057e21274facf09b4 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Thu, 13 Jan 2022 07:37:38 +0100 Subject: [PATCH 29/58] default integer is set via a compiler flag --- src/CPFEM.f90 | 34 +++++++++++++++++----------------- src/DAMASK_Marc.f90 | 6 +++--- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/src/CPFEM.f90 b/src/CPFEM.f90 index 8f1217a88..8ee26ffe7 100644 --- a/src/CPFEM.f90 +++ b/src/CPFEM.f90 @@ -32,18 +32,18 @@ module CPFEM real(pReal), dimension (:,:,:,:), allocatable, private :: & CPFEM_dcsdE_knownGood !< known good tangent - integer(pInt), public :: & - cycleCounter = 0_pInt !< needs description + integer, public :: & + cycleCounter = 0 !< needs description - integer(pInt), parameter, public :: & - CPFEM_CALCRESULTS = 2_pInt**0_pInt, & - CPFEM_AGERESULTS = 2_pInt**1_pInt, & - CPFEM_BACKUPJACOBIAN = 2_pInt**2_pInt, & - CPFEM_RESTOREJACOBIAN = 2_pInt**3_pInt + integer, parameter, public :: & + CPFEM_CALCRESULTS = 2**0, & + CPFEM_AGERESULTS = 2**1, & + CPFEM_BACKUPJACOBIAN = 2**2, & + CPFEM_RESTOREJACOBIAN = 2**3 type, private :: tNumerics integer :: & - iJacoStiffness !< frequency of stiffness update + iJacoStiffness !< frequency of stiffness update end type tNumerics type(tNumerics), private :: num @@ -134,12 +134,12 @@ end subroutine CPFEM_init !-------------------------------------------------------------------------------------------------- subroutine CPFEM_general(mode, ffn, ffn1, temperature_inp, dt, elFE, ip, cauchyStress, jacobian) - integer(pInt), intent(in) :: elFE, & !< FE element number + integer, intent(in) :: elFE, & !< FE element number ip !< integration point number real(pReal), intent(in) :: dt !< time increment real(pReal), dimension (3,3), intent(in) :: ffn, & !< deformation gradient for t=t0 ffn1 !< deformation gradient for t=t1 - integer(pInt), intent(in) :: mode !< computation mode 1: regular computation plus aging of results + integer, intent(in) :: mode !< computation mode 1: regular computation plus aging of results real(pReal), intent(in) :: temperature_inp !< temperature real(pReal), dimension(6), intent(out) :: cauchyStress !< stress as 6 vector real(pReal), dimension(6,6), intent(out) :: jacobian !< jacobian as 66 tensor (Consistent tangent dcs/dE) @@ -150,7 +150,7 @@ subroutine CPFEM_general(mode, ffn, ffn1, temperature_inp, dt, elFE, ip, cauchyS real(pReal), dimension (3,3,3,3) :: H_sym, & H - integer(pInt) elCP, & ! crystal plasticity element number + integer elCP, & ! 
crystal plasticity element number i, j, k, l, m, n, ph, homog, mySource,ce real(pReal), parameter :: ODD_STRESS = 1e15_pReal, & !< return value for stress if terminallyIll @@ -171,17 +171,17 @@ subroutine CPFEM_general(mode, ffn, ffn1, temperature_inp, dt, elFE, ip, cauchyS print'(a,/)', '#############################################'; flush (6) endif - if (iand(mode, CPFEM_BACKUPJACOBIAN) /= 0_pInt) & + if (iand(mode, CPFEM_BACKUPJACOBIAN) /= 0) & CPFEM_dcsde_knownGood = CPFEM_dcsde - if (iand(mode, CPFEM_RESTOREJACOBIAN) /= 0_pInt) & + if (iand(mode, CPFEM_RESTOREJACOBIAN) /= 0) & CPFEM_dcsde = CPFEM_dcsde_knownGood - if (iand(mode, CPFEM_AGERESULTS) /= 0_pInt) call CPFEM_forward + if (iand(mode, CPFEM_AGERESULTS) /= 0) call CPFEM_forward homogenization_F0(1:3,1:3,ce) = ffn homogenization_F(1:3,1:3,ce) = ffn1 - if (iand(mode, CPFEM_CALCRESULTS) /= 0_pInt) then + if (iand(mode, CPFEM_CALCRESULTS) /= 0) then validCalculation: if (terminallyIll) then call random_number(rnd) @@ -264,8 +264,8 @@ end subroutine CPFEM_forward !-------------------------------------------------------------------------------------------------- subroutine CPFEM_results(inc,time) - integer(pInt), intent(in) :: inc - real(pReal), intent(in) :: time + integer, intent(in) :: inc + real(pReal), intent(in) :: time call results_openJobFile call results_addIncrement(inc,time) diff --git a/src/DAMASK_Marc.f90 b/src/DAMASK_Marc.f90 index 0525db323..6e969ced4 100644 --- a/src/DAMASK_Marc.f90 +++ b/src/DAMASK_Marc.f90 @@ -223,9 +223,9 @@ subroutine hypela2(d,g,e,de,s,t,dt,ngens,m,nn,kcus,matus,ndi,nshear,disp, & integer :: computationMode, i, node, CPnodeID integer(pI32) :: defaultNumThreadsInt !< default value set by Marc - integer(pInt), save :: & - theInc = -1_pInt, & !< needs description - lastLovl = 0_pInt !< lovl in previous call to marc hypela2 + integer, save :: & + theInc = -1, & !< needs description + lastLovl = 0 !< lovl in previous call to marc hypela2 real(pReal), save :: & theTime = 0.0_pReal, & !< needs description theDelta = 0.0_pReal From 3fb5bd459c79f992cbd9c457d4a86060b888fe11 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Thu, 13 Jan 2022 07:43:53 +0100 Subject: [PATCH 30/58] pInt leftovers --- src/base64.f90 | 2 +- src/grid/discretization_grid.f90 | 4 ++-- src/mesh/FEM_utilities.f90 | 6 +++--- src/prec.f90 | 16 ++++++++-------- 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/src/base64.f90 b/src/base64.f90 index 7bc00a1f9..448f57d2a 100644 --- a/src/base64.f90 +++ b/src/base64.f90 @@ -151,7 +151,7 @@ pure logical function validBase64(base64_str) l = len(base64_str,pI64) validBase64 = .true. - if(mod(l,4_pI64)/=0_pI64 .or. l < 4_pInt) validBase64 = .false. + if(mod(l,4_pI64)/=0_pI64 .or. l < 4_pI64) validBase64 = .false. if(verify(base64_str(:l-2_pI64),base64_encoding, kind=pI64) /= 0_pI64) validBase64 = .false. if(verify(base64_str(l-1_pI64:),base64_encoding//'=',kind=pI64) /= 0_pI64) validBase64 = .false. 
diff --git a/src/grid/discretization_grid.f90 b/src/grid/discretization_grid.f90 index ddc9efd80..54851c1fe 100644 --- a/src/grid/discretization_grid.f90 +++ b/src/grid/discretization_grid.f90 @@ -307,7 +307,7 @@ subroutine readVTI(grid,geomSize,origin,material, & case('Float64') as_Int = int(prec_bytesToC_DOUBLE (asBytes(base64_str,headerType,compressed))) case default - call IO_error(844_pInt,ext_msg='unknown data type: '//trim(dataType)) + call IO_error(844,ext_msg='unknown data type: '//trim(dataType)) end select end function as_Int @@ -335,7 +335,7 @@ subroutine readVTI(grid,geomSize,origin,material, & case('Float64') as_pReal = real(prec_bytesToC_DOUBLE (asBytes(base64_str,headerType,compressed)),pReal) case default - call IO_error(844_pInt,ext_msg='unknown data type: '//trim(dataType)) + call IO_error(844,ext_msg='unknown data type: '//trim(dataType)) end select end function as_pReal diff --git a/src/mesh/FEM_utilities.f90 b/src/mesh/FEM_utilities.f90 index df16fa8e2..86fb52b36 100644 --- a/src/mesh/FEM_utilities.f90 +++ b/src/mesh/FEM_utilities.f90 @@ -92,7 +92,7 @@ subroutine FEM_utilities_init p_i !< integration order (quadrature rule) character(len=*), parameter :: & PETSCDEBUG = ' -snes_view -snes_monitor ' - PetscErrorCode :: ierr + PetscErrorCode :: ierr logical :: debugPETSc !< use some in debug defined options for more verbose PETSc solution @@ -103,9 +103,9 @@ subroutine FEM_utilities_init p_s = num_mesh%get_asInt('p_s',defaultVal = 2) p_i = num_mesh%get_asInt('p_i',defaultVal = p_s) - if (p_s < 1_pInt .or. p_s > size(FEM_nQuadrature,2)) & + if (p_s < 1 .or. p_s > size(FEM_nQuadrature,2)) & call IO_error(821,ext_msg='shape function order (p_s) out of bounds') - if (p_i < max(1_pInt,p_s-1_pInt) .or. p_i > p_s) & + if (p_i < max(1,p_s-1) .or. 
p_i > p_s) & call IO_error(821,ext_msg='integration order (p_i) out of bounds') debug_mesh => config_debug%get('mesh',defaultVal=emptyList) diff --git a/src/prec.f90 b/src/prec.f90 index fe3e7fc20..2616a6dcf 100644 --- a/src/prec.f90 +++ b/src/prec.f90 @@ -29,10 +29,10 @@ module prec #endif #ifdef PETSC PetscInt, private :: dummy - integer, parameter :: pPetscInt = kind(dummy) + integer, parameter :: pPETSCINT = kind(dummy) #endif - integer, parameter :: pStringLen = 256 !< default string length - integer, parameter :: pPathLen = 4096 !< maximum length of a path name on linux + integer, parameter :: pSTRINGLEN = 256 !< default string length + integer, parameter :: pPATHLEN = 4096 !< maximum length of a path name on linux real(pReal), parameter :: tol_math_check = 1.0e-8_pReal !< tolerance for internal math self-checks (rotation) @@ -277,7 +277,7 @@ subroutine selfTest integer, allocatable, dimension(:) :: realloc_lhs_test real(pReal), dimension(1) :: f - integer(pInt), dimension(1) :: i + integer(pI64), dimension(1) :: i real(pReal), dimension(2) :: r @@ -298,11 +298,11 @@ subroutine selfTest f = real(prec_bytesToC_DOUBLE(int([0,0,0,-32,+119,+65,+115,65],C_SIGNED_CHAR)),pReal) if (dNeq(f(1),20191102.0_pReal,0.0_pReal)) error stop 'prec_bytesToC_DOUBLE' - i = int(prec_bytesToC_INT32_T(int([+126,+23,+52,+1],C_SIGNED_CHAR)),pInt) - if (i(1) /= 20191102_pInt) error stop 'prec_bytesToC_INT32_T' + i = int(prec_bytesToC_INT32_T(int([+126,+23,+52,+1],C_SIGNED_CHAR)),pI64) + if (i(1) /= 20191102_pI64) error stop 'prec_bytesToC_INT32_T' - i = int(prec_bytesToC_INT64_T(int([+126,+23,+52,+1,0,0,0,0],C_SIGNED_CHAR)),pInt) - if (i(1) /= 20191102_pInt) error stop 'prec_bytesToC_INT64_T' + i = int(prec_bytesToC_INT64_T(int([+126,+23,+52,+1,0,0,0,0],C_SIGNED_CHAR)),pI64) + if (i(1) /= 20191102_pI64) error stop 'prec_bytesToC_INT64_T' end subroutine selfTest From 8223dc7fa7b01b3e540f040fb2a82b90394db287 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Thu, 13 Jan 2022 08:17:31 +0100 Subject: [PATCH 31/58] polishing MPI, HDF5, PETSc, and DAMASK might have different integer kinds .. --- src/grid/spectral_utilities.f90 | 4 ++-- src/parallelization.f90 | 14 +++++++------- src/quit.f90 | 26 +++++++++++++++----------- 3 files changed, 24 insertions(+), 20 deletions(-) diff --git a/src/grid/spectral_utilities.f90 b/src/grid/spectral_utilities.f90 index f40025039..415c4f838 100644 --- a/src/grid/spectral_utilities.f90 +++ b/src/grid/spectral_utilities.f90 @@ -292,12 +292,12 @@ subroutine spectral_utilities_init tensorSize, FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, &! no. of transforms, default iblock and oblock tensorField_real, tensorField_fourier, & ! input data, output data PETSC_COMM_WORLD, FFTW_planner_flag) ! use all processors, planer precision - if (.not. C_ASSOCIATED(planTensorForth)) error stop 'FFTW error' + if (.not. c_associated(planTensorForth)) error stop 'FFTW error' planTensorBack = fftw_mpi_plan_many_dft_c2r(3, [gridFFTW(3),gridFFTW(2),gridFFTW(1)], & ! dimension, logical length in each dimension in reversed order tensorSize, FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, &! no. of transforms, default iblock and oblock tensorField_fourier,tensorField_real, & ! input data, output data PETSC_COMM_WORLD, FFTW_planner_flag) ! all processors, planer precision - if (.not. C_ASSOCIATED(planTensorBack)) error stop 'FFTW error' + if (.not. c_associated(planTensorBack)) error stop 'FFTW error' !-------------------------------------------------------------------------------------------------- ! 
vector MPI fftw plans diff --git a/src/parallelization.f90 b/src/parallelization.f90 index a4f7e5ae1..6e1dedd70 100644 --- a/src/parallelization.f90 +++ b/src/parallelization.f90 @@ -50,7 +50,7 @@ subroutine parallelization_init !$ character(len=6) NumThreadsString - PetscErrorCode :: petsc_err + PetscErrorCode :: err_PETSc #ifdef _OPENMP ! If openMP is enabled, check if the MPI libary supports it and initialize accordingly. ! Otherwise, the first call to PETSc will do the initialization. @@ -60,18 +60,18 @@ subroutine parallelization_init #endif #if defined(DEBUG) - call PetscInitialize(PETSC_NULL_CHARACTER,petsc_err) + call PetscInitialize(PETSC_NULL_CHARACTER,err_PETSc) #else - call PetscInitializeNoArguments(petsc_err) + call PetscInitializeNoArguments(err_PETSc) #endif - CHKERRQ(petsc_err) + CHKERRQ(err_PETSc) #if defined(DEBUG) && defined(__INTEL_COMPILER) - call PetscSetFPTrap(PETSC_FP_TRAP_ON,petsc_err) + call PetscSetFPTrap(PETSC_FP_TRAP_ON,err_PETSc) #else - call PetscSetFPTrap(PETSC_FP_TRAP_OFF,petsc_err) + call PetscSetFPTrap(PETSC_FP_TRAP_OFF,err_PETSc) #endif - CHKERRQ(petsc_err) + CHKERRQ(err_PETSc) call MPI_Comm_rank(MPI_COMM_WORLD,worldrank,err) if (err /= 0) error stop 'Could not determine worldrank' diff --git a/src/quit.f90 b/src/quit.f90 index 6361ff783..3f9aabdf5 100644 --- a/src/quit.f90 +++ b/src/quit.f90 @@ -15,20 +15,21 @@ subroutine quit(stop_id) implicit none integer, intent(in) :: stop_id integer, dimension(8) :: dateAndTime - integer :: error - PetscErrorCode :: ierr = 0 + integer :: err_HDF5 + integer(MPI_INTEGER_KIND) :: err_MPI + PetscErrorCode :: err_PETSc - call h5open_f(error) - if (error /= 0) write(6,'(a,i5)') ' Error in h5open_f ',error ! prevents error if not opened yet - call h5close_f(error) - if (error /= 0) write(6,'(a,i5)') ' Error in h5close_f ',error + call h5open_f(err_HDF5) + if (err_HDF5 /= 0) write(6,'(a,i5)') ' Error in h5open_f ',err_HDF5 ! prevents error if not opened yet + call h5close_f(err_HDF5) + if (err_HDF5 /= 0) write(6,'(a,i5)') ' Error in h5close_f ',err_HDF5 - call PetscFinalize(ierr) - CHKERRQ(ierr) + call PetscFinalize(err_PETSc) + CHKERRQ(err_PETSc) #ifdef _OPENMP - call MPI_finalize(error) - if (error /= 0) write(6,'(a,i5)') ' Error in MPI_finalize',error + call MPI_finalize(err_MPI) + if (err_MPI /= 0) write(6,'(a,i5)') ' Error in MPI_finalize',err_MPI #endif call date_and_time(values = dateAndTime) @@ -40,7 +41,10 @@ subroutine quit(stop_id) dateAndTime(6),':',& dateAndTime(7) - if (stop_id == 0 .and. ierr == 0 .and. error == 0) stop 0 ! normal termination + if (stop_id == 0 .and. & + err_HDF5 == 0 .and. & + err_MPI == 0_MPI_INTEGER_KIND .and. & + err_PETSC == 0) stop 0 ! normal termination stop 1 ! 
error (message from IO_error) end subroutine quit From 1c46e7ea1a73a70eeac965a84683244beadc421b Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Thu, 13 Jan 2022 08:30:46 +0100 Subject: [PATCH 32/58] not needed --- src/prec.f90 | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/prec.f90 b/src/prec.f90 index 2616a6dcf..8de82fee8 100644 --- a/src/prec.f90 +++ b/src/prec.f90 @@ -22,11 +22,6 @@ module prec integer, parameter :: pReal = IEEE_selected_real_kind(15,307) !< number with 15 significant digits, up to 1e+-307 (typically 64 bit) integer, parameter :: pI32 = selected_int_kind(9) !< number with at least up to +-1e9 (typically 32 bit) integer, parameter :: pI64 = selected_int_kind(18) !< number with at least up to +-1e18 (typically 64 bit) -#if(INT==8) - integer, parameter :: pInt = pI64 -#else - integer, parameter :: pInt = pI32 -#endif #ifdef PETSC PetscInt, private :: dummy integer, parameter :: pPETSCINT = kind(dummy) From a3a3388855fd16765b27b6d0d7f7956479f6f3cd Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Thu, 13 Jan 2022 08:43:39 +0100 Subject: [PATCH 33/58] decouple DAMASK default integer from MPI default integer --- src/grid/spectral_utilities.f90 | 90 +++++++++++++++++---------------- src/parallelization.f90 | 64 ++++++++++++++--------- src/results.f90 | 22 ++++---- 3 files changed, 98 insertions(+), 78 deletions(-) diff --git a/src/grid/spectral_utilities.f90 b/src/grid/spectral_utilities.f90 index 415c4f838..35803481c 100644 --- a/src/grid/spectral_utilities.f90 +++ b/src/grid/spectral_utilities.f90 @@ -144,7 +144,7 @@ contains !-------------------------------------------------------------------------------------------------- subroutine spectral_utilities_init - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc integer :: i, j, k, & FFTW_planner_flag integer, dimension(3) :: k_s @@ -193,13 +193,13 @@ subroutine spectral_utilities_init 'add more using the "PETSc_options" keyword in numerics.yaml' flush(IO_STDOUT) - call PetscOptionsClear(PETSC_NULL_OPTIONS,ierr) - CHKERRQ(ierr) - if (debugPETSc) call PetscOptionsInsertString(PETSC_NULL_OPTIONS,trim(PETSCDEBUG),ierr) - CHKERRQ(ierr) + call PetscOptionsClear(PETSC_NULL_OPTIONS,err_PETSc) + CHKERRQ(err_PETSc) + if (debugPETSc) call PetscOptionsInsertString(PETSC_NULL_OPTIONS,trim(PETSCDEBUG),err_PETSc) + CHKERRQ(err_PETSc) call PetscOptionsInsertString(PETSC_NULL_OPTIONS,& - num_grid%get_asString('PETSc_options',defaultVal=''),ierr) - CHKERRQ(ierr) + num_grid%get_asString('PETSc_options',defaultVal=''),err_PETSc) + CHKERRQ(err_PETSc) grid1Red = grid(1)/2 + 1 wgt = 1.0/real(product(grid),pReal) @@ -559,8 +559,9 @@ end subroutine utilities_fourierGreenConvolution !-------------------------------------------------------------------------------------------------- real(pReal) function utilities_divergenceRMS() - integer :: i, j, k, ierr - complex(pReal), dimension(3) :: rescaledGeom + integer :: i, j, k + integer(MPI_INTEGER_KIND) :: err_MPI + complex(pReal), dimension(3) :: rescaledGeom print'(/,1x,a)', '... calculating divergence ................................................' flush(IO_STDOUT) @@ -589,8 +590,8 @@ real(pReal) function utilities_divergenceRMS() conjg(-xi1st(1:3,grid1Red,j,k))*rescaledGeom))**2) enddo; enddo if (grid(1) == 1) utilities_divergenceRMS = utilities_divergenceRMS * 0.5_pReal ! 
counted twice in case of grid(1) == 1 - call MPI_Allreduce(MPI_IN_PLACE,utilities_divergenceRMS,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr) - if (ierr /=0) error stop 'MPI error' + call MPI_Allreduce(MPI_IN_PLACE,utilities_divergenceRMS,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' utilities_divergenceRMS = sqrt(utilities_divergenceRMS) * wgt ! RMS in real space calculated with Parsevals theorem from Fourier space end function utilities_divergenceRMS @@ -601,7 +602,8 @@ end function utilities_divergenceRMS !-------------------------------------------------------------------------------------------------- real(pReal) function utilities_curlRMS() - integer :: i, j, k, l, ierr + integer :: i, j, k, l + integer(MPI_INTEGER_KIND) :: err_MPI complex(pReal), dimension(3,3) :: curl_fourier complex(pReal), dimension(3) :: rescaledGeom @@ -649,8 +651,8 @@ real(pReal) function utilities_curlRMS() + sum(curl_fourier%re**2 + curl_fourier%im**2) ! this layer (Nyquist) does not have a conjugate complex counterpart (if grid(1) /= 1) enddo; enddo - call MPI_Allreduce(MPI_IN_PLACE,utilities_curlRMS,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr) - if (ierr /=0) error stop 'MPI error' + call MPI_Allreduce(MPI_IN_PLACE,utilities_curlRMS,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' utilities_curlRMS = sqrt(utilities_curlRMS) * wgt if (grid(1) == 1) utilities_curlRMS = utilities_curlRMS * 0.5_pReal ! counted twice in case of grid(1) == 1 @@ -799,8 +801,8 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,& type(rotation), intent(in), optional :: rotation_BC !< rotation of load frame - integer :: & - i,ierr + integer :: i + integer(MPI_INTEGER_KIND) :: err_MPI real(pReal), dimension(3,3,3,3) :: dPdF_max, dPdF_min real(pReal) :: dPdF_norm_max, dPdF_norm_min real(pReal), dimension(2) :: valueAndRank !< pair of min/max norm of dPdF to synchronize min/max of dPdF @@ -818,7 +820,8 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,& P = reshape(homogenization_P, [3,3,grid(1),grid(2),grid3]) P_av = sum(sum(sum(P,dim=5),dim=4),dim=3) * wgt - call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' if (debugRotation) print'(/,1x,a,/,2(3(2x,f12.4,1x)/),3(2x,f12.4,1x))', & 'Piola--Kirchhoff stress (lab) / MPa =', transpose(P_av)*1.e-6_pReal if (present(rotation_BC)) P_av = rotation_BC%rotate(P_av) @@ -842,22 +845,22 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,& enddo valueAndRank = [dPdF_norm_max,real(worldrank,pReal)] - call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MAXLOC, MPI_COMM_WORLD, ierr) - if (ierr /= 0) error stop 'MPI error' - call MPI_Bcast(dPdF_max,81,MPI_DOUBLE,int(valueAndRank(2)),MPI_COMM_WORLD, ierr) - if (ierr /= 0) error stop 'MPI error' + call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MAXLOC, MPI_COMM_WORLD, err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' + call MPI_Bcast(dPdF_max,81,MPI_DOUBLE,int(valueAndRank(2)),MPI_COMM_WORLD, err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' valueAndRank = [dPdF_norm_min,real(worldrank,pReal)] - call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MINLOC, MPI_COMM_WORLD, ierr) - if (ierr /= 0) error stop 
'MPI error' - call MPI_Bcast(dPdF_min,81,MPI_DOUBLE,int(valueAndRank(2)),MPI_COMM_WORLD, ierr) - if (ierr /= 0) error stop 'MPI error' + call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MINLOC, MPI_COMM_WORLD, err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' + call MPI_Bcast(dPdF_min,81,MPI_DOUBLE,int(valueAndRank(2)),MPI_COMM_WORLD, err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' C_minmaxAvg = 0.5_pReal*(dPdF_max + dPdF_min) C_volAvg = sum(homogenization_dPdF,dim=5) - call MPI_Allreduce(MPI_IN_PLACE,C_volAvg,81,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr) - if (ierr /= 0) error stop 'MPI error' + call MPI_Allreduce(MPI_IN_PLACE,C_volAvg,81,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' C_volAvg = C_volAvg * wgt @@ -906,12 +909,13 @@ function utilities_forwardField(Delta_t,field_lastInc,rate,aim) real(pReal), dimension(3,3,grid(1),grid(2),grid3) :: & utilities_forwardField real(pReal), dimension(3,3) :: fieldDiff !< - aim - PetscErrorCode :: ierr + integer(MPI_INTEGER_KIND) :: err_MPI utilities_forwardField = field_lastInc + rate*Delta_t if (present(aim)) then !< correct to match average fieldDiff = sum(sum(sum(utilities_forwardField,dim=5),dim=4),dim=3)*wgt - call MPI_Allreduce(MPI_IN_PLACE,fieldDiff,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,fieldDiff,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' fieldDiff = fieldDiff - aim utilities_forwardField = utilities_forwardField - & spread(spread(spread(fieldDiff,3,grid(1)),4,grid(2)),5,grid3) @@ -982,8 +986,8 @@ subroutine utilities_updateCoords(F) integer :: & i,j,k,n, & rank_t, rank_b, & - c, & - ierr + c + integer(MPI_INTEGER_KIND) :: err_MPI #if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY) type(MPI_Request), dimension(4) :: request type(MPI_Status), dimension(4) :: status @@ -1025,8 +1029,8 @@ subroutine utilities_updateCoords(F) !-------------------------------------------------------------------------------------------------- ! average F if (grid3Offset == 0) Favg = real(tensorField_fourier(1:3,1:3,1,1,1),pReal)*wgt - call MPI_Bcast(Favg,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) - if (ierr /=0) error stop 'MPI error' + call MPI_Bcast(Favg,9,MPI_DOUBLE,0,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' !-------------------------------------------------------------------------------------------------- ! pad cell center fluctuations along z-direction (needed when running MPI simulation) @@ -1036,19 +1040,19 @@ subroutine utilities_updateCoords(F) rank_b = modulo(worldrank-1,worldsize) ! send bottom layer to process below - call MPI_Isend(IPfluct_padded(:,:,:,2), c,MPI_DOUBLE,rank_b,0,MPI_COMM_WORLD,request(1),ierr) - if (ierr /=0) error stop 'MPI error' - call MPI_Irecv(IPfluct_padded(:,:,:,grid3+2),c,MPI_DOUBLE,rank_t,0,MPI_COMM_WORLD,request(2),ierr) - if (ierr /=0) error stop 'MPI error' + call MPI_Isend(IPfluct_padded(:,:,:,2), c,MPI_DOUBLE,rank_b,0,MPI_COMM_WORLD,request(1),err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' + call MPI_Irecv(IPfluct_padded(:,:,:,grid3+2),c,MPI_DOUBLE,rank_t,0,MPI_COMM_WORLD,request(2),err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' ! 
send top layer to process above - call MPI_Isend(IPfluct_padded(:,:,:,grid3+1),c,MPI_DOUBLE,rank_t,1,MPI_COMM_WORLD,request(3),ierr) - if (ierr /=0) error stop 'MPI error' - call MPI_Irecv(IPfluct_padded(:,:,:,1), c,MPI_DOUBLE,rank_b,1,MPI_COMM_WORLD,request(4),ierr) - if (ierr /=0) error stop 'MPI error' + call MPI_Isend(IPfluct_padded(:,:,:,grid3+1),c,MPI_DOUBLE,rank_t,1,MPI_COMM_WORLD,request(3),err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' + call MPI_Irecv(IPfluct_padded(:,:,:,1), c,MPI_DOUBLE,rank_b,1,MPI_COMM_WORLD,request(4),err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' - call MPI_Waitall(4,request,status,ierr) - if (ierr /=0) error stop 'MPI error' + call MPI_Waitall(4,request,status,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' #if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY) ! ToDo #else diff --git a/src/parallelization.f90 b/src/parallelization.f90 index 6e1dedd70..94bf6d343 100644 --- a/src/parallelization.f90 +++ b/src/parallelization.f90 @@ -20,12 +20,19 @@ module parallelization implicit none private - integer, protected, public :: & - worldrank = 0, & !< MPI worldrank (/=0 for MPI simulations only) - worldsize = 1 !< MPI worldsize (/=1 for MPI simulations only) +#ifndef PETSC + integer, parameter, public :: & + MPI_INTEGER_KIND = pI64 + integer(MPI_INTEGER_KIND), parameter, public :: & + worldrank = 0_MPI_INTEGER_KIND, & !< MPI dummy worldrank + worldsize = 1_MPI_INTEGER_KIND !< MPI dummy worldsize +#else + integer(MPI_INTEGER_KIND), protected, public :: & + worldrank = 0_MPI_INTEGER_KIND, & !< MPI worldrank (/=0 for MPI simulations only) + worldsize = 1_MPI_INTEGER_KIND !< MPI worldsize (/=1 for MPI simulations only) +#endif #ifndef PETSC -public :: parallelization_bcast_str contains subroutine parallelization_bcast_str(string) @@ -44,7 +51,7 @@ contains !-------------------------------------------------------------------------------------------------- subroutine parallelization_init - integer :: err, typeSize + integer(MPI_INTEGER_KIND) :: err_MPI, typeSize !$ integer :: got_env, threadLevel !$ integer(pI32) :: OMP_NUM_THREADS !$ character(len=6) NumThreadsString @@ -54,8 +61,8 @@ subroutine parallelization_init #ifdef _OPENMP ! If openMP is enabled, check if the MPI libary supports it and initialize accordingly. ! Otherwise, the first call to PETSc will do the initialization. 
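An illustrative aside to the ierr -> err_MPI renaming in these hunks: the default Fortran integer, the PETSc integer, and the integer kind expected by the MPI library are not guaranteed to agree (for instance when the MPI library is built with 8-byte integers), which is why the error variables and kind literals carry an explicit MPI_INTEGER_KIND. A minimal stand-alone sketch of the distinction; MPI_INTEGER_KIND is mocked here so the example compiles without an MPI installation:

  program integer_kinds
    ! illustrative only: MPI_INTEGER_KIND normally comes from the MPI library
    ! (or, without PETSc, from the dummy definition in parallelization.f90 above);
    ! it is mocked so this sketch compiles stand-alone
    use, intrinsic :: ISO_FORTRAN_ENV, only: INT64
    implicit none

    integer, parameter :: MPI_INTEGER_KIND = INT64    ! stand-in for the library-provided kind

    integer                   :: i_default            ! default integer as used elsewhere in the code
    integer(MPI_INTEGER_KIND) :: err_MPI              ! error codes and counts handed to MPI

    i_default = 0
    err_MPI   = 0_MPI_INTEGER_KIND                    ! literals need the matching kind suffix

    ! the two kinds may differ in storage size, so they are not interchangeable
    print '(a,i0,a,i0)', 'bit_size(default integer) = ', bit_size(i_default), &
                         ', bit_size(MPI integer) = ', bit_size(err_MPI)
  end program integer_kinds
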
- call MPI_Init_Thread(MPI_THREAD_FUNNELED,threadLevel,err) - if (err /= 0) error stop 'MPI init failed' + call MPI_Init_Thread(MPI_THREAD_FUNNELED,threadLevel,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI init failed' if (threadLevel>>' - call MPI_Comm_size(MPI_COMM_WORLD,worldsize,err) - if (err /= 0) error stop 'Could not determine worldsize' + call MPI_Comm_size(MPI_COMM_WORLD,worldsize,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) & + error stop 'Could not determine worldsize' if (worldrank == 0) print'(/,1x,a,i3)', 'MPI processes: ',worldsize - call MPI_Type_size(MPI_INTEGER,typeSize,err) - if (err /= 0) error stop 'Could not determine MPI integer size' - if (typeSize*8 /= bit_size(0)) error stop 'Mismatch between MPI and DAMASK integer' + call MPI_Type_size(MPI_INTEGER,typeSize,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) & + error stop 'Could not determine size of MPI_INTEGER' + if (typeSize*8_MPI_INTEGER_KIND /= int(bit_size(0),MPI_INTEGER_KIND)) & + error stop 'Mismatch between MPI_INTEGER and DAMASK default integer' - call MPI_Type_size(MPI_INTEGER8,typeSize,err) - if (err /= 0) error stop 'Could not determine MPI integer size' - if (int(typeSize,pI64)*8_pI64 /= bit_size(0_pI64)) & - error stop 'Mismatch between MPI and DAMASK integer (long long)' + call MPI_Type_size(MPI_INTEGER8,typeSize,err_MPI) + if (err_MPI /= 0) & + error stop 'Could not determine size of MPI_INTEGER8' + if (typeSize*8_MPI_INTEGER_KIND /= int(bit_size(0_pI64),MPI_INTEGER_KIND)) & + error stop 'Mismatch between MPI_INTEGER8 and DAMASK pI64' - call MPI_Type_size(MPI_DOUBLE,typeSize,err) - if (err /= 0) error stop 'Could not determine MPI real size' - if (typeSize*8 /= storage_size(0.0_pReal)) error stop 'Mismatch between MPI and DAMASK real' + call MPI_Type_size(MPI_DOUBLE,typeSize,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) & + error stop 'Could not determine size of MPI_DOUBLE' + if (typeSize*8_MPI_INTEGER_KIND /= int(storage_size(0.0_pReal),MPI_INTEGER_KIND)) & + error stop 'Mismatch between MPI_DOUBLE and DAMASK pReal' if (worldrank /= 0) then close(OUTPUT_UNIT) ! disable output @@ -124,14 +138,14 @@ subroutine parallelization_bcast_str(string) character(len=:), allocatable, intent(inout) :: string - integer :: strlen, ierr ! pI64 for strlen not supported by MPI + integer(MPI_INTEGER_KIND) :: strlen, err_MPI - if (worldrank == 0) strlen = len(string) - call MPI_Bcast(strlen,1,MPI_INTEGER,0,MPI_COMM_WORLD, ierr) + if (worldrank == 0) strlen = len(string,MPI_INTEGER_KIND) + call MPI_Bcast(strlen,1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD, err_MPI) if (worldrank /= 0) allocate(character(len=strlen)::string) - call MPI_Bcast(string,strlen,MPI_CHARACTER,0,MPI_COMM_WORLD, ierr) + call MPI_Bcast(string,strlen,MPI_CHARACTER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD, err_MPI) end subroutine parallelization_bcast_str diff --git a/src/results.f90 b/src/results.f90 index 48cce82f9..d85a30dc0 100644 --- a/src/results.f90 +++ b/src/results.f90 @@ -519,7 +519,8 @@ subroutine results_mapping_phase(ID,entry,label) dt_id integer(SIZE_T) :: type_size_string, type_size_int - integer :: hdferr, ierr, ce, co + integer :: hdferr, ce, co + integer(MPI_INTEGER_KIND) :: err_MPI writeSize = 0 @@ -536,8 +537,8 @@ subroutine results_mapping_phase(ID,entry,label) call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr) if(hdferr < 0) error stop 'HDF5 error' - call MPI_Allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr) ! 
get output at each process - if(ierr /= 0) error stop 'MPI error' + call MPI_Allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI) ! get output at each process + if(err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' entryOffset = 0_pI64 do co = 1, size(ID,1) @@ -545,8 +546,8 @@ subroutine results_mapping_phase(ID,entry,label) entryOffset(ID(co,ce),worldrank) = entryOffset(ID(co,ce),worldrank) +1_pI64 end do end do - call MPI_Allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INTEGER8,MPI_SUM,MPI_COMM_WORLD,ierr)! get offset at each process - if(ierr /= 0) error stop 'MPI error' + call MPI_Allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INTEGER8,MPI_SUM,MPI_COMM_WORLD,err_MPI)! get offset at each process + if(err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' entryOffset(:,worldrank) = sum(entryOffset(:,0:worldrank-1),2) do co = 1, size(ID,1) do ce = 1, size(ID,2) @@ -674,7 +675,8 @@ subroutine results_mapping_homogenization(ID,entry,label) dt_id integer(SIZE_T) :: type_size_string, type_size_int - integer :: hdferr, ierr, ce + integer :: hdferr, ce + integer(MPI_INTEGER_KIND) :: err_MPI writeSize = 0 @@ -691,15 +693,15 @@ subroutine results_mapping_homogenization(ID,entry,label) call h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr) if(hdferr < 0) error stop 'HDF5 error' - call MPI_Allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr) ! get output at each process - if(ierr /= 0) error stop 'MPI error' + call MPI_Allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI) ! get output at each process + if(err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' entryOffset = 0_pI64 do ce = 1, size(ID,1) entryOffset(ID(ce),worldrank) = entryOffset(ID(ce),worldrank) +1_pI64 end do - call MPI_Allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INTEGER8,MPI_SUM,MPI_COMM_WORLD,ierr)! get offset at each process - if(ierr /= 0) error stop 'MPI error' + call MPI_Allreduce(MPI_IN_PLACE,entryOffset,size(entryOffset),MPI_INTEGER8,MPI_SUM,MPI_COMM_WORLD,err_MPI)! get offset at each process + if(err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' entryOffset(:,worldrank) = sum(entryOffset(:,0:worldrank-1),2) do ce = 1, size(ID,1) entryGlobal(ce) = int(entry(ce),pI64) -1_pI64 + entryOffset(ID(ce),worldrank) From 64395d4b2b53b8321133df5676d59414edc1c2fd Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Thu, 13 Jan 2022 10:18:17 +0100 Subject: [PATCH 34/58] not needed anymore setting precision is done via compiler flag. 
Theoretically, it would work to simply use pI64 for the interface (hypela2 and uedinc) but stupid common blocks would need to be adjusted as well --- install/MarcMentat/2020/Marc_tools/include_linux64.patch | 6 ++---- install/MarcMentat/2021.2/Marc_tools/include_linux64.patch | 6 ++---- .../MarcMentat/2021.3.1/Marc_tools/include_linux64.patch | 6 ++---- 3 files changed, 6 insertions(+), 12 deletions(-) diff --git a/install/MarcMentat/2020/Marc_tools/include_linux64.patch b/install/MarcMentat/2020/Marc_tools/include_linux64.patch index c28b41b9e..9849584fa 100644 --- a/install/MarcMentat/2020/Marc_tools/include_linux64.patch +++ b/install/MarcMentat/2020/Marc_tools/include_linux64.patch @@ -12,17 +12,15 @@ # AEM if test "$MARCDLLOUTDIR" = ""; then -@@ -390,8 +395,8 @@ +@@ -390,7 +395,7 @@ I8DEFINES= I8CDEFINES= else - I8FFLAGS="-i8" -- I8DEFINES="-DI64" + I8FFLAGS="-i8 -integer-size 64" -+ I8DEFINES="-DI64 -DINT=8" + I8DEFINES="-DI64" I8CDEFINES="-U_DOUBLE -D_SINGLE" fi - @@ -498,7 +503,7 @@ PROFILE=" $PROFILE -pg" fi diff --git a/install/MarcMentat/2021.2/Marc_tools/include_linux64.patch b/install/MarcMentat/2021.2/Marc_tools/include_linux64.patch index 430d5bc61..5ff02dae6 100644 --- a/install/MarcMentat/2021.2/Marc_tools/include_linux64.patch +++ b/install/MarcMentat/2021.2/Marc_tools/include_linux64.patch @@ -12,17 +12,15 @@ # AEM if test "$MARCDLLOUTDIR" = ""; then -@@ -439,8 +444,8 @@ +@@ -439,7 +444,7 @@ I8DEFINES= I8CDEFINES= else - I8FFLAGS="-i8" -- I8DEFINES="-DI64" + I8FFLAGS="-i8 -integer-size 64" -+ I8DEFINES="-DI64 -DINT=8" + I8DEFINES="-DI64" I8CDEFINES="-U_DOUBLE -D_SINGLE" fi - @@ -556,7 +561,7 @@ PROFILE=" $PROFILE -pg" fi diff --git a/install/MarcMentat/2021.3.1/Marc_tools/include_linux64.patch b/install/MarcMentat/2021.3.1/Marc_tools/include_linux64.patch index 91e2b1b59..e1cc543c6 100644 --- a/install/MarcMentat/2021.3.1/Marc_tools/include_linux64.patch +++ b/install/MarcMentat/2021.3.1/Marc_tools/include_linux64.patch @@ -12,17 +12,15 @@ # AEM if test "$MARCDLLOUTDIR" = ""; then DLLOUTDIR="$MARC_LIB" -@@ -439,8 +444,8 @@ if test "$MARC_INTEGER_SIZE" = "i4" ; then +@@ -439,7 +444,7 @@ if test "$MARC_INTEGER_SIZE" = "i4" ; then I8DEFINES= I8CDEFINES= else - I8FFLAGS="-i8" -- I8DEFINES="-DI64" + I8FFLAGS="-i8 -integer-size 64" -+ I8DEFINES="-DI64 -DINT=8" + I8DEFINES="-DI64" I8CDEFINES="-U_DOUBLE -D_SINGLE" fi - @@ -556,7 +561,7 @@ then PROFILE=" $PROFILE -pg" fi From a7417a7ad76c78a2d6fc96fa331c2981849c2a88 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Thu, 13 Jan 2022 10:41:36 +0100 Subject: [PATCH 35/58] default integer, PETSc integer, and MPI integer might be different --- src/HDF5_utilities.f90 | 15 +- src/grid/DAMASK_grid.f90 | 14 +- src/grid/discretization_grid.f90 | 39 +- src/grid/grid_mech_FEM.f90 | 277 ++++++------- src/grid/grid_mech_spectral_basic.f90 | 119 +++--- src/grid/grid_mech_spectral_polarisation.f90 | 126 +++--- src/mesh/DAMASK_mesh.f90 | 6 +- src/mesh/FEM_utilities.f90 | 58 +-- src/mesh/discretization_mesh.f90 | 97 ++--- src/mesh/mesh_mech_FEM.f90 | 386 ++++++++++--------- src/results.f90 | 2 +- 11 files changed, 607 insertions(+), 532 deletions(-) diff --git a/src/HDF5_utilities.f90 b/src/HDF5_utilities.f90 index 7645189c6..d2076c2cc 100644 --- a/src/HDF5_utilities.f90 +++ b/src/HDF5_utilities.f90 @@ -1857,13 +1857,13 @@ subroutine initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_ localShape integer(HSIZE_T), intent(out), dimension(size(localShape,1)):: & myStart, & - globalShape !< shape of the dataset (all processes) + 
globalShape !< shape of the dataset (all processes) integer(HID_T), intent(out) :: dset_id, filespace_id, memspace_id, plist_id, aplist_id integer, dimension(worldsize) :: & readSize !< contribution of all processes - integer :: ierr integer :: hdferr + integer(MPI_INTEGER_KIND) :: err_MPI !------------------------------------------------------------------------------------------------- ! creating a property list for transfer properties (is collective for MPI) @@ -1877,8 +1877,8 @@ subroutine initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_ if (parallel) then call H5Pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr) if(hdferr < 0) error stop 'HDF5 error' - call MPI_allreduce(MPI_IN_PLACE,readSize,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get total output size over each process - if (ierr /= 0) error stop 'MPI error' + call MPI_allreduce(MPI_IN_PLACE,readSize,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,err_MPI) ! get total output size over each process + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' end if #endif myStart = int(0,HSIZE_T) @@ -1956,7 +1956,8 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, & integer, dimension(worldsize) :: writeSize !< contribution of all processes integer(HID_T) :: dcpl - integer :: ierr, hdferr + integer :: hdferr + integer(MPI_INTEGER_KIND) :: err_MPI integer(HSIZE_T), parameter :: chunkSize = 1024_HSIZE_T**2/8_HSIZE_T @@ -1977,8 +1978,8 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, & writeSize(worldrank+1) = int(myShape(ubound(myShape,1))) #ifdef PETSC if (parallel) then - call MPI_allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,ierr) ! get total output size over each process - if (ierr /= 0) error stop 'MPI error' + call MPI_allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,err_MPI) ! get total output size over each process + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' end if #endif myStart = int(0,HSIZE_T) diff --git a/src/grid/DAMASK_grid.f90 b/src/grid/DAMASK_grid.f90 index b1da0c2a2..6377748be 100644 --- a/src/grid/DAMASK_grid.f90 +++ b/src/grid/DAMASK_grid.f90 @@ -75,7 +75,6 @@ program DAMASK_grid integer :: & i, j, m, field, & errorID = 0, & - ierr,& cutBackLevel = 0, & !< cut back level \f$ t = \frac{t_{inc}}{2^l} \f$ stepFraction = 0, & !< fraction of current time interval l = 0, & !< current load case @@ -86,6 +85,7 @@ program DAMASK_grid nActiveFields = 0, & maxCutBack, & !< max number of cut backs stagItMax !< max number of field level staggered iterations + integer(MPI_INTEGER_KIND) :: err_MPI character(len=pStringLen) :: & incInfo @@ -455,23 +455,23 @@ program DAMASK_grid print'(/,1x,a,i0,a)', 'increment ', totalIncsCounter, ' NOT converged' endif; flush(IO_STDOUT) - call MPI_Allreduce(interface_SIGUSR1,signal,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr) - if (ierr /= 0) error stop 'MPI error' + call MPI_Allreduce(interface_SIGUSR1,signal,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' if (mod(inc,loadCases(l)%f_out) == 0 .or. signal) then print'(/,1x,a)', '... writing results to file ...............................................' flush(IO_STDOUT) call CPFEM_results(totalIncsCounter,t) endif if (signal) call interface_setSIGUSR1(.false.) 
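The surrounding hunk synchronizes POSIX signal flags across MPI ranks with a logical OR reduction, so that a signal caught on any single rank triggers result or restart writing collectively on all ranks. A condensed stand-alone sketch of that pattern, assuming an MPI library with the mpi_f08 module (program and variable names are placeholders, not DAMASK API):

  program signal_sync
    use mpi_f08
    implicit none

    logical :: sig_local, sig_any
    integer :: err                                   ! default integer suffices for mpi_f08 error arguments

    call MPI_Init(err)

    sig_local = .false.                              ! would be raised by a signal handler on one rank
    call MPI_Allreduce(sig_local,sig_any,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err)
    if (err /= 0) error stop 'MPI error'

    if (sig_any) print *, 'all ranks write results together'

    call MPI_Finalize(err)
  end program signal_sync
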
- call MPI_Allreduce(interface_SIGUSR2,signal,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr) - if (ierr /= 0) error stop 'MPI error' + call MPI_Allreduce(interface_SIGUSR2,signal,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' if (mod(inc,loadCases(l)%f_restart) == 0 .or. signal) then call mechanical_restartWrite call CPFEM_restartWrite endif if (signal) call interface_setSIGUSR2(.false.) - call MPI_Allreduce(interface_SIGTERM,signal,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr) - if (ierr /= 0) error stop 'MPI error' + call MPI_Allreduce(interface_SIGTERM,signal,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' if (signal) exit loadCaseLooping endif skipping diff --git a/src/grid/discretization_grid.f90 b/src/grid/discretization_grid.f90 index 54851c1fe..93b3cbc91 100644 --- a/src/grid/discretization_grid.f90 +++ b/src/grid/discretization_grid.f90 @@ -27,14 +27,14 @@ module discretization_grid private integer, dimension(3), public, protected :: & - grid !< (global) grid + grid !< (global) grid integer, public, protected :: & - grid3, & !< (local) grid in 3rd direction + grid3, & !< (local) grid in 3rd direction grid3Offset !< (local) grid offset in 3rd direction real(pReal), dimension(3), public, protected :: & - geomSize !< (global) physical size + geomSize !< (global) physical size real(pReal), public, protected :: & - size3, & !< (local) size in 3rd direction + size3, & !< (local) size in 3rd direction size3offset !< (local) size offset in 3rd direction public :: & @@ -62,8 +62,8 @@ subroutine discretization_grid_init(restart) integer :: & j, & - debug_element, debug_ip, & - ierr + debug_element, debug_ip + integer(MPI_INTEGER_KIND) :: err_MPI integer(C_INTPTR_T) :: & devNull, z, z_offset integer, dimension(worldsize) :: & @@ -88,13 +88,13 @@ subroutine discretization_grid_init(restart) end if - call MPI_Bcast(grid,3,MPI_INTEGER,0,MPI_COMM_WORLD, ierr) - if (ierr /= 0) error stop 'MPI error' + call MPI_Bcast(grid,3,MPI_INTEGER,0,MPI_COMM_WORLD, err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' if (grid(1) < 2) call IO_error(844, ext_msg='cells(1) must be larger than 1') - call MPI_Bcast(geomSize,3,MPI_DOUBLE,0,MPI_COMM_WORLD, ierr) - if (ierr /= 0) error stop 'MPI error' - call MPI_Bcast(origin,3,MPI_DOUBLE,0,MPI_COMM_WORLD, ierr) - if (ierr /= 0) error stop 'MPI error' + call MPI_Bcast(geomSize,3,MPI_DOUBLE,0,MPI_COMM_WORLD, err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' + call MPI_Bcast(origin,3,MPI_DOUBLE,0,MPI_COMM_WORLD, err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' print'(/,1x,a,3(i12,1x))', 'cells a b c: ', grid print '(1x,a,3(es12.5,1x))', 'size x y z: ', geomSize @@ -118,14 +118,17 @@ subroutine discretization_grid_init(restart) myGrid = [grid(1:2),grid3] mySize = [geomSize(1:2),size3] - call MPI_Gather(product(grid(1:2))*grid3Offset,1,MPI_INTEGER,displs, 1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr) - if (ierr /= 0) error stop 'MPI error' - call MPI_Gather(product(myGrid), 1,MPI_INTEGER,sendcounts,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr) - if (ierr /= 0) error stop 'MPI error' + call MPI_Gather(product(grid(1:2))*grid3Offset, 1_MPI_INTEGER_KIND,MPI_INTEGER,displs,& + 1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' + call MPI_Gather(product(myGrid), 1_MPI_INTEGER_KIND,MPI_INTEGER,sendcounts,& + 
1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' allocate(materialAt(product(myGrid))) - call MPI_Scatterv(materialAt_global,sendcounts,displs,MPI_INTEGER,materialAt,size(materialAt),MPI_INTEGER,0,MPI_COMM_WORLD,ierr) - if (ierr /= 0) error stop 'MPI error' + call MPI_Scatterv(materialAt_global,sendcounts,displs,MPI_INTEGER,materialAt,size(materialAt),& + MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call discretization_init(materialAt, & IPcoordinates0(myGrid,mySize,grid3Offset), & diff --git a/src/grid/grid_mech_FEM.f90 b/src/grid/grid_mech_FEM.f90 index 6030e9a74..dbb87440b 100644 --- a/src/grid/grid_mech_FEM.f90 +++ b/src/grid/grid_mech_FEM.f90 @@ -107,7 +107,8 @@ subroutine grid_mechanical_FEM_init 1.0_pReal,-1.0_pReal,-1.0_pReal,-1.0_pReal, & 1.0_pReal, 1.0_pReal, 1.0_pReal, 1.0_pReal], [4,8]) real(pReal), dimension(3,3,3,3) :: devNull - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc + integer(MPI_INTEGER_KIND) :: err_MPI PetscScalar, pointer, dimension(:,:,:,:) :: & u_current,u_lastInc PetscInt, dimension(0:worldsize-1) :: localK @@ -146,10 +147,10 @@ subroutine grid_mechanical_FEM_init call PetscOptionsInsertString(PETSC_NULL_OPTIONS, & '-mechanical_snes_type newtonls -mechanical_ksp_type fgmres & &-mechanical_ksp_max_it 25 -mechanical_mg_levels_ksp_type chebyshev', & - ierr) - CHKERRQ(ierr) - call PetscOptionsInsertString(PETSC_NULL_OPTIONS,num_grid%get_asString('petsc_options',defaultVal=''),ierr) - CHKERRQ(ierr) + err_PETSc) + CHKERRQ(err_PETSc) + call PetscOptionsInsertString(PETSC_NULL_OPTIONS,num_grid%get_asString('petsc_options',defaultVal=''),err_PETSc) + CHKERRQ(err_PETSc) !-------------------------------------------------------------------------------------------------- ! allocate global fields @@ -159,13 +160,14 @@ subroutine grid_mechanical_FEM_init !-------------------------------------------------------------------------------------------------- ! initialize solver specific parts of PETSc - call SNESCreate(PETSC_COMM_WORLD,mechanical_snes,ierr) - CHKERRQ(ierr) - call SNESSetOptionsPrefix(mechanical_snes,'mechanical_',ierr) - CHKERRQ(ierr) + call SNESCreate(PETSC_COMM_WORLD,mechanical_snes,err_PETSc) + CHKERRQ(err_PETSc) + call SNESSetOptionsPrefix(mechanical_snes,'mechanical_',err_PETSc) + CHKERRQ(err_PETSc) localK = 0_pPetscInt localK(worldrank) = int(grid3,pPetscInt) - call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI) + if(err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call DMDACreate3d(PETSC_COMM_WORLD, & DM_BOUNDARY_PERIODIC, DM_BOUNDARY_PERIODIC, DM_BOUNDARY_PERIODIC, & DMDA_STENCIL_BOX, & @@ -173,45 +175,45 @@ subroutine grid_mechanical_FEM_init 1_pPetscInt, 1_pPetscInt, int(worldsize,pPetscInt), & 3_pPetscInt, 1_pPetscInt, & ! #dof (u, vector), ghost boundary width (domain overlap) [int(grid(1),pPetscInt)],[int(grid(2),pPetscInt)],localK, & ! 
local grid - mechanical_grid,ierr) - CHKERRQ(ierr) - call SNESSetDM(mechanical_snes,mechanical_grid,ierr) - CHKERRQ(ierr) - call DMsetFromOptions(mechanical_grid,ierr) - CHKERRQ(ierr) - call DMsetUp(mechanical_grid,ierr) - CHKERRQ(ierr) - call DMDASetUniformCoordinates(mechanical_grid,0.0_pReal,geomSize(1),0.0_pReal,geomSize(2),0.0_pReal,geomSize(3),ierr) - CHKERRQ(ierr) - call DMCreateGlobalVector(mechanical_grid,solution_current,ierr) - CHKERRQ(ierr) - call DMCreateGlobalVector(mechanical_grid,solution_lastInc,ierr) - CHKERRQ(ierr) - call DMCreateGlobalVector(mechanical_grid,solution_rate ,ierr) - CHKERRQ(ierr) - call DMSNESSetFunctionLocal(mechanical_grid,formResidual,PETSC_NULL_SNES,ierr) - CHKERRQ(ierr) - call DMSNESSetJacobianLocal(mechanical_grid,formJacobian,PETSC_NULL_SNES,ierr) - CHKERRQ(ierr) - call SNESSetConvergenceTest(mechanical_snes,converged,PETSC_NULL_SNES,PETSC_NULL_FUNCTION,ierr) ! specify custom convergence check function "_converged" - CHKERRQ(ierr) - call SNESSetMaxLinearSolveFailures(mechanical_snes, huge(1_pPetscInt), ierr) ! ignore linear solve failures - CHKERRQ(ierr) - call SNESSetFromOptions(mechanical_snes,ierr) ! pull it all together with additional cli arguments - CHKERRQ(ierr) + mechanical_grid,err_PETSc) + CHKERRQ(err_PETSc) + call SNESSetDM(mechanical_snes,mechanical_grid,err_PETSc) + CHKERRQ(err_PETSc) + call DMsetFromOptions(mechanical_grid,err_PETSc) + CHKERRQ(err_PETSc) + call DMsetUp(mechanical_grid,err_PETSc) + CHKERRQ(err_PETSc) + call DMDASetUniformCoordinates(mechanical_grid,0.0_pReal,geomSize(1),0.0_pReal,geomSize(2),0.0_pReal,geomSize(3),err_PETSc) + CHKERRQ(err_PETSc) + call DMCreateGlobalVector(mechanical_grid,solution_current,err_PETSc) + CHKERRQ(err_PETSc) + call DMCreateGlobalVector(mechanical_grid,solution_lastInc,err_PETSc) + CHKERRQ(err_PETSc) + call DMCreateGlobalVector(mechanical_grid,solution_rate ,err_PETSc) + CHKERRQ(err_PETSc) + call DMSNESSetFunctionLocal(mechanical_grid,formResidual,PETSC_NULL_SNES,err_PETSc) + CHKERRQ(err_PETSc) + call DMSNESSetJacobianLocal(mechanical_grid,formJacobian,PETSC_NULL_SNES,err_PETSc) + CHKERRQ(err_PETSc) + call SNESSetConvergenceTest(mechanical_snes,converged,PETSC_NULL_SNES,PETSC_NULL_FUNCTION,err_PETSc) ! specify custom convergence check function "_converged" + CHKERRQ(err_PETSc) + call SNESSetMaxLinearSolveFailures(mechanical_snes, huge(1_pPetscInt), err_PETSc) ! ignore linear solve failures + CHKERRQ(err_PETSc) + call SNESSetFromOptions(mechanical_snes,err_PETSc) ! pull it all together with additional cli arguments + CHKERRQ(err_PETSc) !-------------------------------------------------------------------------------------------------- ! init fields - call VecSet(solution_current,0.0_pReal,ierr);CHKERRQ(ierr) - call VecSet(solution_lastInc,0.0_pReal,ierr);CHKERRQ(ierr) - call VecSet(solution_rate ,0.0_pReal,ierr);CHKERRQ(ierr) - call DMDAVecGetArrayF90(mechanical_grid,solution_current,u_current,ierr) - CHKERRQ(ierr) - call DMDAVecGetArrayF90(mechanical_grid,solution_lastInc,u_lastInc,ierr) - CHKERRQ(ierr) + call VecSet(solution_current,0.0_pReal,err_PETSc);CHKERRQ(err_PETSc) + call VecSet(solution_lastInc,0.0_pReal,err_PETSc);CHKERRQ(err_PETSc) + call VecSet(solution_rate ,0.0_pReal,err_PETSc);CHKERRQ(err_PETSc) + call DMDAVecGetArrayF90(mechanical_grid,solution_current,u_current,err_PETSc) + CHKERRQ(err_PETSc) + call DMDAVecGetArrayF90(mechanical_grid,solution_lastInc,u_lastInc,err_PETSc) + CHKERRQ(err_PETSc) - call DMDAGetCorners(mechanical_grid,xstart,ystart,zstart,xend,yend,zend,ierr) ! 
local grid extent - CHKERRQ(ierr) + call DMDAGetCorners(mechanical_grid,xstart,ystart,zstart,xend,yend,zend,err_PETSc) ! local grid extent + CHKERRQ(err_PETSc) xend = xstart+xend-1 yend = ystart+yend-1 zend = zstart+zend-1 @@ -239,17 +241,17 @@ subroutine grid_mechanical_FEM_init groupHandle = HDF5_openGroup(fileHandle,'solver') call HDF5_read(P_aim,groupHandle,'P_aim',.false.) - call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) - if(ierr /=0) error stop 'MPI error' + call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,err_MPI) + if(err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_read(F_aim,groupHandle,'F_aim',.false.) - call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) - if(ierr /=0) error stop 'MPI error' + call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,err_MPI) + if(err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.) - call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) - if(ierr /=0) error stop 'MPI error' + call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,MPI_COMM_WORLD,err_MPI) + if(err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.) - call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) - if(ierr /=0) error stop 'MPI error' + call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,MPI_COMM_WORLD,err_MPI) + if(err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_read(F,groupHandle,'F') call HDF5_read(F_lastInc,groupHandle,'F_lastInc') call HDF5_read(u_current,groupHandle,'u') @@ -265,19 +267,19 @@ subroutine grid_mechanical_FEM_init call utilities_constitutiveResponse(P_current,P_av,C_volAvg,devNull, & ! stress field, stress avg, global average of stiffness and (min+max)/2 F, & ! target F 0.0_pReal) ! time increment - call DMDAVecRestoreArrayF90(mechanical_grid,solution_current,u_current,ierr) - CHKERRQ(ierr) - call DMDAVecRestoreArrayF90(mechanical_grid,solution_lastInc,u_lastInc,ierr) - CHKERRQ(ierr) + call DMDAVecRestoreArrayF90(mechanical_grid,solution_current,u_current,err_PETSc) + CHKERRQ(err_PETSc) + call DMDAVecRestoreArrayF90(mechanical_grid,solution_lastInc,u_lastInc,err_PETSc) + CHKERRQ(err_PETSc) restartRead2: if (interface_restartInc > 0) then print'(1x,a,i0,a)', 'reading more restart data of increment ', interface_restartInc, ' from file' call HDF5_read(C_volAvg,groupHandle,'C_volAvg',.false.) - call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) - if(ierr /=0) error stop 'MPI error' + call MPI_Bcast(C_volAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) + if(err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_read(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.) - call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) - if(ierr /=0) error stop 'MPI error' + call MPI_Bcast(C_volAvgLastInc,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) + if(err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_closeGroup(groupHandle) call HDF5_closeFile(fileHandle) @@ -300,7 +302,7 @@ function grid_mechanical_FEM_solution(incInfoIn) result(solution) solution !-------------------------------------------------------------------------------------------------- ! 
PETSc Data - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc SNESConvergedReason :: reason incInfo = incInfoIn @@ -311,13 +313,13 @@ function grid_mechanical_FEM_solution(incInfoIn) result(solution) !-------------------------------------------------------------------------------------------------- ! solve BVP - call SNESsolve(mechanical_snes,PETSC_NULL_VEC,solution_current,ierr) - CHKERRQ(ierr) + call SNESsolve(mechanical_snes,PETSC_NULL_VEC,solution_current,err_PETSc) + CHKERRQ(err_PETSc) !-------------------------------------------------------------------------------------------------- ! check convergence - call SNESGetConvergedReason(mechanical_snes,reason,ierr) - CHKERRQ(ierr) + call SNESGetConvergedReason(mechanical_snes,reason,err_PETSc) + CHKERRQ(err_PETSc) solution%converged = reason > 0 solution%iterationsNeeded = totalIter @@ -347,22 +349,22 @@ subroutine grid_mechanical_FEM_forward(cutBack,guess,Delta_t,Delta_t_old,t_remai deformation_BC type(rotation), intent(in) :: & rotation_BC - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc PetscScalar, pointer, dimension(:,:,:,:) :: & u_current,u_lastInc - call DMDAVecGetArrayF90(mechanical_grid,solution_current,u_current,ierr) - CHKERRQ(ierr) - call DMDAVecGetArrayF90(mechanical_grid,solution_lastInc,u_lastInc,ierr) - CHKERRQ(ierr) + call DMDAVecGetArrayF90(mechanical_grid,solution_current,u_current,err_PETSc) + CHKERRQ(err_PETSc) + call DMDAVecGetArrayF90(mechanical_grid,solution_lastInc,u_lastInc,err_PETSc) + CHKERRQ(err_PETSc) if (cutBack) then C_volAvg = C_volAvgLastInc else C_volAvgLastInc = C_volAvg - F_aimDot = merge(merge(.0_pReal,(F_aim-F_aim_lastInc)/Delta_t_old,stress_BC%mask),.0_pReal,guess) ! estimate deformation rate for prescribed stress components + F_aimDot = merge(merge(.0_pReal,(F_aim-F_aim_lastInc)/Delta_t_old,stress_BC%mask),.0_pReal,guess) ! 
estimate deformation rate for prescribed stress components F_aim_lastInc = F_aim !----------------------------------------------------------------------------------------------- @@ -379,13 +381,14 @@ subroutine grid_mechanical_FEM_forward(cutBack,guess,Delta_t,Delta_t_old,t_remai endif if (guess) then - call VecWAXPY(solution_rate,-1.0_pReal,solution_lastInc,solution_current,ierr) - CHKERRQ(ierr) - call VecScale(solution_rate,1.0_pReal/Delta_t_old,ierr); CHKERRQ(ierr) + call VecWAXPY(solution_rate,-1.0_pReal,solution_lastInc,solution_current,err_PETSc) + CHKERRQ(err_PETSc) + call VecScale(solution_rate,1.0_pReal/Delta_t_old,err_PETSc) + CHKERRQ(err_PETSc) else - call VecSet(solution_rate,0.0_pReal,ierr); CHKERRQ(ierr) + call VecSet(solution_rate,0.0_pReal,err_PETSc); CHKERRQ(err_PETSc) endif - call VecCopy(solution_current,solution_lastInc,ierr); CHKERRQ(ierr) + call VecCopy(solution_current,solution_lastInc,err_PETSc); CHKERRQ(err_PETSc) F_lastInc = F @@ -400,12 +403,12 @@ subroutine grid_mechanical_FEM_forward(cutBack,guess,Delta_t,Delta_t_old,t_remai if (stress_BC%myType=='dot_P') P_aim = P_aim & + merge(.0_pReal,stress_BC%values,stress_BC%mask)*Delta_t - call VecAXPY(solution_current,Delta_t,solution_rate,ierr); CHKERRQ(ierr) - - call DMDAVecRestoreArrayF90(mechanical_grid,solution_current,u_current,ierr) - CHKERRQ(ierr) - call DMDAVecRestoreArrayF90(mechanical_grid,solution_lastInc,u_lastInc,ierr) - CHKERRQ(ierr) + call VecAXPY(solution_current,Delta_t,solution_rate,err_PETSc) + CHKERRQ(err_PETSc) + call DMDAVecRestoreArrayF90(mechanical_grid,solution_current,u_current,err_PETSc) + CHKERRQ(err_PETSc) + call DMDAVecRestoreArrayF90(mechanical_grid,solution_lastInc,u_lastInc,err_PETSc) + CHKERRQ(err_PETSc) !-------------------------------------------------------------------------------------------------- ! 
set module wide available data @@ -431,15 +434,15 @@ end subroutine grid_mechanical_FEM_updateCoords !-------------------------------------------------------------------------------------------------- subroutine grid_mechanical_FEM_restartWrite - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc integer(HID_T) :: fileHandle, groupHandle PetscScalar, dimension(:,:,:,:), pointer :: u_current,u_lastInc - call DMDAVecGetArrayF90(mechanical_grid,solution_current,u_current,ierr) - CHKERRQ(ierr) - call DMDAVecGetArrayF90(mechanical_grid,solution_lastInc,u_lastInc,ierr) - CHKERRQ(ierr) + call DMDAVecGetArrayF90(mechanical_grid,solution_current,u_current,err_PETSc) + CHKERRQ(err_PETSc) + call DMDAVecGetArrayF90(mechanical_grid,solution_lastInc,u_lastInc,err_PETSc) + CHKERRQ(err_PETSc) print'(1x,a)', 'writing solver data required for restart to file'; flush(IO_STDOUT) @@ -465,10 +468,10 @@ subroutine grid_mechanical_FEM_restartWrite call HDF5_closeFile(fileHandle) endif - call DMDAVecRestoreArrayF90(mechanical_grid,solution_current,u_current,ierr) - CHKERRQ(ierr) - call DMDAVecRestoreArrayF90(mechanical_grid,solution_lastInc,u_lastInc,ierr) - CHKERRQ(ierr) + call DMDAVecRestoreArrayF90(mechanical_grid,solution_current,u_current,err_PETSc) + CHKERRQ(err_PETSc) + call DMDAVecRestoreArrayF90(mechanical_grid,solution_lastInc,u_lastInc,err_PETSc) + CHKERRQ(err_PETSc) end subroutine grid_mechanical_FEM_restartWrite @@ -476,7 +479,7 @@ end subroutine grid_mechanical_FEM_restartWrite !-------------------------------------------------------------------------------------------------- !> @brief convergence check !-------------------------------------------------------------------------------------------------- -subroutine converged(snes_local,PETScIter,devNull1,devNull2,fnorm,reason,dummy,ierr) +subroutine converged(snes_local,PETScIter,devNull1,devNull2,fnorm,reason,dummy,err_PETSc) SNES :: snes_local PetscInt, intent(in) :: PETScIter @@ -486,7 +489,7 @@ subroutine converged(snes_local,PETScIter,devNull1,devNull2,fnorm,reason,dummy,i fnorm SNESConvergedReason :: reason PetscObject :: dummy - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc real(pReal) :: & err_div, & divTol, & @@ -520,7 +523,7 @@ end subroutine converged !> @brief forms the residual vector !-------------------------------------------------------------------------------------------------- subroutine formResidual(da_local,x_local, & - f_local,dummy,ierr) + f_local,dummy,err_PETSc) DM :: da_local Vec :: x_local, f_local @@ -531,13 +534,14 @@ subroutine formResidual(da_local,x_local, & PETScIter, & nfuncs PetscObject :: dummy - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc + integer(MPI_INTEGER_KIND) :: err_MPI real(pReal), dimension(3,3,3,3) :: devNull - call SNESGetNumberFunctionEvals(mechanical_snes,nfuncs,ierr) - CHKERRQ(ierr) - call SNESGetIterationNumber(mechanical_snes,PETScIter,ierr) - CHKERRQ(ierr) + call SNESGetNumberFunctionEvals(mechanical_snes,nfuncs,err_PETSc) + CHKERRQ(err_PETSc) + call SNESGetIterationNumber(mechanical_snes,PETScIter,err_PETSc) + CHKERRQ(err_PETSc) if (nfuncs == 0 .and. PETScIter == 0) totalIter = -1 ! new increment @@ -555,7 +559,7 @@ subroutine formResidual(da_local,x_local, & !-------------------------------------------------------------------------------------------------- ! 
get deformation gradient - call DMDAVecGetArrayF90(da_local,x_local,x_scal,ierr);CHKERRQ(ierr) + call DMDAVecGetArrayF90(da_local,x_local,x_scal,err_PETSc);CHKERRQ(err_PETSc) do k = zstart, zend; do j = ystart, yend; do i = xstart, xend ctr = 0 do kk = 0, 1; do jj = 0, 1; do ii = 0, 1 @@ -565,14 +569,15 @@ subroutine formResidual(da_local,x_local, & ii = i-xstart+1; jj = j-ystart+1; kk = k-zstart+1 F(1:3,1:3,ii,jj,kk) = params%rotation_BC%rotate(F_aim,active=.true.) + transpose(matmul(BMat,x_elem)) enddo; enddo; enddo - call DMDAVecRestoreArrayF90(da_local,x_local,x_scal,ierr);CHKERRQ(ierr) + call DMDAVecRestoreArrayF90(da_local,x_local,x_scal,err_PETSc);CHKERRQ(err_PETSc) !-------------------------------------------------------------------------------------------------- ! evaluate constitutive response call utilities_constitutiveResponse(P_current,& P_av,C_volAvg,devNull, & F,params%Delta_t,params%rotation_BC) - call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI) + if(err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' !-------------------------------------------------------------------------------------------------- ! stress BC handling @@ -581,9 +586,9 @@ subroutine formResidual(da_local,x_local, & !-------------------------------------------------------------------------------------------------- ! constructing residual - call VecSet(f_local,0.0_pReal,ierr);CHKERRQ(ierr) - call DMDAVecGetArrayF90(da_local,f_local,f_scal,ierr);CHKERRQ(ierr) - call DMDAVecGetArrayF90(da_local,x_local,x_scal,ierr);CHKERRQ(ierr) + call VecSet(f_local,0.0_pReal,err_PETSc);CHKERRQ(err_PETSc) + call DMDAVecGetArrayF90(da_local,f_local,f_scal,err_PETSc);CHKERRQ(err_PETSc) + call DMDAVecGetArrayF90(da_local,x_local,x_scal,err_PETSc);CHKERRQ(err_PETSc) ele = 0 do k = zstart, zend; do j = ystart, yend; do i = xstart, xend ctr = 0 @@ -603,12 +608,12 @@ subroutine formResidual(da_local,x_local, & f_scal(0:2,i+ii,j+jj,k+kk) = f_scal(0:2,i+ii,j+jj,k+kk) + f_elem(ctr,1:3) enddo; enddo; enddo enddo; enddo; enddo - call DMDAVecRestoreArrayF90(da_local,x_local,x_scal,ierr);CHKERRQ(ierr) - call DMDAVecRestoreArrayF90(da_local,f_local,f_scal,ierr);CHKERRQ(ierr) + call DMDAVecRestoreArrayF90(da_local,x_local,x_scal,err_PETSc);CHKERRQ(err_PETSc) + call DMDAVecRestoreArrayF90(da_local,f_local,f_scal,err_PETSc);CHKERRQ(err_PETSc) !-------------------------------------------------------------------------------------------------- ! 
applying boundary conditions - call DMDAVecGetArrayF90(da_local,f_local,f_scal,ierr);CHKERRQ(ierr) + call DMDAVecGetArrayF90(da_local,f_local,f_scal,err_PETSc);CHKERRQ(err_PETSc) if (zstart == 0) then f_scal(0:2,xstart,ystart,zstart) = 0.0 f_scal(0:2,xend+1,ystart,zstart) = 0.0 @@ -621,7 +626,7 @@ subroutine formResidual(da_local,x_local, & f_scal(0:2,xstart,yend+1,zend+1) = 0.0 f_scal(0:2,xend+1,yend+1,zend+1) = 0.0 endif - call DMDAVecRestoreArrayF90(da_local,f_local,f_scal,ierr);CHKERRQ(ierr) + call DMDAVecRestoreArrayF90(da_local,f_local,f_scal,err_PETSc);CHKERRQ(err_PETSc) end subroutine formResidual @@ -629,7 +634,7 @@ end subroutine formResidual !-------------------------------------------------------------------------------------------------- !> @brief forms the FEM stiffness matrix !-------------------------------------------------------------------------------------------------- -subroutine formJacobian(da_local,x_local,Jac_pre,Jac,dummy,ierr) +subroutine formJacobian(da_local,x_local,Jac_pre,Jac,dummy,err_PETSc) DM :: da_local Vec :: x_local, coordinates @@ -643,15 +648,17 @@ subroutine formJacobian(da_local,x_local,Jac_pre,Jac,dummy,ierr) PetscScalar :: diag PetscObject :: dummy MatNullSpace :: matnull - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc BMatFull = 0.0 BMatFull(1:3,1 :8 ) = BMat BMatFull(4:6,9 :16) = BMat BMatFull(7:9,17:24) = BMat - call MatSetOption(Jac,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE,ierr); CHKERRQ(ierr) - call MatSetOption(Jac,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_FALSE,ierr); CHKERRQ(ierr) - call MatZeroEntries(Jac,ierr); CHKERRQ(ierr) + call MatSetOption(Jac,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE,err_PETSc) + CHKERRQ(err_PETSc) + call MatSetOption(Jac,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_FALSE,err_PETSc) + CHKERRQ(err_PETSc) + call MatZeroEntries(Jac,err_PETSc); CHKERRQ(err_PETSc) ele = 0 do k = zstart, zend; do j = ystart, yend; do i = xstart, xend ctr = 0 @@ -686,34 +693,42 @@ subroutine formJacobian(da_local,x_local,Jac_pre,Jac,dummy,ierr) matmul(transpose(BMatFull), & matmul(reshape(reshape(homogenization_dPdF(1:3,1:3,1:3,1:3,ele), & shape=[3,3,3,3], order=[2,1,4,3]),shape=[9,9]),BMatFull))*detJ - call MatSetValuesStencil(Jac,24_pPETScInt,row,24_pPetscInt,col,K_ele,ADD_VALUES,ierr) - CHKERRQ(ierr) + call MatSetValuesStencil(Jac,24_pPETScInt,row,24_pPetscInt,col,K_ele,ADD_VALUES,err_PETSc) + CHKERRQ(err_PETSc) enddo; enddo; enddo - call MatAssemblyBegin(Jac,MAT_FINAL_ASSEMBLY,ierr); CHKERRQ(ierr) - call MatAssemblyEnd(Jac,MAT_FINAL_ASSEMBLY,ierr); CHKERRQ(ierr) - call MatAssemblyBegin(Jac_pre,MAT_FINAL_ASSEMBLY,ierr); CHKERRQ(ierr) - call MatAssemblyEnd(Jac_pre,MAT_FINAL_ASSEMBLY,ierr); CHKERRQ(ierr) + call MatAssemblyBegin(Jac,MAT_FINAL_ASSEMBLY,err_PETSc); CHKERRQ(err_PETSc) + call MatAssemblyEnd(Jac,MAT_FINAL_ASSEMBLY,err_PETSc); CHKERRQ(err_PETSc) + call MatAssemblyBegin(Jac_pre,MAT_FINAL_ASSEMBLY,err_PETSc); CHKERRQ(err_PETSc) + call MatAssemblyEnd(Jac_pre,MAT_FINAL_ASSEMBLY,err_PETSc); CHKERRQ(err_PETSc) !-------------------------------------------------------------------------------------------------- ! 
applying boundary conditions diag = (C_volAvg(1,1,1,1)/delta(1)**2 + & C_volAvg(2,2,2,2)/delta(2)**2 + & C_volAvg(3,3,3,3)/delta(3)**2)*detJ - call MatZeroRowsColumns(Jac,size(rows,kind=pPetscInt),rows,diag,PETSC_NULL_VEC,PETSC_NULL_VEC,ierr) - CHKERRQ(ierr) - call DMGetGlobalVector(da_local,coordinates,ierr); CHKERRQ(ierr) - call DMDAVecGetArrayF90(da_local,coordinates,x_scal,ierr); CHKERRQ(ierr) + call MatZeroRowsColumns(Jac,size(rows,kind=pPetscInt),rows,diag,PETSC_NULL_VEC,PETSC_NULL_VEC,err_PETSc) + CHKERRQ(err_PETSc) + call DMGetGlobalVector(da_local,coordinates,err_PETSc) + CHKERRQ(err_PETSc) + call DMDAVecGetArrayF90(da_local,coordinates,x_scal,err_PETSc) + CHKERRQ(err_PETSc) ele = 0 do k = zstart, zend; do j = ystart, yend; do i = xstart, xend ele = ele + 1 x_scal(0:2,i,j,k) = discretization_IPcoords(1:3,ele) enddo; enddo; enddo - call DMDAVecRestoreArrayF90(da_local,coordinates,x_scal,ierr); CHKERRQ(ierr) ! initialize to undeformed coordinates (ToDo: use ip coordinates) - call MatNullSpaceCreateRigidBody(coordinates,matnull,ierr); CHKERRQ(ierr) ! get rigid body deformation modes - call DMRestoreGlobalVector(da_local,coordinates,ierr); CHKERRQ(ierr) - call MatSetNullSpace(Jac,matnull,ierr); CHKERRQ(ierr) - call MatSetNearNullSpace(Jac,matnull,ierr); CHKERRQ(ierr) - call MatNullSpaceDestroy(matnull,ierr); CHKERRQ(ierr) + call DMDAVecRestoreArrayF90(da_local,coordinates,x_scal,err_PETSc) + CHKERRQ(err_PETSc) ! initialize to undeformed coordinates (ToDo: use ip coordinates) + call MatNullSpaceCreateRigidBody(coordinates,matnull,err_PETSc) + CHKERRQ(err_PETSc) ! get rigid body deformation modes + call DMRestoreGlobalVector(da_local,coordinates,err_PETSc) + CHKERRQ(err_PETSc) + call MatSetNullSpace(Jac,matnull,err_PETSc) + CHKERRQ(err_PETSc) + call MatSetNearNullSpace(Jac,matnull,err_PETSc) + CHKERRQ(err_PETSc) + call MatNullSpaceDestroy(matnull,err_PETSc) + CHKERRQ(err_PETSc) end subroutine formJacobian diff --git a/src/grid/grid_mech_spectral_basic.f90 b/src/grid/grid_mech_spectral_basic.f90 index 0a267ecca..2d5376781 100644 --- a/src/grid/grid_mech_spectral_basic.f90 +++ b/src/grid/grid_mech_spectral_basic.f90 @@ -97,7 +97,8 @@ contains subroutine grid_mechanical_spectral_basic_init real(pReal), dimension(3,3,grid(1),grid(2),grid3) :: P - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc + integer(MPI_INTEGER_KIND) :: err_MPI PetscScalar, pointer, dimension(:,:,:,:) :: & F ! pointer to solution data PetscInt, dimension(0:worldsize-1) :: localK @@ -145,10 +146,10 @@ subroutine grid_mechanical_spectral_basic_init !-------------------------------------------------------------------------------------------------- ! set default and user defined options for PETSc - call PetscOptionsInsertString(PETSC_NULL_OPTIONS,'-mechanical_snes_type ngmres',ierr) - CHKERRQ(ierr) - call PetscOptionsInsertString(PETSC_NULL_OPTIONS,num_grid%get_asString('petsc_options',defaultVal=''),ierr) - CHKERRQ(ierr) + call PetscOptionsInsertString(PETSC_NULL_OPTIONS,'-mechanical_snes_type ngmres',err_PETSc) + CHKERRQ(err_PETSc) + call PetscOptionsInsertString(PETSC_NULL_OPTIONS,num_grid%get_asString('petsc_options',defaultVal=''),err_PETSc) + CHKERRQ(err_PETSc) !-------------------------------------------------------------------------------------------------- ! allocate global fields @@ -157,11 +158,12 @@ subroutine grid_mechanical_spectral_basic_init !-------------------------------------------------------------------------------------------------- ! 
initialize solver specific parts of PETSc - call SNESCreate(PETSC_COMM_WORLD,snes,ierr); CHKERRQ(ierr) - call SNESSetOptionsPrefix(snes,'mechanical_',ierr);CHKERRQ(ierr) + call SNESCreate(PETSC_COMM_WORLD,snes,err_PETSc); CHKERRQ(err_PETSc) + call SNESSetOptionsPrefix(snes,'mechanical_',err_PETSc);CHKERRQ(err_PETSc) localK = 0_pPetscInt localK(worldrank) = int(grid3,pPetscInt) - call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call DMDACreate3d(PETSC_COMM_WORLD, & DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point @@ -169,21 +171,21 @@ subroutine grid_mechanical_spectral_basic_init 1_pPetscInt, 1_pPetscInt, int(worldsize,pPetscInt), & 9_pPetscInt, 0_pPetscInt, & ! #dof (F, tensor), ghost boundary width (domain overlap) [int(grid(1),pPetscInt)],[int(grid(2),pPetscInt)],localK, & ! local grid - da,ierr) ! handle, error - CHKERRQ(ierr) - call SNESSetDM(snes,da,ierr); CHKERRQ(ierr) ! connect snes to da - call DMsetFromOptions(da,ierr); CHKERRQ(ierr) - call DMsetUp(da,ierr); CHKERRQ(ierr) - call DMcreateGlobalVector(da,solution_vec,ierr); CHKERRQ(ierr) ! global solution vector (grid x 9, i.e. every def grad tensor) - call DMDASNESsetFunctionLocal(da,INSERT_VALUES,formResidual,PETSC_NULL_SNES,ierr) ! residual vector of same shape as solution vector - CHKERRQ(ierr) - call SNESsetConvergenceTest(snes,converged,PETSC_NULL_SNES,PETSC_NULL_FUNCTION,ierr) ! specify custom convergence check function "converged" - CHKERRQ(ierr) - call SNESsetFromOptions(snes,ierr); CHKERRQ(ierr) ! pull it all together with additional CLI arguments + da,err_PETSc) ! handle, error + CHKERRQ(err_PETSc) + call SNESSetDM(snes,da,err_PETSc); CHKERRQ(err_PETSc) ! connect snes to da + call DMsetFromOptions(da,err_PETSc); CHKERRQ(err_PETSc) + call DMsetUp(da,err_PETSc); CHKERRQ(err_PETSc) + call DMcreateGlobalVector(da,solution_vec,err_PETSc); CHKERRQ(err_PETSc) ! global solution vector (grid x 9, i.e. every def grad tensor) + call DMDASNESsetFunctionLocal(da,INSERT_VALUES,formResidual,PETSC_NULL_SNES,err_PETSc) ! residual vector of same shape as solution vector + CHKERRQ(err_PETSc) + call SNESsetConvergenceTest(snes,converged,PETSC_NULL_SNES,PETSC_NULL_FUNCTION,err_PETSc) ! specify custom convergence check function "converged" + CHKERRQ(err_PETSc) + call SNESsetFromOptions(snes,err_PETSc); CHKERRQ(err_PETSc) ! pull it all together with additional CLI arguments !-------------------------------------------------------------------------------------------------- ! init fields - call DMDAVecGetArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr) ! places pointer on PETSc data + call DMDAVecGetArrayF90(da,solution_vec,F,err_PETSc); CHKERRQ(err_PETSc) ! places pointer on PETSc data restartRead: if (interface_restartInc > 0) then print'(/,1x,a,i0,a)', 'reading restart data of increment ', interface_restartInc, ' from file' @@ -192,17 +194,17 @@ subroutine grid_mechanical_spectral_basic_init groupHandle = HDF5_openGroup(fileHandle,'solver') call HDF5_read(P_aim,groupHandle,'P_aim',.false.) 
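Each HDF5_read of a solver quantity in this restart block is paired (continuing below) with an MPI_Bcast from rank 0, so every rank resumes from identical aggregate state. A stand-alone sketch of the read-once-then-broadcast pattern, assuming mpi_f08; the HDF5 read is replaced by a dummy assignment:

  program root_read_bcast
    use mpi_f08
    use, intrinsic :: ISO_FORTRAN_ENV, only: REAL64
    implicit none

    real(REAL64), dimension(3,3) :: P_aim
    integer :: rank, err

    call MPI_Init(err)
    call MPI_Comm_rank(MPI_COMM_WORLD,rank,err)

    if (rank == 0) P_aim = 0.0_REAL64                ! stands in for the HDF5 read on the root process
    call MPI_Bcast(P_aim,9,MPI_DOUBLE_PRECISION,0,MPI_COMM_WORLD,err)
    if (err /= 0) error stop 'MPI error'

    call MPI_Finalize(err)
  end program root_read_bcast
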
- call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) - if (ierr /=0) error stop 'MPI error' + call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_read(F_aim,groupHandle,'F_aim',.false.) - call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) - if (ierr /=0) error stop 'MPI error' + call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.) - call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) - if (ierr /=0) error stop 'MPI error' + call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.) - call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) - if (ierr /=0) error stop 'MPI error' + call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_read(F,groupHandle,'F') call HDF5_read(F_lastInc,groupHandle,'F_lastInc') @@ -216,24 +218,27 @@ subroutine grid_mechanical_spectral_basic_init call utilities_constitutiveResponse(P,P_av,C_volAvg,C_minMaxAvg, & ! stress field, stress avg, global average of stiffness and (min+max)/2 reshape(F,shape(F_lastInc)), & ! target F 0.0_pReal) ! time increment - call DMDAVecRestoreArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr) ! deassociate pointer + call DMDAVecRestoreArrayF90(da,solution_vec,F,err_PETSc); CHKERRQ(err_PETSc) ! deassociate pointer restartRead2: if (interface_restartInc > 0) then print'(1x,a,i0,a)', 'reading more restart data of increment ', interface_restartInc, ' from file' call HDF5_read(C_volAvg,groupHandle,'C_volAvg',.false.) - call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) - if (ierr /=0) error stop 'MPI error' + call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_read(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.) - call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) - if (ierr /=0) error stop 'MPI error' + call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_closeGroup(groupHandle) call HDF5_closeFile(fileHandle) call MPI_File_open(MPI_COMM_WORLD, trim(getSolverJobName())//'.C_ref', & - MPI_MODE_RDONLY,MPI_INFO_NULL,fileUnit,ierr) - call MPI_File_read(fileUnit,C_minMaxAvg,81,MPI_DOUBLE,MPI_STATUS_IGNORE,ierr) - call MPI_File_close(fileUnit,ierr) + MPI_MODE_RDONLY,MPI_INFO_NULL,fileUnit,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' + call MPI_File_read(fileUnit,C_minMaxAvg,81,MPI_DOUBLE,MPI_STATUS_IGNORE,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' + call MPI_File_close(fileUnit,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' end if restartRead2 call utilities_updateGamma(C_minMaxAvg) @@ -255,7 +260,7 @@ function grid_mechanical_spectral_basic_solution(incInfoIn) result(solution) solution !-------------------------------------------------------------------------------------------------- ! 
PETSc Data - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc SNESConvergedReason :: reason incInfo = incInfoIn @@ -267,11 +272,11 @@ function grid_mechanical_spectral_basic_solution(incInfoIn) result(solution) !-------------------------------------------------------------------------------------------------- ! solve BVP - call SNESsolve(snes,PETSC_NULL_VEC,solution_vec,ierr); CHKERRQ(ierr) + call SNESsolve(snes,PETSC_NULL_VEC,solution_vec,err_PETSc); CHKERRQ(err_PETSc) !-------------------------------------------------------------------------------------------------- ! check convergence - call SNESGetConvergedReason(snes,reason,ierr); CHKERRQ(ierr) + call SNESGetConvergedReason(snes,reason,err_PETSc); CHKERRQ(err_PETSc) solution%converged = reason > 0 solution%iterationsNeeded = totalIter @@ -301,11 +306,11 @@ subroutine grid_mechanical_spectral_basic_forward(cutBack,guess,Delta_t,Delta_t_ deformation_BC type(rotation), intent(in) :: & rotation_BC - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc PetscScalar, pointer, dimension(:,:,:,:) :: F - call DMDAVecGetArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr) + call DMDAVecGetArrayF90(da,solution_vec,F,err_PETSc); CHKERRQ(err_PETSc) if (cutBack) then C_volAvg = C_volAvgLastInc @@ -314,7 +319,7 @@ subroutine grid_mechanical_spectral_basic_forward(cutBack,guess,Delta_t,Delta_t_ C_volAvgLastInc = C_volAvg C_minMaxAvgLastInc = C_minMaxAvg - F_aimDot = merge(merge(.0_pReal,(F_aim-F_aim_lastInc)/Delta_t_old,stress_BC%mask),.0_pReal,guess) ! estimate deformation rate for prescribed stress components + F_aimDot = merge(merge(.0_pReal,(F_aim-F_aim_lastInc)/Delta_t_old,stress_BC%mask),.0_pReal,guess) ! estimate deformation rate for prescribed stress components F_aim_lastInc = F_aim !----------------------------------------------------------------------------------------------- @@ -348,7 +353,7 @@ subroutine grid_mechanical_spectral_basic_forward(cutBack,guess,Delta_t,Delta_t_ F = reshape(utilities_forwardField(Delta_t,F_lastInc,Fdot, & ! estimate of F at end of time+Delta_t that matches rotated F_aim on average rotation_BC%rotate(F_aim,active=.true.)),[9,grid(1),grid(2),grid3]) - call DMDAVecRestoreArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr) + call DMDAVecRestoreArrayF90(da,solution_vec,F,err_PETSc); CHKERRQ(err_PETSc) !-------------------------------------------------------------------------------------------------- ! 
set module wide available data @@ -364,12 +369,12 @@ end subroutine grid_mechanical_spectral_basic_forward !-------------------------------------------------------------------------------------------------- subroutine grid_mechanical_spectral_basic_updateCoords - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc PetscScalar, dimension(:,:,:,:), pointer :: F - call DMDAVecGetArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr) + call DMDAVecGetArrayF90(da,solution_vec,F,err_PETSc); CHKERRQ(err_PETSc) call utilities_updateCoords(F) - call DMDAVecRestoreArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr) + call DMDAVecRestoreArrayF90(da,solution_vec,F,err_PETSc); CHKERRQ(err_PETSc) end subroutine grid_mechanical_spectral_basic_updateCoords @@ -379,11 +384,11 @@ end subroutine grid_mechanical_spectral_basic_updateCoords !-------------------------------------------------------------------------------------------------- subroutine grid_mechanical_spectral_basic_restartWrite - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc integer(HID_T) :: fileHandle, groupHandle PetscScalar, dimension(:,:,:,:), pointer :: F - call DMDAVecGetArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr) + call DMDAVecGetArrayF90(da,solution_vec,F,err_PETSc); CHKERRQ(err_PETSc) print'(1x,a)', 'writing solver data required for restart to file'; flush(IO_STDOUT) @@ -410,7 +415,7 @@ subroutine grid_mechanical_spectral_basic_restartWrite if (num%update_gamma) call utilities_saveReferenceStiffness - call DMDAVecRestoreArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr) + call DMDAVecRestoreArrayF90(da,solution_vec,F,err_PETSc); CHKERRQ(err_PETSc) end subroutine grid_mechanical_spectral_basic_restartWrite @@ -418,7 +423,7 @@ end subroutine grid_mechanical_spectral_basic_restartWrite !-------------------------------------------------------------------------------------------------- !> @brief convergence check !-------------------------------------------------------------------------------------------------- -subroutine converged(snes_local,PETScIter,devNull1,devNull2,devNull3,reason,dummy,ierr) +subroutine converged(snes_local,PETScIter,devNull1,devNull2,devNull3,reason,dummy,err_PETSc) SNES :: snes_local PetscInt, intent(in) :: PETScIter @@ -428,7 +433,7 @@ subroutine converged(snes_local,PETScIter,devNull1,devNull2,devNull3,reason,dumm devNull3 SNESConvergedReason :: reason PetscObject :: dummy - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc real(pReal) :: & divTol, & BCTol @@ -460,7 +465,7 @@ end subroutine converged !> @brief forms the residual vector !-------------------------------------------------------------------------------------------------- subroutine formResidual(in, F, & - residuum, dummy, ierr) + residuum, dummy, err_PETSc) DMDALocalInfo, dimension(DMDA_LOCAL_INFO_SIZE) :: in !< DMDA info (needs to be named "in" for macros like XRANGE to work) PetscScalar, dimension(3,3,XG_RANGE,YG_RANGE,ZG_RANGE), & @@ -473,10 +478,11 @@ subroutine formResidual(in, F, & PETScIter, & nfuncs PetscObject :: dummy - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc + integer(MPI_INTEGER_KIND) :: err_MPI - call SNESGetNumberFunctionEvals(snes,nfuncs,ierr); CHKERRQ(ierr) - call SNESGetIterationNumber(snes,PETScIter,ierr); CHKERRQ(ierr) + call SNESGetNumberFunctionEvals(snes,nfuncs,err_PETSc); CHKERRQ(err_PETSc) + call SNESGetIterationNumber(snes,PETScIter,err_PETSc); CHKERRQ(err_PETSc) if (nfuncs == 0 .and. PETScIter == 0) totalIter = -1 ! 
new increment @@ -497,7 +503,8 @@ subroutine formResidual(in, F, & call utilities_constitutiveResponse(residuum, & ! "residuum" gets field of first PK stress (to save memory) P_av,C_volAvg,C_minMaxAvg, & F,params%Delta_t,params%rotation_BC) - call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' !-------------------------------------------------------------------------------------------------- ! stress BC handling diff --git a/src/grid/grid_mech_spectral_polarisation.f90 b/src/grid/grid_mech_spectral_polarisation.f90 index 5bfce5e3f..a6ce8d173 100644 --- a/src/grid/grid_mech_spectral_polarisation.f90 +++ b/src/grid/grid_mech_spectral_polarisation.f90 @@ -108,7 +108,8 @@ contains subroutine grid_mechanical_spectral_polarisation_init real(pReal), dimension(3,3,grid(1),grid(2),grid3) :: P - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc + integer(MPI_INTEGER_KIND) :: err_MPI PetscScalar, pointer, dimension(:,:,:,:) :: & FandF_tau, & ! overall pointer to solution data F, & ! specific (sub)pointer @@ -163,10 +164,10 @@ subroutine grid_mechanical_spectral_polarisation_init !-------------------------------------------------------------------------------------------------- ! set default and user defined options for PETSc - call PetscOptionsInsertString(PETSC_NULL_OPTIONS,'-mechanical_snes_type ngmres',ierr) - CHKERRQ(ierr) - call PetscOptionsInsertString(PETSC_NULL_OPTIONS,num_grid%get_asString('petsc_options',defaultVal=''),ierr) - CHKERRQ(ierr) + call PetscOptionsInsertString(PETSC_NULL_OPTIONS,'-mechanical_snes_type ngmres',err_PETSc) + CHKERRQ(err_PETSc) + call PetscOptionsInsertString(PETSC_NULL_OPTIONS,num_grid%get_asString('petsc_options',defaultVal=''),err_PETSc) + CHKERRQ(err_PETSc) !-------------------------------------------------------------------------------------------------- ! allocate global fields @@ -177,11 +178,12 @@ subroutine grid_mechanical_spectral_polarisation_init !-------------------------------------------------------------------------------------------------- ! initialize solver specific parts of PETSc - call SNESCreate(PETSC_COMM_WORLD,snes,ierr); CHKERRQ(ierr) - call SNESSetOptionsPrefix(snes,'mechanical_',ierr);CHKERRQ(ierr) + call SNESCreate(PETSC_COMM_WORLD,snes,err_PETSc); CHKERRQ(err_PETSc) + call SNESSetOptionsPrefix(snes,'mechanical_',err_PETSc);CHKERRQ(err_PETSc) localK = 0_pPetscInt localK(worldrank) = int(grid3,pPetscInt) - call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call DMDACreate3d(PETSC_COMM_WORLD, & DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point @@ -189,21 +191,21 @@ subroutine grid_mechanical_spectral_polarisation_init 1_pPetscInt, 1_pPetscInt, int(worldsize,pPetscInt), & 18_pPetscInt, 0_pPetscInt, & ! #dof (2xtensor), ghost boundary width (domain overlap) [int(grid(1),pPetscInt)],[int(grid(2),pPetscInt)],localK, & ! local grid - da,ierr) ! handle, error - CHKERRQ(ierr) - call SNESSetDM(snes,da,ierr); CHKERRQ(ierr) ! 
connect snes to da - call DMsetFromOptions(da,ierr); CHKERRQ(ierr) - call DMsetUp(da,ierr); CHKERRQ(ierr) - call DMcreateGlobalVector(da,solution_vec,ierr); CHKERRQ(ierr) ! global solution vector (grid x 18, i.e. every def grad tensor) - call DMDASNESsetFunctionLocal(da,INSERT_VALUES,formResidual,PETSC_NULL_SNES,ierr) ! residual vector of same shape as solution vector - CHKERRQ(ierr) - call SNESsetConvergenceTest(snes,converged,PETSC_NULL_SNES,PETSC_NULL_FUNCTION,ierr) ! specify custom convergence check function "converged" - CHKERRQ(ierr) - call SNESsetFromOptions(snes,ierr); CHKERRQ(ierr) ! pull it all together with additional CLI arguments + da,err_PETSc) ! handle, error + CHKERRQ(err_PETSc) + call SNESSetDM(snes,da,err_PETSc); CHKERRQ(err_PETSc) ! connect snes to da + call DMsetFromOptions(da,err_PETSc); CHKERRQ(err_PETSc) + call DMsetUp(da,err_PETSc); CHKERRQ(err_PETSc) + call DMcreateGlobalVector(da,solution_vec,err_PETSc); CHKERRQ(err_PETSc) ! global solution vector (grid x 18, i.e. every def grad tensor) + call DMDASNESsetFunctionLocal(da,INSERT_VALUES,formResidual,PETSC_NULL_SNES,err_PETSc) ! residual vector of same shape as solution vector + CHKERRQ(err_PETSc) + call SNESsetConvergenceTest(snes,converged,PETSC_NULL_SNES,PETSC_NULL_FUNCTION,err_PETSc) ! specify custom convergence check function "converged" + CHKERRQ(err_PETSc) + call SNESsetFromOptions(snes,err_PETSc); CHKERRQ(err_PETSc) ! pull it all together with additional CLI arguments !-------------------------------------------------------------------------------------------------- ! init fields - call DMDAVecGetArrayF90(da,solution_vec,FandF_tau,ierr); CHKERRQ(ierr) ! places pointer on PETSc data + call DMDAVecGetArrayF90(da,solution_vec,FandF_tau,err_PETSc); CHKERRQ(err_PETSc) ! places pointer on PETSc data F => FandF_tau(0: 8,:,:,:) F_tau => FandF_tau(9:17,:,:,:) @@ -214,17 +216,17 @@ subroutine grid_mechanical_spectral_polarisation_init groupHandle = HDF5_openGroup(fileHandle,'solver') call HDF5_read(P_aim,groupHandle,'P_aim',.false.) - call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) - if (ierr /=0) error stop 'MPI error' + call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_read(F_aim,groupHandle,'F_aim',.false.) - call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) - if (ierr /=0) error stop 'MPI error' + call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.) - call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) - if (ierr /=0) error stop 'MPI error' + call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.) - call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) - if (ierr /=0) error stop 'MPI error' + call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_read(F,groupHandle,'F') call HDF5_read(F_lastInc,groupHandle,'F_lastInc') call HDF5_read(F_tau,groupHandle,'F_tau') @@ -242,24 +244,28 @@ subroutine grid_mechanical_spectral_polarisation_init call utilities_constitutiveResponse(P,P_av,C_volAvg,C_minMaxAvg, & ! stress field, stress avg, global average of stiffness and (min+max)/2 reshape(F,shape(F_lastInc)), & ! target F 0.0_pReal) ! 
time increment - call DMDAVecRestoreArrayF90(da,solution_vec,FandF_tau,ierr); CHKERRQ(ierr) ! deassociate pointer + call DMDAVecRestoreArrayF90(da,solution_vec,FandF_tau,err_PETSc) ! deassociate pointer + CHKERRQ(err_PETSc) restartRead2: if (interface_restartInc > 0) then print'(1x,a,i0,a)', 'reading more restart data of increment ', interface_restartInc, ' from file' call HDF5_read(C_volAvg,groupHandle,'C_volAvg',.false.) - call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) - if (ierr /=0) error stop 'MPI error' + call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_read(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.) - call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,MPI_COMM_WORLD,ierr) - if (ierr /=0) error stop 'MPI error' + call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_closeGroup(groupHandle) call HDF5_closeFile(fileHandle) call MPI_File_open(MPI_COMM_WORLD, trim(getSolverJobName())//'.C_ref', & - MPI_MODE_RDONLY,MPI_INFO_NULL,fileUnit,ierr) - call MPI_File_read(fileUnit,C_minMaxAvg,81,MPI_DOUBLE,MPI_STATUS_IGNORE,ierr) - call MPI_File_close(fileUnit,ierr) + MPI_MODE_RDONLY,MPI_INFO_NULL,fileUnit,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' + call MPI_File_read(fileUnit,C_minMaxAvg,81,MPI_DOUBLE,MPI_STATUS_IGNORE,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' + call MPI_File_close(fileUnit,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' end if restartRead2 call utilities_updateGamma(C_minMaxAvg) @@ -283,7 +289,7 @@ function grid_mechanical_spectral_polarisation_solution(incInfoIn) result(soluti solution !-------------------------------------------------------------------------------------------------- ! PETSc Data - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc SNESConvergedReason :: reason incInfo = incInfoIn @@ -299,11 +305,11 @@ function grid_mechanical_spectral_polarisation_solution(incInfoIn) result(soluti !-------------------------------------------------------------------------------------------------- ! solve BVP - call SNESsolve(snes,PETSC_NULL_VEC,solution_vec,ierr); CHKERRQ(ierr) + call SNESsolve(snes,PETSC_NULL_VEC,solution_vec,err_PETSc); CHKERRQ(err_PETSc) !-------------------------------------------------------------------------------------------------- ! 
check convergence - call SNESGetConvergedReason(snes,reason,ierr); CHKERRQ(ierr) + call SNESGetConvergedReason(snes,reason,err_PETSc); CHKERRQ(err_PETSc) solution%converged = reason > 0 solution%iterationsNeeded = totalIter @@ -333,13 +339,13 @@ subroutine grid_mechanical_spectral_polarisation_forward(cutBack,guess,Delta_t,D deformation_BC type(rotation), intent(in) :: & rotation_BC - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc PetscScalar, pointer, dimension(:,:,:,:) :: FandF_tau, F, F_tau integer :: i, j, k real(pReal), dimension(3,3) :: F_lambda33 - call DMDAVecGetArrayF90(da,solution_vec,FandF_tau,ierr); CHKERRQ(ierr) + call DMDAVecGetArrayF90(da,solution_vec,FandF_tau,err_PETSc); CHKERRQ(err_PETSc) F => FandF_tau(0: 8,:,:,:) F_tau => FandF_tau(9:17,:,:,:) @@ -402,7 +408,8 @@ subroutine grid_mechanical_spectral_polarisation_forward(cutBack,guess,Delta_t,D end do; end do; end do end if - call DMDAVecRestoreArrayF90(da,solution_vec,FandF_tau,ierr); CHKERRQ(ierr) + call DMDAVecRestoreArrayF90(da,solution_vec,FandF_tau,err_PETSc) + CHKERRQ(err_PETSc) !-------------------------------------------------------------------------------------------------- ! set module wide available data @@ -418,12 +425,14 @@ end subroutine grid_mechanical_spectral_polarisation_forward !-------------------------------------------------------------------------------------------------- subroutine grid_mechanical_spectral_polarisation_updateCoords - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc PetscScalar, dimension(:,:,:,:), pointer :: FandF_tau - call DMDAVecGetArrayF90(da,solution_vec,FandF_tau,ierr); CHKERRQ(ierr) + call DMDAVecGetArrayF90(da,solution_vec,FandF_tau,err_PETSc) + CHKERRQ(err_PETSc) call utilities_updateCoords(FandF_tau(0:8,:,:,:)) - call DMDAVecRestoreArrayF90(da,solution_vec,FandF_tau,ierr); CHKERRQ(ierr) + call DMDAVecRestoreArrayF90(da,solution_vec,FandF_tau,err_PETSc) + CHKERRQ(err_PETSc) end subroutine grid_mechanical_spectral_polarisation_updateCoords @@ -433,11 +442,11 @@ end subroutine grid_mechanical_spectral_polarisation_updateCoords !-------------------------------------------------------------------------------------------------- subroutine grid_mechanical_spectral_polarisation_restartWrite - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc integer(HID_T) :: fileHandle, groupHandle PetscScalar, dimension(:,:,:,:), pointer :: FandF_tau, F, F_tau - call DMDAVecGetArrayF90(da,solution_vec,FandF_tau,ierr); CHKERRQ(ierr) + call DMDAVecGetArrayF90(da,solution_vec,FandF_tau,err_PETSc); CHKERRQ(err_PETSc) F => FandF_tau(0: 8,:,:,:) F_tau => FandF_tau(9:17,:,:,:) @@ -467,7 +476,8 @@ subroutine grid_mechanical_spectral_polarisation_restartWrite if (num%update_gamma) call utilities_saveReferenceStiffness - call DMDAVecRestoreArrayF90(da,solution_vec,FandF_tau,ierr); CHKERRQ(ierr) + call DMDAVecRestoreArrayF90(da,solution_vec,FandF_tau,err_PETSc) + CHKERRQ(err_PETSc) end subroutine grid_mechanical_spectral_polarisation_restartWrite @@ -475,7 +485,7 @@ end subroutine grid_mechanical_spectral_polarisation_restartWrite !-------------------------------------------------------------------------------------------------- !> @brief convergence check !-------------------------------------------------------------------------------------------------- -subroutine converged(snes_local,PETScIter,devNull1,devNull2,devNull3,reason,dummy,ierr) +subroutine converged(snes_local,PETScIter,devNull1,devNull2,devNull3,reason,dummy,err_PETSc) SNES :: snes_local PetscInt, intent(in) :: PETScIter @@ 
-485,7 +495,7 @@ subroutine converged(snes_local,PETScIter,devNull1,devNull2,devNull3,reason,dumm devNull3 SNESConvergedReason :: reason PetscObject :: dummy - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc real(pReal) :: & curlTol, & divTol, & @@ -521,7 +531,7 @@ end subroutine converged !> @brief forms the residual vector !-------------------------------------------------------------------------------------------------- subroutine formResidual(in, FandF_tau, & - residuum, dummy,ierr) + residuum, dummy,err_PETSc) DMDALocalInfo, dimension(DMDA_LOCAL_INFO_SIZE) :: in !< DMDA info (needs to be named "in" for macros like XRANGE to work) PetscScalar, dimension(3,3,2,XG_RANGE,YG_RANGE,ZG_RANGE), & @@ -537,8 +547,9 @@ subroutine formResidual(in, FandF_tau, & PETScIter, & nfuncs PetscObject :: dummy - PetscErrorCode :: ierr - integer :: & + PetscErrorCode :: err_PETSc + integer(MPI_INTEGER_KIND) :: err_MPI + integer :: & i, j, k, e !--------------------------------------------------------------------------------------------------- @@ -553,10 +564,11 @@ subroutine formResidual(in, FandF_tau, & X_RANGE, Y_RANGE, Z_RANGE) F_av = sum(sum(sum(F,dim=5),dim=4),dim=3) * wgt - call MPI_Allreduce(MPI_IN_PLACE,F_av,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,F_av,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' - call SNESGetNumberFunctionEvals(snes,nfuncs,ierr); CHKERRQ(ierr) - call SNESGetIterationNumber(snes,PETScIter,ierr); CHKERRQ(ierr) + call SNESGetNumberFunctionEvals(snes,nfuncs,err_PETSc); CHKERRQ(err_PETSc) + call SNESGetIterationNumber(snes,PETScIter,err_PETSc); CHKERRQ(err_PETSc) if (nfuncs == 0 .and. PETScIter == 0) totalIter = -1 ! new increment @@ -597,7 +609,7 @@ subroutine formResidual(in, FandF_tau, & call utilities_constitutiveResponse(residual_F, & ! "residuum" gets field of first PK stress (to save memory) P_av,C_volAvg,C_minMaxAvg, & F - residual_F_tau/num%beta,params%Delta_t,params%rotation_BC) - call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI) !-------------------------------------------------------------------------------------------------- ! stress BC handling diff --git a/src/mesh/DAMASK_mesh.f90 b/src/mesh/DAMASK_mesh.f90 index 14872bf15..aa156ec2d 100644 --- a/src/mesh/DAMASK_mesh.f90 +++ b/src/mesh/DAMASK_mesh.f90 @@ -78,7 +78,7 @@ program DAMASK_mesh type(tLoadCase), allocatable, dimension(:) :: loadCases !< array of all load cases type(tSolutionState), allocatable, dimension(:) :: solres PetscInt :: faceSet, currentFaceSet, dimPlex - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc integer(kind(COMPONENT_UNDEFINED_ID)) :: ID external :: & quit @@ -98,8 +98,8 @@ program DAMASK_mesh if (maxCutBack < 0) call IO_error(301,ext_msg='maxCutBack') ! 
reading basic information from load case file and allocate data structure containing load cases - call DMGetDimension(geomMesh,dimPlex,ierr) !< dimension of mesh (2D or 3D) - CHKERRA(ierr) + call DMGetDimension(geomMesh,dimPlex,err_PETSc) !< dimension of mesh (2D or 3D) + CHKERRA(err_PETSc) allocate(solres(1)) !-------------------------------------------------------------------------------------------------- diff --git a/src/mesh/FEM_utilities.f90 b/src/mesh/FEM_utilities.f90 index 86fb52b36..ce97e7776 100644 --- a/src/mesh/FEM_utilities.f90 +++ b/src/mesh/FEM_utilities.f90 @@ -92,7 +92,7 @@ subroutine FEM_utilities_init p_i !< integration order (quadrature rule) character(len=*), parameter :: & PETSCDEBUG = ' -snes_view -snes_monitor ' - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc logical :: debugPETSc !< use some in debug defined options for more verbose PETSc solution @@ -116,20 +116,20 @@ subroutine FEM_utilities_init trim(PETScDebug), & 'add more using the "PETSc_options" keyword in numerics.yaml' flush(IO_STDOUT) - call PetscOptionsClear(PETSC_NULL_OPTIONS,ierr) - CHKERRQ(ierr) - if(debugPETSc) call PetscOptionsInsertString(PETSC_NULL_OPTIONS,trim(PETSCDEBUG),ierr) - CHKERRQ(ierr) + call PetscOptionsClear(PETSC_NULL_OPTIONS,err_PETSc) + CHKERRQ(err_PETSc) + if(debugPETSc) call PetscOptionsInsertString(PETSC_NULL_OPTIONS,trim(PETSCDEBUG),err_PETSc) + CHKERRQ(err_PETSc) call PetscOptionsInsertString(PETSC_NULL_OPTIONS,'-mechanical_snes_type newtonls & &-mechanical_snes_linesearch_type cp -mechanical_snes_ksp_ew & &-mechanical_snes_ksp_ew_rtol0 0.01 -mechanical_snes_ksp_ew_rtolmax 0.01 & - &-mechanical_ksp_type fgmres -mechanical_ksp_max_it 25', ierr) - CHKERRQ(ierr) - call PetscOptionsInsertString(PETSC_NULL_OPTIONS,num_mesh%get_asString('PETSc_options',defaultVal=''),ierr) - CHKERRQ(ierr) + &-mechanical_ksp_type fgmres -mechanical_ksp_max_it 25', err_PETSc) + CHKERRQ(err_PETSc) + call PetscOptionsInsertString(PETSC_NULL_OPTIONS,num_mesh%get_asString('PETSc_options',defaultVal=''),err_PETSc) + CHKERRQ(err_PETSc) write(petsc_optionsOrder,'(a,i0)') '-mechFE_petscspace_degree ', p_s - call PetscOptionsInsertString(PETSC_NULL_OPTIONS,trim(petsc_optionsOrder),ierr) - CHKERRQ(ierr) + call PetscOptionsInsertString(PETSC_NULL_OPTIONS,trim(petsc_optionsOrder),err_PETSc) + CHKERRQ(err_PETSc) wgt = 1.0/real(mesh_maxNips*mesh_NcpElemsGlobal,pReal) @@ -144,10 +144,9 @@ subroutine utilities_constitutiveResponse(timeinc,P_av,forwardData) real(pReal), intent(in) :: timeinc !< loading time logical, intent(in) :: forwardData !< age results - real(pReal),intent(out), dimension(3,3) :: P_av !< average PK stress - PetscErrorCode :: ierr + integer(MPI_INTEGER_KIND) :: err_MPI print'(/,1x,a)', '... evaluating constitutive response ......................................' @@ -157,7 +156,9 @@ subroutine utilities_constitutiveResponse(timeinc,P_av,forwardData) cutBack = .false. 
P_av = sum(homogenization_P,dim=3) * wgt - call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' + end subroutine utilities_constitutiveResponse @@ -174,26 +175,29 @@ subroutine utilities_projectBCValues(localVec,section,field,comp,bcPointsIS,BCVa PetscInt, pointer :: bcPoints(:) PetscScalar, pointer :: localArray(:) PetscScalar :: BCValue,BCDotValue,timeinc - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc - call PetscSectionGetFieldComponents(section,field,numComp,ierr); CHKERRQ(ierr) - call ISGetSize(bcPointsIS,nBcPoints,ierr); CHKERRQ(ierr) - if (nBcPoints > 0) call ISGetIndicesF90(bcPointsIS,bcPoints,ierr) - call VecGetArrayF90(localVec,localArray,ierr); CHKERRQ(ierr) + call PetscSectionGetFieldComponents(section,field,numComp,err_PETSc) + CHKERRQ(err_PETSc) + call ISGetSize(bcPointsIS,nBcPoints,err_PETSc) + CHKERRQ(err_PETSc) + if (nBcPoints > 0) call ISGetIndicesF90(bcPointsIS,bcPoints,err_PETSc) + call VecGetArrayF90(localVec,localArray,err_PETSc); CHKERRQ(err_PETSc) do point = 1, nBcPoints - call PetscSectionGetFieldDof(section,bcPoints(point),field,numDof,ierr) - CHKERRQ(ierr) - call PetscSectionGetFieldOffset(section,bcPoints(point),field,offset,ierr) - CHKERRQ(ierr) + call PetscSectionGetFieldDof(section,bcPoints(point),field,numDof,err_PETSc) + CHKERRQ(err_PETSc) + call PetscSectionGetFieldOffset(section,bcPoints(point),field,offset,err_PETSc) + CHKERRQ(err_PETSc) do dof = offset+comp+1, offset+numDof, numComp localArray(dof) = localArray(dof) + BCValue + BCDotValue*timeinc end do end do - call VecRestoreArrayF90(localVec,localArray,ierr); CHKERRQ(ierr) - call VecAssemblyBegin(localVec, ierr); CHKERRQ(ierr) - call VecAssemblyEnd (localVec, ierr); CHKERRQ(ierr) - if (nBcPoints > 0) call ISRestoreIndicesF90(bcPointsIS,bcPoints,ierr) + call VecRestoreArrayF90(localVec,localArray,err_PETSc); CHKERRQ(err_PETSc) + call VecAssemblyBegin(localVec, err_PETSc); CHKERRQ(err_PETSc) + call VecAssemblyEnd (localVec, err_PETSc); CHKERRQ(err_PETSc) + if (nBcPoints > 0) call ISRestoreIndicesF90(bcPointsIS,bcPoints,err_PETSc) + CHKERRQ(err_PETSc) end subroutine utilities_projectBCValues diff --git a/src/mesh/discretization_mesh.f90 b/src/mesh/discretization_mesh.f90 index 2446bc0de..172101976 100644 --- a/src/mesh/discretization_mesh.f90 +++ b/src/mesh/discretization_mesh.f90 @@ -80,7 +80,8 @@ subroutine discretization_mesh_init(restart) PetscInt :: nFaceSets PetscInt, pointer, dimension(:) :: pFaceSets IS :: faceSetIS - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc + integer(MPI_INTEGER_KIND) :: err_MPI integer, dimension(:), allocatable :: & materialAt class(tNode), pointer :: & @@ -100,56 +101,60 @@ subroutine discretization_mesh_init(restart) debug_element = config_debug%get_asInt('element',defaultVal=1) debug_ip = config_debug%get_asInt('integrationpoint',defaultVal=1) - call DMPlexCreateFromFile(PETSC_COMM_WORLD,interface_geomFile,PETSC_TRUE,globalMesh,ierr) - CHKERRQ(ierr) - call DMGetDimension(globalMesh,dimPlex,ierr) - CHKERRQ(ierr) - call DMGetStratumSize(globalMesh,'depth',dimPlex,mesh_NcpElemsGlobal,ierr) - CHKERRQ(ierr) + call DMPlexCreateFromFile(PETSC_COMM_WORLD,interface_geomFile,PETSC_TRUE,globalMesh,err_PETSc) + CHKERRQ(err_PETSc) + call DMGetDimension(globalMesh,dimPlex,err_PETSc) + CHKERRQ(err_PETSc) + call DMGetStratumSize(globalMesh,'depth',dimPlex,mesh_NcpElemsGlobal,err_PETSc) + 
CHKERRQ(err_PETSc) print'()' - call DMView(globalMesh, PETSC_VIEWER_STDOUT_WORLD,ierr) - CHKERRQ(ierr) + call DMView(globalMesh, PETSC_VIEWER_STDOUT_WORLD,err_PETSc) + CHKERRQ(err_PETSc) ! get number of IDs in face sets (for boundary conditions?) - call DMGetLabelSize(globalMesh,'Face Sets',mesh_Nboundaries,ierr) - CHKERRQ(ierr) - call MPI_Bcast(mesh_Nboundaries,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr) - call MPI_Bcast(mesh_NcpElemsGlobal,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr) - call MPI_Bcast(dimPlex,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr) + call DMGetLabelSize(globalMesh,'Face Sets',mesh_Nboundaries,err_PETSc) + CHKERRQ(err_PETSc) + call MPI_Bcast(mesh_Nboundaries,1,MPI_INTEGER,0,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' + call MPI_Bcast(mesh_NcpElemsGlobal,1,MPI_INTEGER,0,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' + call MPI_Bcast(dimPlex,1,MPI_INTEGER,0,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' if (worldrank == 0) then - call DMClone(globalMesh,geomMesh,ierr) + call DMClone(globalMesh,geomMesh,err_PETSc) else - call DMPlexDistribute(globalMesh,0,sf,geomMesh,ierr) + call DMPlexDistribute(globalMesh,0,sf,geomMesh,err_PETSc) endif - CHKERRQ(ierr) + CHKERRQ(err_PETSc) allocate(mesh_boundaries(mesh_Nboundaries), source = 0) - call DMGetLabelSize(globalMesh,'Face Sets',nFaceSets,ierr) - CHKERRQ(ierr) - call DMGetLabelIdIS(globalMesh,'Face Sets',faceSetIS,ierr) - CHKERRQ(ierr) + call DMGetLabelSize(globalMesh,'Face Sets',nFaceSets,err_PETSc) + CHKERRQ(err_PETSc) + call DMGetLabelIdIS(globalMesh,'Face Sets',faceSetIS,err_PETSc) + CHKERRQ(err_PETSc) if (nFaceSets > 0) then - call ISGetIndicesF90(faceSetIS,pFaceSets,ierr) - CHKERRQ(ierr) + call ISGetIndicesF90(faceSetIS,pFaceSets,err_PETSc) + CHKERRQ(err_PETSc) mesh_boundaries(1:nFaceSets) = pFaceSets - CHKERRQ(ierr) - call ISRestoreIndicesF90(faceSetIS,pFaceSets,ierr) + CHKERRQ(err_PETSc) + call ISRestoreIndicesF90(faceSetIS,pFaceSets,err_PETSc) endif - call MPI_Bcast(mesh_boundaries,mesh_Nboundaries,MPI_INTEGER,0,MPI_COMM_WORLD,ierr) + call MPI_Bcast(mesh_boundaries,mesh_Nboundaries,MPI_INTEGER,0,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' - call DMDestroy(globalMesh,ierr); CHKERRQ(ierr) + call DMDestroy(globalMesh,err_PETSc); CHKERRQ(err_PETSc) - call DMGetStratumSize(geomMesh,'depth',dimPlex,mesh_NcpElems,ierr) - CHKERRQ(ierr) - call DMGetStratumSize(geomMesh,'depth',0,mesh_Nnodes,ierr) - CHKERRQ(ierr) + call DMGetStratumSize(geomMesh,'depth',dimPlex,mesh_NcpElems,err_PETSc) + CHKERRQ(err_PETSc) + call DMGetStratumSize(geomMesh,'depth',0,mesh_Nnodes,err_PETSc) + CHKERRQ(err_PETSc) ! 
Get initial nodal coordinates - call DMGetCoordinates(geomMesh,coords_node0,ierr) - CHKERRQ(ierr) - call VecGetArrayF90(coords_node0, mesh_node0_temp,ierr) - CHKERRQ(ierr) + call DMGetCoordinates(geomMesh,coords_node0,err_PETSc) + CHKERRQ(err_PETSc) + call VecGetArrayF90(coords_node0, mesh_node0_temp,err_PETSc) + CHKERRQ(err_PETSc) mesh_maxNips = FEM_nQuadrature(dimPlex,p_i) @@ -158,8 +163,8 @@ subroutine discretization_mesh_init(restart) allocate(materialAt(mesh_NcpElems)) do j = 1, mesh_NcpElems - call DMGetLabelValue(geomMesh,'Cell Sets',j-1,materialAt(j),ierr) - CHKERRQ(ierr) + call DMGetLabelValue(geomMesh,'Cell Sets',j-1,materialAt(j),err_PETSc) + CHKERRQ(err_PETSc) enddo materialAt = materialAt + 1 @@ -188,16 +193,17 @@ subroutine mesh_FEM_build_ipVolumes(dimPlex) PetscReal :: vol PetscReal, pointer,dimension(:) :: pCent, pNorm PetscInt :: cellStart, cellEnd, cell - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc allocate(mesh_ipVolume(mesh_maxNips,mesh_NcpElems),source=0.0_pReal) - call DMPlexGetHeightStratum(geomMesh,0,cellStart,cellEnd,ierr); CHKERRQ(ierr) + call DMPlexGetHeightStratum(geomMesh,0,cellStart,cellEnd,err_PETSc) + CHKERRQ(err_PETSc) allocate(pCent(dimPlex)) allocate(pNorm(dimPlex)) do cell = cellStart, cellEnd-1 - call DMPlexComputeCellGeometryFVM(geomMesh,cell,vol,pCent,pNorm,ierr) - CHKERRQ(ierr) + call DMPlexComputeCellGeometryFVM(geomMesh,cell,vol,pCent,pNorm,err_PETSc) + CHKERRQ(err_PETSc) mesh_ipVolume(:,cell+1) = vol/real(mesh_maxNips,pReal) enddo @@ -215,7 +221,7 @@ subroutine mesh_FEM_build_ipCoordinates(dimPlex,qPoints) PetscReal, pointer,dimension(:) :: pV0, pCellJ, pInvcellJ PetscReal :: detJ PetscInt :: cellStart, cellEnd, cell, qPt, dirI, dirJ, qOffset - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc allocate(mesh_ipCoordinates(3,mesh_maxNips,mesh_NcpElems),source=0.0_pReal) @@ -223,10 +229,11 @@ subroutine mesh_FEM_build_ipCoordinates(dimPlex,qPoints) allocate(pV0(dimPlex)) allocatE(pCellJ(dimPlex**2)) allocatE(pinvCellJ(dimPlex**2)) - call DMPlexGetHeightStratum(geomMesh,0,cellStart,cellEnd,ierr); CHKERRQ(ierr) + call DMPlexGetHeightStratum(geomMesh,0,cellStart,cellEnd,err_PETSc) + CHKERRQ(err_PETSc) do cell = cellStart, cellEnd-1 !< loop over all elements - call DMPlexComputeCellGeometryAffineFEM(geomMesh,cell,pV0,pCellJ,pInvcellJ,detJ,ierr) - CHKERRQ(ierr) + call DMPlexComputeCellGeometryAffineFEM(geomMesh,cell,pV0,pCellJ,pInvcellJ,detJ,err_PETSc) + CHKERRQ(err_PETSc) qOffset = 0 do qPt = 1, mesh_maxNips do dirI = 1, dimPlex diff --git a/src/mesh/mesh_mech_FEM.f90 b/src/mesh/mesh_mech_FEM.f90 index c1c0d8e3d..bb7bfaece 100644 --- a/src/mesh/mesh_mech_FEM.f90 +++ b/src/mesh/mesh_mech_FEM.f90 @@ -108,7 +108,7 @@ subroutine FEM_mechanical_init(fieldBC) PetscScalar, allocatable, target :: x_scal(:) character(len=*), parameter :: prefix = 'mechFE_' - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc real(pReal), dimension(3,3) :: devNull class(tNode), pointer :: & num_mesh @@ -130,8 +130,8 @@ subroutine FEM_mechanical_init(fieldBC) !-------------------------------------------------------------------------------------------------- ! Setup FEM mech mesh - call DMClone(geomMesh,mechanical_mesh,ierr); CHKERRQ(ierr) - call DMGetDimension(mechanical_mesh,dimPlex,ierr); CHKERRQ(ierr) + call DMClone(geomMesh,mechanical_mesh,err_PETSc); CHKERRQ(err_PETSc) + call DMGetDimension(mechanical_mesh,dimPlex,err_PETSc); CHKERRQ(err_PETSc) !-------------------------------------------------------------------------------------------------- ! 
Setup FEM mech discretization @@ -140,35 +140,37 @@ subroutine FEM_mechanical_init(fieldBC) nQuadrature = FEM_nQuadrature( dimPlex,num%p_i) qPointsP => qPoints qWeightsP => qWeights - call PetscQuadratureCreate(PETSC_COMM_SELF,mechQuad,ierr); CHKERRQ(ierr) - CHKERRQ(ierr) + call PetscQuadratureCreate(PETSC_COMM_SELF,mechQuad,err_PETSc) + CHKERRQ(err_PETSc) nc = dimPlex - call PetscQuadratureSetData(mechQuad,dimPlex,nc,nQuadrature,qPointsP,qWeightsP,ierr) - CHKERRQ(ierr) + call PetscQuadratureSetData(mechQuad,dimPlex,nc,nQuadrature,qPointsP,qWeightsP,err_PETSc) + CHKERRQ(err_PETSc) call PetscFECreateDefault(PETSC_COMM_SELF,dimPlex,nc,PETSC_TRUE,prefix, & - num%p_i,mechFE,ierr); CHKERRQ(ierr) - call PetscFESetQuadrature(mechFE,mechQuad,ierr); CHKERRQ(ierr) - call PetscFEGetDimension(mechFE,nBasis,ierr); CHKERRQ(ierr) + num%p_i,mechFE,err_PETSc); CHKERRQ(err_PETSc) + call PetscFESetQuadrature(mechFE,mechQuad,err_PETSc); CHKERRQ(err_PETSc) + call PetscFEGetDimension(mechFE,nBasis,err_PETSc); CHKERRQ(err_PETSc) nBasis = nBasis/nc - call DMAddField(mechanical_mesh,PETSC_NULL_DMLABEL,mechFE,ierr); CHKERRQ(ierr) - call DMCreateDS(mechanical_mesh,ierr); CHKERRQ(ierr) - call DMGetDS(mechanical_mesh,mechDS,ierr); CHKERRQ(ierr) - call PetscDSGetTotalDimension(mechDS,cellDof,ierr); CHKERRQ(ierr) - call PetscFEDestroy(mechFE,ierr); CHKERRQ(ierr) - call PetscQuadratureDestroy(mechQuad,ierr); CHKERRQ(ierr) + call DMAddField(mechanical_mesh,PETSC_NULL_DMLABEL,mechFE,err_PETSc) + CHKERRQ(err_PETSc) + call DMCreateDS(mechanical_mesh,err_PETSc); CHKERRQ(err_PETSc) + call DMGetDS(mechanical_mesh,mechDS,err_PETSc); CHKERRQ(err_PETSc) + call PetscDSGetTotalDimension(mechDS,cellDof,err_PETSc); CHKERRQ(err_PETSc) + call PetscFEDestroy(mechFE,err_PETSc); CHKERRQ(err_PETSc) + call PetscQuadratureDestroy(mechQuad,err_PETSc); CHKERRQ(err_PETSc) !-------------------------------------------------------------------------------------------------- ! 
Setup FEM mech boundary conditions - call DMGetLabel(mechanical_mesh,'Face Sets',BCLabel,ierr); CHKERRQ(ierr) - call DMPlexLabelComplete(mechanical_mesh,BCLabel,ierr); CHKERRQ(ierr) - call DMGetLocalSection(mechanical_mesh,section,ierr); CHKERRQ(ierr) + call DMGetLabel(mechanical_mesh,'Face Sets',BCLabel,err_PETSc) + CHKERRQ(err_PETSc) + call DMPlexLabelComplete(mechanical_mesh,BCLabel,err_PETSc); CHKERRQ(err_PETSc) + call DMGetLocalSection(mechanical_mesh,section,err_PETSc); CHKERRQ(err_PETSc) allocate(pnumComp(1), source=dimPlex) allocate(pnumDof(0:dimPlex), source = 0) do topologDim = 0, dimPlex - call DMPlexGetDepthStratum(mechanical_mesh,topologDim,cellStart,cellEnd,ierr) - CHKERRQ(ierr) - call PetscSectionGetDof(section,cellStart,pnumDof(topologDim),ierr) - CHKERRQ(ierr) + call DMPlexGetDepthStratum(mechanical_mesh,topologDim,cellStart,cellEnd,err_PETSc) + CHKERRQ(err_PETSc) + call PetscSectionGetDof(section,cellStart,pnumDof(topologDim),err_PETSc) + CHKERRQ(err_PETSc) enddo numBC = 0 do field = 1, dimPlex; do faceSet = 1, mesh_Nboundaries @@ -181,55 +183,61 @@ subroutine FEM_mechanical_init(fieldBC) do field = 1, dimPlex; do faceSet = 1, mesh_Nboundaries if (fieldBC%componentBC(field)%Mask(faceSet)) then numBC = numBC + 1 - call ISCreateGeneral(PETSC_COMM_WORLD,1,[field-1],PETSC_COPY_VALUES,pbcComps(numBC),ierr) - CHKERRQ(ierr) - call DMGetStratumSize(mechanical_mesh,'Face Sets',mesh_boundaries(faceSet),bcSize,ierr) - CHKERRQ(ierr) + call ISCreateGeneral(PETSC_COMM_WORLD,1,[field-1],PETSC_COPY_VALUES,pbcComps(numBC),err_PETSc) + CHKERRQ(err_PETSc) + call DMGetStratumSize(mechanical_mesh,'Face Sets',mesh_boundaries(faceSet),bcSize,err_PETSc) + CHKERRQ(err_PETSc) if (bcSize > 0) then - call DMGetStratumIS(mechanical_mesh,'Face Sets',mesh_boundaries(faceSet),bcPoint,ierr) - CHKERRQ(ierr) - call ISGetIndicesF90(bcPoint,pBcPoint,ierr); CHKERRQ(ierr) - call ISCreateGeneral(PETSC_COMM_WORLD,bcSize,pBcPoint,PETSC_COPY_VALUES,pbcPoints(numBC),ierr) - CHKERRQ(ierr) - call ISRestoreIndicesF90(bcPoint,pBcPoint,ierr); CHKERRQ(ierr) - call ISDestroy(bcPoint,ierr); CHKERRQ(ierr) + call DMGetStratumIS(mechanical_mesh,'Face Sets',mesh_boundaries(faceSet),bcPoint,err_PETSc) + CHKERRQ(err_PETSc) + call ISGetIndicesF90(bcPoint,pBcPoint,err_PETSc); CHKERRQ(err_PETSc) + call ISCreateGeneral(PETSC_COMM_WORLD,bcSize,pBcPoint,PETSC_COPY_VALUES,pbcPoints(numBC),err_PETSc) + CHKERRQ(err_PETSc) + call ISRestoreIndicesF90(bcPoint,pBcPoint,err_PETSc); CHKERRQ(err_PETSc) + call ISDestroy(bcPoint,err_PETSc); CHKERRQ(err_PETSc) else - call ISCreateGeneral(PETSC_COMM_WORLD,0,[0],PETSC_COPY_VALUES,pbcPoints(numBC),ierr) - CHKERRQ(ierr) + call ISCreateGeneral(PETSC_COMM_WORLD,0,[0],PETSC_COPY_VALUES,pbcPoints(numBC),err_PETSc) + CHKERRQ(err_PETSc) endif endif enddo; enddo call DMPlexCreateSection(mechanical_mesh,nolabel,pNumComp,pNumDof, & - numBC,pBcField,pBcComps,pBcPoints,PETSC_NULL_IS,section,ierr) - CHKERRQ(ierr) - call DMSetSection(mechanical_mesh,section,ierr); CHKERRQ(ierr) + numBC,pBcField,pBcComps,pBcPoints,PETSC_NULL_IS,section,err_PETSc) + CHKERRQ(err_PETSc) + call DMSetSection(mechanical_mesh,section,err_PETSc); CHKERRQ(err_PETSc) do faceSet = 1, numBC - call ISDestroy(pbcPoints(faceSet),ierr); CHKERRQ(ierr) + call ISDestroy(pbcPoints(faceSet),err_PETSc); CHKERRQ(err_PETSc) enddo !-------------------------------------------------------------------------------------------------- ! 
initialize solver specific parts of PETSc
-  call SNESCreate(PETSC_COMM_WORLD,mechanical_snes,ierr);CHKERRQ(ierr)
-  call SNESSetOptionsPrefix(mechanical_snes,'mechanical_',ierr);CHKERRQ(ierr)
-  call SNESSetDM(mechanical_snes,mechanical_mesh,ierr); CHKERRQ(ierr)                               !< set the mesh for non-linear solver
-  call DMCreateGlobalVector(mechanical_mesh,solution        ,ierr); CHKERRQ(ierr)                   !< locally owned displacement Dofs
-  call DMCreateGlobalVector(mechanical_mesh,solution_rate   ,ierr); CHKERRQ(ierr)                   !< locally owned velocity Dofs to guess solution at next load step
-  call DMCreateLocalVector (mechanical_mesh,solution_local  ,ierr); CHKERRQ(ierr)                   !< locally owned velocity Dofs to guess solution at next load step
-  call DMSNESSetFunctionLocal(mechanical_mesh,FEM_mechanical_formResidual,PETSC_NULL_VEC,ierr)      !< function to evaluate residual forces
-  CHKERRQ(ierr)
-  call DMSNESSetJacobianLocal(mechanical_mesh,FEM_mechanical_formJacobian,PETSC_NULL_VEC,ierr)      !< function to evaluate stiffness matrix
-  CHKERRQ(ierr)
-  call SNESSetMaxLinearSolveFailures(mechanical_snes, huge(1), ierr); CHKERRQ(ierr)                 !< ignore linear solve failures
-  call SNESSetConvergenceTest(mechanical_snes,FEM_mechanical_converged,PETSC_NULL_VEC,PETSC_NULL_FUNCTION,ierr)
-  CHKERRQ(ierr)
-  call SNESSetTolerances(mechanical_snes,1.0,0.0,0.0,num%itmax,num%itmax,ierr)
-  CHKERRQ(ierr)
-  call SNESSetFromOptions(mechanical_snes,ierr); CHKERRQ(ierr)
+  call SNESCreate(PETSC_COMM_WORLD,mechanical_snes,err_PETSc);CHKERRQ(err_PETSc)
+  call SNESSetOptionsPrefix(mechanical_snes,'mechanical_',err_PETSc)
+  CHKERRQ(err_PETSc)
+  call SNESSetDM(mechanical_snes,mechanical_mesh,err_PETSc)                                          ! set the mesh for non-linear solver
+  CHKERRQ(err_PETSc)
+  call DMCreateGlobalVector(mechanical_mesh,solution,        err_PETSc)                              ! locally owned displacement Dofs
+  CHKERRQ(err_PETSc)
+  call DMCreateGlobalVector(mechanical_mesh,solution_rate,   err_PETSc)                              ! locally owned velocity Dofs to guess solution at next load step
+  CHKERRQ(err_PETSc)
+  call DMCreateLocalVector (mechanical_mesh,solution_local,err_PETSc)                                ! locally owned velocity Dofs to guess solution at next load step
+  CHKERRQ(err_PETSc)
+  call DMSNESSetFunctionLocal(mechanical_mesh,FEM_mechanical_formResidual,PETSC_NULL_VEC,err_PETSc)  ! function to evaluate residual forces
+  CHKERRQ(err_PETSc)
+  call DMSNESSetJacobianLocal(mechanical_mesh,FEM_mechanical_formJacobian,PETSC_NULL_VEC,err_PETSc)  ! function to evaluate stiffness matrix
+  CHKERRQ(err_PETSc)
+  call SNESSetMaxLinearSolveFailures(mechanical_snes, huge(1), err_PETSc)                            ! ignore linear solve failures
+  CHKERRQ(err_PETSc)
+  call SNESSetConvergenceTest(mechanical_snes,FEM_mechanical_converged,PETSC_NULL_VEC,PETSC_NULL_FUNCTION,err_PETSc)
+  CHKERRQ(err_PETSc)
+  call SNESSetTolerances(mechanical_snes,1.0,0.0,0.0,num%itmax,num%itmax,err_PETSc)
+  CHKERRQ(err_PETSc)
+  call SNESSetFromOptions(mechanical_snes,err_PETSc); CHKERRQ(err_PETSc)
 
 !--------------------------------------------------------------------------------------------------
 ! 
init fields - call VecSet(solution ,0.0,ierr); CHKERRQ(ierr) - call VecSet(solution_rate ,0.0,ierr); CHKERRQ(ierr) + call VecSet(solution ,0.0,err_PETSc); CHKERRQ(err_PETSc) + call VecSet(solution_rate ,0.0,err_PETSc); CHKERRQ(err_PETSc) allocate(x_scal(cellDof)) allocate(nodalWeightsP(1)) allocate(nodalPointsP(dimPlex)) @@ -237,26 +245,26 @@ subroutine FEM_mechanical_init(fieldBC) allocate(pcellJ(dimPlex**2)) allocate(pinvcellJ(dimPlex**2)) allocate(cellJMat(dimPlex,dimPlex)) - call PetscDSGetDiscretization(mechDS,0,mechFE,ierr) - CHKERRQ(ierr) - call PetscFEGetDualSpace(mechFE,mechDualSpace,ierr); CHKERRQ(ierr) - call DMPlexGetHeightStratum(mechanical_mesh,0,cellStart,cellEnd,ierr) - CHKERRQ(ierr) + call PetscDSGetDiscretization(mechDS,0,mechFE,err_PETSc) + CHKERRQ(err_PETSc) + call PetscFEGetDualSpace(mechFE,mechDualSpace,err_PETSc); CHKERRQ(err_PETSc) + call DMPlexGetHeightStratum(mechanical_mesh,0,cellStart,cellEnd,err_PETSc) + CHKERRQ(err_PETSc) do cell = cellStart, cellEnd-1 !< loop over all elements x_scal = 0.0_pReal - call DMPlexComputeCellGeometryAffineFEM(mechanical_mesh,cell,pV0,pCellJ,pInvcellJ,detJ,ierr) - CHKERRQ(ierr) + call DMPlexComputeCellGeometryAffineFEM(mechanical_mesh,cell,pV0,pCellJ,pInvcellJ,detJ,err_PETSc) + CHKERRQ(err_PETSc) cellJMat = reshape(pCellJ,shape=[dimPlex,dimPlex]) do basis = 0, nBasis*dimPlex-1, dimPlex - call PetscDualSpaceGetFunctional(mechDualSpace,basis,functional,ierr) - CHKERRQ(ierr) - call PetscQuadratureGetData(functional,dimPlex,nc,nNodalPoints,nodalPointsP,nodalWeightsP,ierr) - CHKERRQ(ierr) + call PetscDualSpaceGetFunctional(mechDualSpace,basis,functional,err_PETSc) + CHKERRQ(err_PETSc) + call PetscQuadratureGetData(functional,dimPlex,nc,nNodalPoints,nodalPointsP,nodalWeightsP,err_PETSc) + CHKERRQ(err_PETSc) x_scal(basis+1:basis+dimPlex) = pV0 + matmul(transpose(cellJMat),nodalPointsP + 1.0_pReal) enddo px_scal => x_scal - call DMPlexVecSetClosure(mechanical_mesh,section,solution_local,cell,px_scal,5,ierr) - CHKERRQ(ierr) + call DMPlexVecSetClosure(mechanical_mesh,section,solution_local,cell,px_scal,5,err_PETSc) + CHKERRQ(err_PETSc) enddo call utilities_constitutiveResponse(0.0_pReal,devNull,.true.) @@ -279,7 +287,7 @@ type(tSolutionState) function FEM_mechanical_solution( & character(len=*), intent(in) :: & incInfoIn - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc SNESConvergedReason :: reason incInfo = incInfoIn @@ -289,8 +297,10 @@ type(tSolutionState) function FEM_mechanical_solution( & params%timeinc = timeinc params%fieldBC = fieldBC - call SNESSolve(mechanical_snes,PETSC_NULL_VEC,solution,ierr); CHKERRQ(ierr) ! solve mechanical_snes based on solution guess (result in solution) - call SNESGetConvergedReason(mechanical_snes,reason,ierr); CHKERRQ(ierr) ! solution converged? + call SNESSolve(mechanical_snes,PETSC_NULL_VEC,solution,err_PETSc) ! solve mechanical_snes based on solution guess (result in solution) + CHKERRQ(err_PETSc) + call SNESGetConvergedReason(mechanical_snes,reason,err_PETSc) ! solution converged? + CHKERRQ(err_PETSc) terminallyIll = .false. if (reason < 1) then ! 0: still iterating (will not occur), negative -> convergence error @@ -298,8 +308,8 @@ type(tSolutionState) function FEM_mechanical_solution( & FEM_mechanical_solution%iterationsNeeded = num%itmax else ! >= 1 proper convergence (or terminally ill) FEM_mechanical_solution%converged = .true. 
- call SNESGetIterationNumber(mechanical_snes,FEM_mechanical_solution%iterationsNeeded,ierr) - CHKERRQ(ierr) + call SNESGetIterationNumber(mechanical_snes,FEM_mechanical_solution%iterationsNeeded,err_PETSc) + CHKERRQ(err_PETSc) endif print'(/,1x,a)', '===========================================================================' @@ -311,11 +321,12 @@ end function FEM_mechanical_solution !-------------------------------------------------------------------------------------------------- !> @brief forms the FEM residual vector !-------------------------------------------------------------------------------------------------- -subroutine FEM_mechanical_formResidual(dm_local,xx_local,f_local,dummy,ierr) +subroutine FEM_mechanical_formResidual(dm_local,xx_local,f_local,dummy,err_PETSc) DM :: dm_local PetscObject,intent(in) :: dummy - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc + integer(MPI_INTEGER_KIND) :: err_MPI PetscDS :: prob Vec :: x_local, f_local, xx_local @@ -339,22 +350,23 @@ subroutine FEM_mechanical_formResidual(dm_local,xx_local,f_local,dummy,ierr) allocate(pinvcellJ(dimPlex**2)) allocate(x_scal(cellDof)) - call DMGetLocalSection(dm_local,section,ierr); CHKERRQ(ierr) - call DMGetDS(dm_local,prob,ierr); CHKERRQ(ierr) - call PetscDSGetTabulation(prob,0,basisField,basisFieldDer,ierr) - CHKERRQ(ierr) - call DMPlexGetHeightStratum(dm_local,0,cellStart,cellEnd,ierr); CHKERRQ(ierr) - call DMGetLocalVector(dm_local,x_local,ierr); CHKERRQ(ierr) - call VecWAXPY(x_local,1.0,xx_local,solution_local,ierr); CHKERRQ(ierr) + call DMGetLocalSection(dm_local,section,err_PETSc); CHKERRQ(err_PETSc) + call DMGetDS(dm_local,prob,err_PETSc); CHKERRQ(err_PETSc) + call PetscDSGetTabulation(prob,0,basisField,basisFieldDer,err_PETSc) + CHKERRQ(err_PETSc) + call DMPlexGetHeightStratum(dm_local,0,cellStart,cellEnd,err_PETSc) + CHKERRQ(err_PETSc) + call DMGetLocalVector(dm_local,x_local,err_PETSc); CHKERRQ(err_PETSc) + call VecWAXPY(x_local,1.0,xx_local,solution_local,err_PETSc); CHKERRQ(err_PETSc) do field = 1, dimPlex; do face = 1, mesh_Nboundaries if (params%fieldBC%componentBC(field)%Mask(face)) then - call DMGetStratumSize(dm_local,'Face Sets',mesh_boundaries(face),bcSize,ierr) + call DMGetStratumSize(dm_local,'Face Sets',mesh_boundaries(face),bcSize,err_PETSc) if (bcSize > 0) then - call DMGetStratumIS(dm_local,'Face Sets',mesh_boundaries(face),bcPoints,ierr) - CHKERRQ(ierr) + call DMGetStratumIS(dm_local,'Face Sets',mesh_boundaries(face),bcPoints,err_PETSc) + CHKERRQ(err_PETSc) call utilities_projectBCValues(x_local,section,0,field-1,bcPoints, & 0.0_pReal,params%fieldBC%componentBC(field)%Value(face),params%timeinc) - call ISDestroy(bcPoints,ierr); CHKERRQ(ierr) + call ISDestroy(bcPoints,err_PETSc); CHKERRQ(err_PETSc) endif endif enddo; enddo @@ -363,12 +375,12 @@ subroutine FEM_mechanical_formResidual(dm_local,xx_local,f_local,dummy,ierr) ! 
evaluate field derivatives do cell = cellStart, cellEnd-1 !< loop over all elements - call PetscSectionGetNumFields(section,numFields,ierr) - CHKERRQ(ierr) - call DMPlexVecGetClosure(dm_local,section,x_local,cell,x_scal,ierr) !< get Dofs belonging to element - CHKERRQ(ierr) - call DMPlexComputeCellGeometryAffineFEM(dm_local,cell,pV0,pCellJ,pInvcellJ,detJ,ierr) - CHKERRQ(ierr) + call PetscSectionGetNumFields(section,numFields,err_PETSc) + CHKERRQ(err_PETSc) + call DMPlexVecGetClosure(dm_local,section,x_local,cell,x_scal,err_PETSc) !< get Dofs belonging to element + CHKERRQ(err_PETSc) + call DMPlexComputeCellGeometryAffineFEM(dm_local,cell,pV0,pCellJ,pInvcellJ,detJ,err_PETSc) + CHKERRQ(err_PETSc) IcellJMat = reshape(pInvcellJ,shape=[dimPlex,dimPlex]) do qPt = 0, nQuadrature-1 m = cell*nQuadrature + qPt+1 @@ -392,23 +404,24 @@ subroutine FEM_mechanical_formResidual(dm_local,xx_local,f_local,dummy,ierr) enddo endif - call DMPlexVecRestoreClosure(dm_local,section,x_local,cell,x_scal,ierr) - CHKERRQ(ierr) + call DMPlexVecRestoreClosure(dm_local,section,x_local,cell,x_scal,err_PETSc) + CHKERRQ(err_PETSc) enddo !-------------------------------------------------------------------------------------------------- ! evaluate constitutive response call utilities_constitutiveResponse(params%timeinc,P_av,ForwardData) - call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' ForwardData = .false. !-------------------------------------------------------------------------------------------------- ! integrating residual do cell = cellStart, cellEnd-1 !< loop over all elements - call DMPlexVecGetClosure(dm_local,section,x_local,cell,x_scal,ierr) !< get Dofs belonging to element - CHKERRQ(ierr) - call DMPlexComputeCellGeometryAffineFEM(dm_local,cell,pV0,pCellJ,pInvcellJ,detJ,ierr) - CHKERRQ(ierr) + call DMPlexVecGetClosure(dm_local,section,x_local,cell,x_scal,err_PETSc) !< get Dofs belonging to element + CHKERRQ(err_PETSc) + call DMPlexComputeCellGeometryAffineFEM(dm_local,cell,pV0,pCellJ,pInvcellJ,detJ,err_PETSc) + CHKERRQ(err_PETSc) IcellJMat = reshape(pInvcellJ,shape=[dimPlex,dimPlex]) f_scal = 0.0 do qPt = 0, nQuadrature-1 @@ -429,12 +442,12 @@ subroutine FEM_mechanical_formResidual(dm_local,xx_local,f_local,dummy,ierr) enddo f_scal = f_scal*abs(detJ) pf_scal => f_scal - call DMPlexVecSetClosure(dm_local,section,f_local,cell,pf_scal,ADD_VALUES,ierr) - CHKERRQ(ierr) - call DMPlexVecRestoreClosure(dm_local,section,x_local,cell,x_scal,ierr) - CHKERRQ(ierr) + call DMPlexVecSetClosure(dm_local,section,f_local,cell,pf_scal,ADD_VALUES,err_PETSc) + CHKERRQ(err_PETSc) + call DMPlexVecRestoreClosure(dm_local,section,x_local,cell,x_scal,err_PETSc) + CHKERRQ(err_PETSc) enddo - call DMRestoreLocalVector(dm_local,x_local,ierr); CHKERRQ(ierr) + call DMRestoreLocalVector(dm_local,x_local,err_PETSc); CHKERRQ(err_PETSc) end subroutine FEM_mechanical_formResidual @@ -442,13 +455,13 @@ end subroutine FEM_mechanical_formResidual !-------------------------------------------------------------------------------------------------- !> @brief forms the FEM stiffness matrix !-------------------------------------------------------------------------------------------------- -subroutine FEM_mechanical_formJacobian(dm_local,xx_local,Jac_pre,Jac,dummy,ierr) +subroutine 
FEM_mechanical_formJacobian(dm_local,xx_local,Jac_pre,Jac,dummy,err_PETSc) DM :: dm_local Mat :: Jac_pre, Jac PetscObject, intent(in) :: dummy - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc PetscDS :: prob Vec :: x_local, xx_local @@ -478,34 +491,43 @@ subroutine FEM_mechanical_formJacobian(dm_local,xx_local,Jac_pre,Jac,dummy,ierr) allocate(pcellJ(dimPlex**2)) allocate(pinvcellJ(dimPlex**2)) - call MatSetOption(Jac,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE,ierr); CHKERRQ(ierr) - call MatSetOption(Jac,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_FALSE,ierr); CHKERRQ(ierr) - call MatZeroEntries(Jac,ierr); CHKERRQ(ierr) - call DMGetDS(dm_local,prob,ierr); CHKERRQ(ierr) - call PetscDSGetTabulation(prob,0,basisField,basisFieldDer,ierr) - call DMGetLocalSection(dm_local,section,ierr); CHKERRQ(ierr) - call DMGetGlobalSection(dm_local,gSection,ierr); CHKERRQ(ierr) + call MatSetOption(Jac,MAT_KEEP_NONZERO_PATTERN,PETSC_TRUE,err_PETSc) + CHKERRQ(err_PETSc) + call MatSetOption(Jac,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_FALSE,err_PETSc) + CHKERRQ(err_PETSc) + call MatZeroEntries(Jac,err_PETSc) + CHKERRQ(err_PETSc) + call DMGetDS(dm_local,prob,err_PETSc) + CHKERRQ(err_PETSc) + call PetscDSGetTabulation(prob,0_pPETSCINT,basisField,basisFieldDer,err_PETSc) + call DMGetLocalSection(dm_local,section,err_PETSc) + CHKERRQ(err_PETSc) + call DMGetGlobalSection(dm_local,gSection,err_PETSc) + CHKERRQ(err_PETSc) - call DMGetLocalVector(dm_local,x_local,ierr); CHKERRQ(ierr) - call VecWAXPY(x_local,1.0_pReal,xx_local,solution_local,ierr); CHKERRQ(ierr) + call DMGetLocalVector(dm_local,x_local,err_PETSc) + CHKERRQ(err_PETSc) + call VecWAXPY(x_local,1.0_pReal,xx_local,solution_local,err_PETSc) + CHKERRQ(err_PETSc) do field = 1, dimPlex; do face = 1, mesh_Nboundaries if (params%fieldBC%componentBC(field)%Mask(face)) then - call DMGetStratumSize(dm_local,'Face Sets',mesh_boundaries(face),bcSize,ierr) + call DMGetStratumSize(dm_local,'Face Sets',mesh_boundaries(face),bcSize,err_PETSc) if (bcSize > 0) then - call DMGetStratumIS(dm_local,'Face Sets',mesh_boundaries(face),bcPoints,ierr) - CHKERRQ(ierr) + call DMGetStratumIS(dm_local,'Face Sets',mesh_boundaries(face),bcPoints,err_PETSc) + CHKERRQ(err_PETSc) call utilities_projectBCValues(x_local,section,0,field-1,bcPoints, & 0.0_pReal,params%fieldBC%componentBC(field)%Value(face),params%timeinc) - call ISDestroy(bcPoints,ierr); CHKERRQ(ierr) + call ISDestroy(bcPoints,err_PETSc); CHKERRQ(err_PETSc) endif endif enddo; enddo - call DMPlexGetHeightStratum(dm_local,0,cellStart,cellEnd,ierr); CHKERRQ(ierr) + call DMPlexGetHeightStratum(dm_local,0_pPETSCINT,cellStart,cellEnd,err_PETSc) + CHKERRQ(err_PETSc) do cell = cellStart, cellEnd-1 !< loop over all elements - call DMPlexVecGetClosure(dm_local,section,x_local,cell,x_scal,ierr) !< get Dofs belonging to element - CHKERRQ(ierr) - call DMPlexComputeCellGeometryAffineFEM(dm_local,cell,pV0,pCellJ,pInvcellJ,detJ,ierr) - CHKERRQ(ierr) + call DMPlexVecGetClosure(dm_local,section,x_local,cell,x_scal,err_PETSc) !< get Dofs belonging to element + CHKERRQ(err_PETSc) + call DMPlexComputeCellGeometryAffineFEM(dm_local,cell,pV0,pCellJ,pInvcellJ,detJ,err_PETSc) + CHKERRQ(err_PETSc) K_eA = 0.0 K_eB = 0.0 MatB = 0.0 @@ -558,27 +580,27 @@ subroutine FEM_mechanical_formJacobian(dm_local,xx_local,Jac_pre,Jac,dummy,ierr) ! 
https://software.intel.com/en-us/forums/intel-fortran-compiler/topic/782230 (bug) allocate(pK_e(cellDOF**2),source = reshape(K_e,[cellDOF**2])) #endif - call DMPlexMatSetClosure(dm_local,section,gSection,Jac,cell,pK_e,ADD_VALUES,ierr) - CHKERRQ(ierr) - call DMPlexVecRestoreClosure(dm_local,section,x_local,cell,x_scal,ierr) - CHKERRQ(ierr) + call DMPlexMatSetClosure(dm_local,section,gSection,Jac,cell,pK_e,ADD_VALUES,err_PETSc) + CHKERRQ(err_PETSc) + call DMPlexVecRestoreClosure(dm_local,section,x_local,cell,x_scal,err_PETSc) + CHKERRQ(err_PETSc) enddo - call MatAssemblyBegin(Jac,MAT_FINAL_ASSEMBLY,ierr); CHKERRQ(ierr) - call MatAssemblyEnd(Jac,MAT_FINAL_ASSEMBLY,ierr); CHKERRQ(ierr) - call MatAssemblyBegin(Jac_pre,MAT_FINAL_ASSEMBLY,ierr); CHKERRQ(ierr) - call MatAssemblyEnd(Jac_pre,MAT_FINAL_ASSEMBLY,ierr); CHKERRQ(ierr) - call DMRestoreLocalVector(dm_local,x_local,ierr); CHKERRQ(ierr) + call MatAssemblyBegin(Jac,MAT_FINAL_ASSEMBLY,err_PETSc); CHKERRQ(err_PETSc) + call MatAssemblyEnd(Jac,MAT_FINAL_ASSEMBLY,err_PETSc); CHKERRQ(err_PETSc) + call MatAssemblyBegin(Jac_pre,MAT_FINAL_ASSEMBLY,err_PETSc); CHKERRQ(err_PETSc) + call MatAssemblyEnd(Jac_pre,MAT_FINAL_ASSEMBLY,err_PETSc); CHKERRQ(err_PETSc) + call DMRestoreLocalVector(dm_local,x_local,err_PETSc); CHKERRQ(err_PETSc) !-------------------------------------------------------------------------------------------------- ! apply boundary conditions #if (PETSC_VERSION_MINOR < 14) - call DMPlexCreateRigidBody(dm_local,matnull,ierr); CHKERRQ(ierr) + call DMPlexCreateRigidBody(dm_local,matnull,err_PETSc); CHKERRQ(err_PETSc) #else - call DMPlexCreateRigidBody(dm_local,0,matnull,ierr); CHKERRQ(ierr) + call DMPlexCreateRigidBody(dm_local,0,matnull,err_PETSc); CHKERRQ(err_PETSc) #endif - call MatSetNullSpace(Jac,matnull,ierr); CHKERRQ(ierr) - call MatSetNearNullSpace(Jac,matnull,ierr); CHKERRQ(ierr) - call MatNullSpaceDestroy(matnull,ierr); CHKERRQ(ierr) + call MatSetNullSpace(Jac,matnull,err_PETSc); CHKERRQ(err_PETSc) + call MatSetNearNullSpace(Jac,matnull,err_PETSc); CHKERRQ(err_PETSc) + call MatNullSpaceDestroy(matnull,err_PETSc); CHKERRQ(err_PETSc) end subroutine FEM_mechanical_formJacobian @@ -601,43 +623,43 @@ subroutine FEM_mechanical_forward(guess,timeinc,timeinc_old,fieldBC) Vec :: x_local PetscSection :: section IS :: bcPoints - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc !-------------------------------------------------------------------------------------------------- ! forward last inc if (guess .and. .not. cutBack) then ForwardData = .True. 
homogenization_F0 = homogenization_F - call SNESGetDM(mechanical_snes,dm_local,ierr); CHKERRQ(ierr) !< retrieve mesh info from mechanical_snes into dm_local - call DMGetSection(dm_local,section,ierr); CHKERRQ(ierr) - call DMGetLocalVector(dm_local,x_local,ierr); CHKERRQ(ierr) - call VecSet(x_local,0.0_pReal,ierr); CHKERRQ(ierr) - call DMGlobalToLocalBegin(dm_local,solution,INSERT_VALUES,x_local,ierr) !< retrieve my partition of global solution vector - CHKERRQ(ierr) - call DMGlobalToLocalEnd(dm_local,solution,INSERT_VALUES,x_local,ierr) - CHKERRQ(ierr) - call VecAXPY(solution_local,1.0,x_local,ierr); CHKERRQ(ierr) + call SNESGetDM(mechanical_snes,dm_local,err_PETSc); CHKERRQ(err_PETSc) !< retrieve mesh info from mechanical_snes into dm_local + call DMGetSection(dm_local,section,err_PETSc); CHKERRQ(err_PETSc) + call DMGetLocalVector(dm_local,x_local,err_PETSc); CHKERRQ(err_PETSc) + call VecSet(x_local,0.0_pReal,err_PETSc); CHKERRQ(err_PETSc) + call DMGlobalToLocalBegin(dm_local,solution,INSERT_VALUES,x_local,err_PETSc) !< retrieve my partition of global solution vector + CHKERRQ(err_PETSc) + call DMGlobalToLocalEnd(dm_local,solution,INSERT_VALUES,x_local,err_PETSc) + CHKERRQ(err_PETSc) + call VecAXPY(solution_local,1.0,x_local,err_PETSc); CHKERRQ(err_PETSc) do field = 1, dimPlex; do face = 1, mesh_Nboundaries if (fieldBC%componentBC(field)%Mask(face)) then - call DMGetStratumSize(dm_local,'Face Sets',mesh_boundaries(face),bcSize,ierr) + call DMGetStratumSize(dm_local,'Face Sets',mesh_boundaries(face),bcSize,err_PETSc) if (bcSize > 0) then - call DMGetStratumIS(dm_local,'Face Sets',mesh_boundaries(face),bcPoints,ierr) - CHKERRQ(ierr) + call DMGetStratumIS(dm_local,'Face Sets',mesh_boundaries(face),bcPoints,err_PETSc) + CHKERRQ(err_PETSc) call utilities_projectBCValues(solution_local,section,0,field-1,bcPoints, & 0.0_pReal,fieldBC%componentBC(field)%Value(face),timeinc_old) - call ISDestroy(bcPoints,ierr); CHKERRQ(ierr) + call ISDestroy(bcPoints,err_PETSc); CHKERRQ(err_PETSc) endif endif enddo; enddo - call DMRestoreLocalVector(dm_local,x_local,ierr); CHKERRQ(ierr) + call DMRestoreLocalVector(dm_local,x_local,err_PETSc); CHKERRQ(err_PETSc) !-------------------------------------------------------------------------------------------------- ! 
update rate and forward last inc - call VecCopy(solution,solution_rate,ierr); CHKERRQ(ierr) - call VecScale(solution_rate,1.0/timeinc_old,ierr); CHKERRQ(ierr) + call VecCopy(solution,solution_rate,err_PETSc); CHKERRQ(err_PETSc) + call VecScale(solution_rate,1.0/timeinc_old,err_PETSc); CHKERRQ(err_PETSc) endif - call VecCopy(solution_rate,solution,ierr); CHKERRQ(ierr) - call VecScale(solution,timeinc,ierr); CHKERRQ(ierr) + call VecCopy(solution_rate,solution,err_PETSc); CHKERRQ(err_PETSc) + call VecScale(solution,timeinc,err_PETSc); CHKERRQ(err_PETSc) end subroutine FEM_mechanical_forward @@ -645,20 +667,20 @@ end subroutine FEM_mechanical_forward !-------------------------------------------------------------------------------------------------- !> @brief reporting !-------------------------------------------------------------------------------------------------- -subroutine FEM_mechanical_converged(snes_local,PETScIter,xnorm,snorm,fnorm,reason,dummy,ierr) +subroutine FEM_mechanical_converged(snes_local,PETScIter,xnorm,snorm,fnorm,reason,dummy,err_PETSc) SNES :: snes_local PetscInt :: PETScIter PetscReal :: xnorm,snorm,fnorm,divTol SNESConvergedReason :: reason PetscObject :: dummy - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc !-------------------------------------------------------------------------------------------------- ! report divTol = max(maxval(abs(P_av(1:dimPlex,1:dimPlex)))*num%eps_struct_rtol,num%eps_struct_atol) - call SNESConvergedDefault(snes_local,PETScIter,xnorm,snorm,fnorm/divTol,reason,dummy,ierr) - CHKERRQ(ierr) + call SNESConvergedDefault(snes_local,PETScIter,xnorm,snorm,fnorm/divTol,reason,dummy,err_PETSc) + CHKERRQ(err_PETSc) if (terminallyIll) reason = SNES_DIVERGED_FUNCTION_DOMAIN print'(/,1x,a,a,i0,a,i0,f0.3)', trim(incInfo), & ' @ Iteration ',PETScIter,' mechanical residual norm = ', & @@ -690,7 +712,7 @@ subroutine FEM_mechanical_updateCoords() DM :: dm_local Vec :: x_local - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc PetscInt :: pStart, pEnd, p, s, e, q, & cellStart, cellEnd, c, n PetscSection :: section @@ -698,34 +720,37 @@ subroutine FEM_mechanical_updateCoords() PetscReal, dimension(:), pointer :: basisField, basisFieldDer PetscScalar, dimension(:), pointer :: x_scal - call SNESGetDM(mechanical_snes,dm_local,ierr); CHKERRQ(ierr) - call DMGetDS(dm_local,mechQuad,ierr); CHKERRQ(ierr) - call DMGetLocalSection(dm_local,section,ierr); CHKERRQ(ierr) - call DMGetLocalVector(dm_local,x_local,ierr); CHKERRQ(ierr) - call DMGetDimension(dm_local,dimPlex,ierr); CHKERRQ(ierr) + call SNESGetDM(mechanical_snes,dm_local,err_PETSc); CHKERRQ(err_PETSc) + call DMGetDS(dm_local,mechQuad,err_PETSc); CHKERRQ(err_PETSc) + call DMGetLocalSection(dm_local,section,err_PETSc); CHKERRQ(err_PETSc) + call DMGetLocalVector(dm_local,x_local,err_PETSc); CHKERRQ(err_PETSc) + call DMGetDimension(dm_local,dimPlex,err_PETSc); CHKERRQ(err_PETSc) ! 
write cell vertex displacements - call DMPlexGetDepthStratum(dm_local,0,pStart,pEnd,ierr); CHKERRQ(ierr) + call DMPlexGetDepthStratum(dm_local,0,pStart,pEnd,err_PETSc); CHKERRQ(err_PETSc) allocate(nodeCoords(3,pStart:pEnd-1),source=0.0_pReal) - call VecGetArrayF90(x_local,nodeCoords_linear,ierr); CHKERRQ(ierr) + call VecGetArrayF90(x_local,nodeCoords_linear,err_PETSc); CHKERRQ(err_PETSc) do p=pStart, pEnd-1 - call DMPlexGetPointLocal(dm_local, p, s, e, ierr); CHKERRQ(ierr) + call DMPlexGetPointLocal(dm_local, p, s, e, err_PETSc); CHKERRQ(err_PETSc) nodeCoords(1:dimPlex,p)=nodeCoords_linear(s+1:e) enddo call discretization_setNodeCoords(nodeCoords) - call VecRestoreArrayF90(x_local,nodeCoords_linear,ierr); CHKERRQ(ierr) + call VecRestoreArrayF90(x_local,nodeCoords_linear,err_PETSc); CHKERRQ(err_PETSc) ! write ip displacements - call DMPlexGetHeightStratum(dm_local,0,cellStart,cellEnd,ierr); CHKERRQ(ierr) - call PetscDSGetTabulation(mechQuad,0,basisField,basisFieldDer,ierr); CHKERRQ(ierr) + call DMPlexGetHeightStratum(dm_local,0_pPETSCINT,cellStart,cellEnd,err_PETSc) + CHKERRQ(err_PETSc) + call PetscDSGetTabulation(mechQuad,0_pPETSCINT,basisField,basisFieldDer,err_PETSc) + CHKERRQ(err_PETSc) allocate(ipCoords(3,nQuadrature,mesh_NcpElems),source=0.0_pReal) do c=cellStart,cellEnd-1 qOffset=0 - call DMPlexVecGetClosure(dm_local,section,x_local,c,x_scal,ierr); CHKERRQ(ierr) !< get nodal coordinates of each element + call DMPlexVecGetClosure(dm_local,section,x_local,c,x_scal,err_PETSc) !< get nodal coordinates of each element + CHKERRQ(err_PETSc) do qPt=0,nQuadrature-1 qOffset= qPt * (size(basisField)/nQuadrature) - do comp=0,dimPlex-1 !< loop over components + do comp=0,dimPlex-1 !< loop over components nOffset=0 q = comp do n=0,nBasis-1 @@ -737,10 +762,11 @@ subroutine FEM_mechanical_updateCoords() enddo enddo enddo - call DMPlexVecRestoreClosure(dm_local,section,x_local,c,x_scal,ierr); CHKERRQ(ierr) + call DMPlexVecRestoreClosure(dm_local,section,x_local,c,x_scal,err_PETSc) + CHKERRQ(err_PETSc) end do call discretization_setIPcoords(reshape(ipCoords,[3,mesh_NcpElems*nQuadrature])) - call DMRestoreLocalVector(dm_local,x_local,ierr); CHKERRQ(ierr) + call DMRestoreLocalVector(dm_local,x_local,err_PETSc); CHKERRQ(err_PETSc) end subroutine FEM_mechanical_updateCoords diff --git a/src/results.f90 b/src/results.f90 index d85a30dc0..dc77d5a7f 100644 --- a/src/results.f90 +++ b/src/results.f90 @@ -499,7 +499,7 @@ subroutine results_mapping_phase(ID,entry,label) integer(pI64), dimension(size(entry,1),size(entry,2)) :: & entryGlobal - integer(pI64), dimension(size(label),0:worldsize-1) :: entryOffset !< offset in entry counting per process + integer(pI64), dimension(size(label),0:worldsize-1) :: entryOffset !< offset in entry counting per process integer, dimension(0:worldsize-1) :: writeSize !< amount of data written per process integer(HSIZE_T), dimension(2) :: & myShape, & !< shape of the dataset (this process) From d7dbb6ffc2b06278c42793bcf86f0cde32c5c044 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Thu, 13 Jan 2022 12:03:22 +0100 Subject: [PATCH 36/58] needs to be public for Marc --- src/parallelization.f90 | 1 + 1 file changed, 1 insertion(+) diff --git a/src/parallelization.f90 b/src/parallelization.f90 index 94bf6d343..d9ca15981 100644 --- a/src/parallelization.f90 +++ b/src/parallelization.f90 @@ -33,6 +33,7 @@ module parallelization #endif #ifndef PETSC +public :: parallelization_bcast_str contains subroutine parallelization_bcast_str(string) From 91a3ea96ec82c719d2de60738a4998b853a6ad3a 
Mon Sep 17 00:00:00 2001
From: Martin Diehl
Date: Thu, 13 Jan 2022 12:41:11 +0100
Subject: [PATCH 37/58] final MPI-DAMASK integer kind decoupling

bugfix: set error for OpenMP calculations
---
 src/grid/grid_damage_spectral.f90 | 100 ++++++++++++++++-------------
 src/grid/grid_thermal_spectral.f90 | 90 +++++++++++++++-----------
 src/quit.f90 | 8 ++-
 3 files changed, 114 insertions(+), 84 deletions(-)

diff --git a/src/grid/grid_damage_spectral.f90 b/src/grid/grid_damage_spectral.f90
index 69370b211..480f3c944 100644
--- a/src/grid/grid_damage_spectral.f90
+++ b/src/grid/grid_damage_spectral.f90
@@ -69,7 +69,8 @@ subroutine grid_damage_spectral_init()
 PetscInt, dimension(0:worldsize-1) :: localK
 DM :: damage_grid
 Vec :: uBound, lBound
- PetscErrorCode :: ierr
+ integer(MPI_INTEGER_KIND) :: err_MPI
+ PetscErrorCode :: err_PETSc
 class(tNode), pointer :: &
 num_grid, &
 num_generic
@@ -99,18 +100,19 @@ subroutine grid_damage_spectral_init()
 !--------------------------------------------------------------------------------------------------
 ! set default and user defined options for PETSc
 call PetscOptionsInsertString(PETSC_NULL_OPTIONS,'-damage_snes_type newtonls -damage_snes_mf &
- &-damage_snes_ksp_ew -damage_ksp_type fgmres',ierr)
- CHKERRQ(ierr)
- call PetscOptionsInsertString(PETSC_NULL_OPTIONS,num_grid%get_asString('petsc_options',defaultVal=''),ierr)
- CHKERRQ(ierr)
+ &-damage_snes_ksp_ew -damage_ksp_type fgmres',err_PETSc)
+ CHKERRQ(err_PETSc)
+ call PetscOptionsInsertString(PETSC_NULL_OPTIONS,num_grid%get_asString('petsc_options',defaultVal=''),err_PETSc)
+ CHKERRQ(err_PETSc)
 !--------------------------------------------------------------------------------------------------
 ! initialize solver specific parts of PETSc
- call SNESCreate(PETSC_COMM_WORLD,damage_snes,ierr); CHKERRQ(ierr)
- call SNESSetOptionsPrefix(damage_snes,'damage_',ierr);CHKERRQ(ierr)
+ call SNESCreate(PETSC_COMM_WORLD,damage_snes,err_PETSc); CHKERRQ(err_PETSc)
+ call SNESSetOptionsPrefix(damage_snes,'damage_',err_PETSc);CHKERRQ(err_PETSc)
 localK = 0_pPetscInt
 localK(worldrank) = int(grid3,pPetscInt)
- call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr)
+ call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI)
+ if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
 call DMDACreate3D(PETSC_COMM_WORLD, &
 DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary
 DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point
 1_pPetscInt, 1_pPetscInt, int(worldsize,pPetscInt), &
 1_pPetscInt, 0_pPetscInt, & ! #dof (phi, scalar), ghost boundary width (domain overlap)
 [int(grid(1),pPetscInt)],[int(grid(2),pPetscInt)],localK, & ! local grid
- damage_grid,ierr) ! handle, error
- CHKERRQ(ierr)
- call SNESSetDM(damage_snes,damage_grid,ierr); CHKERRQ(ierr) ! connect snes to da
- call DMsetFromOptions(damage_grid,ierr); CHKERRQ(ierr)
- call DMsetUp(damage_grid,ierr); CHKERRQ(ierr)
- call DMCreateGlobalVector(damage_grid,solution_vec,ierr); CHKERRQ(ierr) ! global solution vector (grid x 1, i.e. every def grad tensor)
- call DMDASNESSetFunctionLocal(damage_grid,INSERT_VALUES,formResidual,PETSC_NULL_SNES,ierr) ! residual vector of same shape as solution vector
- CHKERRQ(ierr)
- call SNESSetFromOptions(damage_snes,ierr); CHKERRQ(ierr) ! 
pull it all together with additional CLI arguments - call SNESGetType(damage_snes,snes_type,ierr); CHKERRQ(ierr) + damage_grid,err_PETSc) ! handle, error + CHKERRQ(err_PETSc) + call SNESSetDM(damage_snes,damage_grid,err_PETSc); CHKERRQ(err_PETSc) ! connect snes to da + call DMsetFromOptions(damage_grid,err_PETSc); CHKERRQ(err_PETSc) + call DMsetUp(damage_grid,err_PETSc); CHKERRQ(err_PETSc) + call DMCreateGlobalVector(damage_grid,solution_vec,err_PETSc); CHKERRQ(err_PETSc) ! global solution vector (grid x 1, i.e. every def grad tensor) + call DMDASNESSetFunctionLocal(damage_grid,INSERT_VALUES,formResidual,PETSC_NULL_SNES,err_PETSc) ! residual vector of same shape as solution vector + CHKERRQ(err_PETSc) + call SNESSetFromOptions(damage_snes,err_PETSc); CHKERRQ(err_PETSc) ! pull it all together with additional CLI arguments + call SNESGetType(damage_snes,snes_type,err_PETSc); CHKERRQ(err_PETSc) if (trim(snes_type) == 'vinewtonrsls' .or. & trim(snes_type) == 'vinewtonssls') then - call DMGetGlobalVector(damage_grid,lBound,ierr); CHKERRQ(ierr) - call DMGetGlobalVector(damage_grid,uBound,ierr); CHKERRQ(ierr) - call VecSet(lBound,0.0_pReal,ierr); CHKERRQ(ierr) - call VecSet(uBound,1.0_pReal,ierr); CHKERRQ(ierr) - call SNESVISetVariableBounds(damage_snes,lBound,uBound,ierr) ! variable bounds for variational inequalities like contact mechanics, damage etc. - call DMRestoreGlobalVector(damage_grid,lBound,ierr); CHKERRQ(ierr) - call DMRestoreGlobalVector(damage_grid,uBound,ierr); CHKERRQ(ierr) + call DMGetGlobalVector(damage_grid,lBound,err_PETSc); CHKERRQ(err_PETSc) + call DMGetGlobalVector(damage_grid,uBound,err_PETSc); CHKERRQ(err_PETSc) + call VecSet(lBound,0.0_pReal,err_PETSc); CHKERRQ(err_PETSc) + call VecSet(uBound,1.0_pReal,err_PETSc); CHKERRQ(err_PETSc) + call SNESVISetVariableBounds(damage_snes,lBound,uBound,err_PETSc) ! variable bounds for variational inequalities like contact mechanics, damage etc. + call DMRestoreGlobalVector(damage_grid,lBound,err_PETSc); CHKERRQ(err_PETSc) + call DMRestoreGlobalVector(damage_grid,uBound,err_PETSc); CHKERRQ(err_PETSc) end if !-------------------------------------------------------------------------------------------------- ! init fields - call DMDAGetCorners(damage_grid,xstart,ystart,zstart,xend,yend,zend,ierr) - CHKERRQ(ierr) + call DMDAGetCorners(damage_grid,xstart,ystart,zstart,xend,yend,zend,err_PETSc) + CHKERRQ(err_PETSc) xend = xstart + xend - 1 yend = ystart + yend - 1 zend = zstart + zend - 1 @@ -150,7 +152,7 @@ subroutine grid_damage_spectral_init() allocate(phi_lastInc(grid(1),grid(2),grid3), source=1.0_pReal) allocate(phi_stagInc(grid(1),grid(2),grid3), source=1.0_pReal) - call VecSet(solution_vec,1.0_pReal,ierr); CHKERRQ(ierr) + call VecSet(solution_vec,1.0_pReal,err_PETSc); CHKERRQ(err_PETSc) call updateReference @@ -169,7 +171,8 @@ function grid_damage_spectral_solution(Delta_t) result(solution) PetscInt :: devNull PetscReal :: phi_min, phi_max, stagNorm - PetscErrorCode :: ierr + integer(MPI_INTEGER_KIND) :: err_MPI + PetscErrorCode :: err_PETSc SNESConvergedReason :: reason solution%converged =.false. @@ -178,8 +181,10 @@ function grid_damage_spectral_solution(Delta_t) result(solution) ! 
set module wide availabe data params%Delta_t = Delta_t - call SNESSolve(damage_snes,PETSC_NULL_VEC,solution_vec,ierr); CHKERRQ(ierr) - call SNESGetConvergedReason(damage_snes,reason,ierr); CHKERRQ(ierr) + call SNESSolve(damage_snes,PETSC_NULL_VEC,solution_vec,err_PETSc) + CHKERRQ(err_PETSc) + call SNESGetConvergedReason(damage_snes,reason,err_PETSc) + CHKERRQ(err_PETSc) if (reason < 1) then solution%converged = .false. @@ -189,9 +194,11 @@ function grid_damage_spectral_solution(Delta_t) result(solution) solution%iterationsNeeded = totalIter end if stagNorm = maxval(abs(phi_current - phi_stagInc)) - call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' solution%stagConverged = stagNorm < max(num%eps_damage_atol, num%eps_damage_rtol*maxval(phi_current)) - call MPI_Allreduce(MPI_IN_PLACE,solution%stagConverged,1,MPI_LOGICAL,MPI_LAND,MPI_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,solution%stagConverged,1,MPI_LOGICAL,MPI_LAND,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' phi_stagInc = phi_current !-------------------------------------------------------------------------------------------------- @@ -202,8 +209,8 @@ function grid_damage_spectral_solution(Delta_t) result(solution) call homogenization_set_phi(phi_current(i,j,k),ce) end do; end do; end do - call VecMin(solution_vec,devNull,phi_min,ierr); CHKERRQ(ierr) - call VecMax(solution_vec,devNull,phi_max,ierr); CHKERRQ(ierr) + call VecMin(solution_vec,devNull,phi_min,err_PETSc); CHKERRQ(err_PETSc) + call VecMax(solution_vec,devNull,phi_max,err_PETSc); CHKERRQ(err_PETSc) if (solution%converged) & print'(/,1x,a)', '... nonlocal damage converged .....................................' print'(/,1x,a,f8.6,2x,f8.6,2x,e11.4)', 'Minimum|Maximum|Delta Damage = ', phi_min, phi_max, stagNorm @@ -222,17 +229,19 @@ subroutine grid_damage_spectral_forward(cutBack) integer :: i, j, k, ce DM :: dm_local PetscScalar, dimension(:,:,:), pointer :: x_scal - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc if (cutBack) then phi_current = phi_lastInc phi_stagInc = phi_lastInc !-------------------------------------------------------------------------------------------------- ! 
reverting damage field state - call SNESGetDM(damage_snes,dm_local,ierr); CHKERRQ(ierr) - call DMDAVecGetArrayF90(dm_local,solution_vec,x_scal,ierr); CHKERRQ(ierr) !< get the data out of PETSc to work with + call SNESGetDM(damage_snes,dm_local,err_PETSc); CHKERRQ(err_PETSc) + call DMDAVecGetArrayF90(dm_local,solution_vec,x_scal,err_PETSc) !< get the data out of PETSc to work with + CHKERRQ(err_PETSc) x_scal(xstart:xend,ystart:yend,zstart:zend) = phi_current - call DMDAVecRestoreArrayF90(dm_local,solution_vec,x_scal,ierr); CHKERRQ(ierr) + call DMDAVecRestoreArrayF90(dm_local,solution_vec,x_scal,err_PETSc) + CHKERRQ(err_PETSc) ce = 0 do k = 1, grid3; do j = 1, grid(2); do i = 1,grid(1) ce = ce + 1 @@ -249,7 +258,7 @@ end subroutine grid_damage_spectral_forward !-------------------------------------------------------------------------------------------------- !> @brief forms the spectral damage residual vector !-------------------------------------------------------------------------------------------------- -subroutine formResidual(in,x_scal,f_scal,dummy,ierr) +subroutine formResidual(in,x_scal,f_scal,dummy,dummy_err) DMDALocalInfo, dimension(DMDA_LOCAL_INFO_SIZE) :: & in @@ -260,7 +269,7 @@ subroutine formResidual(in,x_scal,f_scal,dummy,ierr) X_RANGE,Y_RANGE,Z_RANGE), intent(out) :: & f_scal PetscObject :: dummy - PetscErrorCode :: ierr + PetscErrorCode :: dummy_err integer :: i, j, k, ce @@ -311,7 +320,8 @@ end subroutine formResidual !-------------------------------------------------------------------------------------------------- subroutine updateReference() - integer :: ce,ierr + integer :: ce + integer(MPI_INTEGER_KIND) :: err_MPI K_ref = 0.0_pReal @@ -322,9 +332,11 @@ subroutine updateReference() end do K_ref = K_ref*wgt - call MPI_Allreduce(MPI_IN_PLACE,K_ref,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,K_ref,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' mu_ref = mu_ref*wgt - call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' end subroutine updateReference diff --git a/src/grid/grid_thermal_spectral.f90 b/src/grid/grid_thermal_spectral.f90 index 548fa4b3c..edc703858 100644 --- a/src/grid/grid_thermal_spectral.f90 +++ b/src/grid/grid_thermal_spectral.f90 @@ -71,7 +71,8 @@ subroutine grid_thermal_spectral_init(T_0) integer :: i, j, k, ce DM :: thermal_grid PetscScalar, dimension(:,:,:), pointer :: T_PETSc - PetscErrorCode :: ierr + integer(MPI_INTEGER_KIND) :: err_MPI + PetscErrorCode :: err_PETSc class(tNode), pointer :: & num_grid @@ -94,18 +95,19 @@ subroutine grid_thermal_spectral_init(T_0) !-------------------------------------------------------------------------------------------------- ! 
set default and user defined options for PETSc call PetscOptionsInsertString(PETSC_NULL_OPTIONS,'-thermal_snes_type newtonls -thermal_snes_mf & - &-thermal_snes_ksp_ew -thermal_ksp_type fgmres',ierr) - CHKERRQ(ierr) - call PetscOptionsInsertString(PETSC_NULL_OPTIONS,num_grid%get_asString('petsc_options',defaultVal=''),ierr) - CHKERRQ(ierr) + &-thermal_snes_ksp_ew -thermal_ksp_type fgmres',err_PETSc) + CHKERRQ(err_PETSc) + call PetscOptionsInsertString(PETSC_NULL_OPTIONS,num_grid%get_asString('petsc_options',defaultVal=''),err_PETSc) + CHKERRQ(err_PETSc) !-------------------------------------------------------------------------------------------------- ! initialize solver specific parts of PETSc - call SNESCreate(PETSC_COMM_WORLD,thermal_snes,ierr); CHKERRQ(ierr) - call SNESSetOptionsPrefix(thermal_snes,'thermal_',ierr);CHKERRQ(ierr) + call SNESCreate(PETSC_COMM_WORLD,thermal_snes,err_PETSc); CHKERRQ(err_PETSc) + call SNESSetOptionsPrefix(thermal_snes,'thermal_',err_PETSc);CHKERRQ(err_PETSc) localK = 0_pPetscInt localK(worldrank) = int(grid3,pPetscInt) - call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call DMDACreate3D(PETSC_COMM_WORLD, & DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point @@ -113,20 +115,21 @@ subroutine grid_thermal_spectral_init(T_0) 1_pPetscInt, 1_pPetscInt, int(worldsize,pPetscInt), & 1_pPetscInt, 0_pPetscInt, & ! #dof (T, scalar), ghost boundary width (domain overlap) [int(grid(1),pPetscInt)],[int(grid(2),pPetscInt)],localK, & ! local grid - thermal_grid,ierr) ! handle, error - CHKERRQ(ierr) - call SNESSetDM(thermal_snes,thermal_grid,ierr); CHKERRQ(ierr) ! connect snes to da - call DMsetFromOptions(thermal_grid,ierr); CHKERRQ(ierr) - call DMsetUp(thermal_grid,ierr); CHKERRQ(ierr) - call DMCreateGlobalVector(thermal_grid,solution_vec,ierr); CHKERRQ(ierr) ! global solution vector (grid x 1, i.e. every def grad tensor) - call DMDASNESSetFunctionLocal(thermal_grid,INSERT_VALUES,formResidual,PETSC_NULL_SNES,ierr) ! residual vector of same shape as solution vector - CHKERRQ(ierr) - call SNESSetFromOptions(thermal_snes,ierr); CHKERRQ(ierr) ! pull it all together with additional CLI arguments + thermal_grid,err_PETSc) ! handle, error + CHKERRQ(err_PETSc) + call SNESSetDM(thermal_snes,thermal_grid,err_PETSc); CHKERRQ(err_PETSc) ! connect snes to da + call DMsetFromOptions(thermal_grid,err_PETSc); CHKERRQ(err_PETSc) + call DMsetUp(thermal_grid,err_PETSc); CHKERRQ(err_PETSc) + call DMCreateGlobalVector(thermal_grid,solution_vec,err_PETSc) ! global solution vector (grid x 1, i.e. every def grad tensor) + CHKERRQ(err_PETSc) + call DMDASNESSetFunctionLocal(thermal_grid,INSERT_VALUES,formResidual,PETSC_NULL_SNES,err_PETSc) ! residual vector of same shape as solution vector + CHKERRQ(err_PETSc) + call SNESSetFromOptions(thermal_snes,err_PETSc); CHKERRQ(err_PETSc) ! pull it all together with additional CLI arguments !-------------------------------------------------------------------------------------------------- ! 
init fields - call DMDAGetCorners(thermal_grid,xstart,ystart,zstart,xend,yend,zend,ierr) - CHKERRQ(ierr) + call DMDAGetCorners(thermal_grid,xstart,ystart,zstart,xend,yend,zend,err_PETSc) + CHKERRQ(err_PETSc) xend = xstart + xend - 1 yend = ystart + yend - 1 zend = zstart + zend - 1 @@ -143,9 +146,11 @@ subroutine grid_thermal_spectral_init(T_0) call homogenization_thermal_setField(T_0,0.0_pReal,ce) end do; end do; end do - call DMDAVecGetArrayF90(thermal_grid,solution_vec,T_PETSc,ierr); CHKERRQ(ierr) + call DMDAVecGetArrayF90(thermal_grid,solution_vec,T_PETSc,err_PETSc) + CHKERRQ(err_PETSc) T_PETSc(xstart:xend,ystart:yend,zstart:zend) = T_current - call DMDAVecRestoreArrayF90(thermal_grid,solution_vec,T_PETSc,ierr); CHKERRQ(ierr) + call DMDAVecRestoreArrayF90(thermal_grid,solution_vec,T_PETSc,err_PETSc) + CHKERRQ(err_PETSc) call updateReference @@ -164,7 +169,8 @@ function grid_thermal_spectral_solution(Delta_t) result(solution) PetscInt :: devNull PetscReal :: T_min, T_max, stagNorm - PetscErrorCode :: ierr + integer(MPI_INTEGER_KIND) :: err_MPI + PetscErrorCode :: err_PETSc SNESConvergedReason :: reason solution%converged =.false. @@ -173,8 +179,10 @@ function grid_thermal_spectral_solution(Delta_t) result(solution) ! set module wide availabe data params%Delta_t = Delta_t - call SNESSolve(thermal_snes,PETSC_NULL_VEC,solution_vec,ierr); CHKERRQ(ierr) - call SNESGetConvergedReason(thermal_snes,reason,ierr); CHKERRQ(ierr) + call SNESSolve(thermal_snes,PETSC_NULL_VEC,solution_vec,err_PETSc) + CHKERRQ(err_PETSc) + call SNESGetConvergedReason(thermal_snes,reason,err_PETSc) + CHKERRQ(err_PETSc) if (reason < 1) then solution%converged = .false. @@ -184,9 +192,11 @@ function grid_thermal_spectral_solution(Delta_t) result(solution) solution%iterationsNeeded = totalIter end if stagNorm = maxval(abs(T_current - T_stagInc)) - call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' solution%stagConverged = stagNorm < max(num%eps_thermal_atol, num%eps_thermal_rtol*maxval(T_current)) - call MPI_Allreduce(MPI_IN_PLACE,solution%stagConverged,1,MPI_LOGICAL,MPI_LAND,MPI_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,solution%stagConverged,1,MPI_LOGICAL,MPI_LAND,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' T_stagInc = T_current !-------------------------------------------------------------------------------------------------- @@ -197,8 +207,8 @@ function grid_thermal_spectral_solution(Delta_t) result(solution) call homogenization_thermal_setField(T_current(i,j,k),(T_current(i,j,k)-T_lastInc(i,j,k))/params%Delta_t,ce) end do; end do; end do - call VecMin(solution_vec,devNull,T_min,ierr); CHKERRQ(ierr) - call VecMax(solution_vec,devNull,T_max,ierr); CHKERRQ(ierr) + call VecMin(solution_vec,devNull,T_min,err_PETSc); CHKERRQ(err_PETSc) + call VecMax(solution_vec,devNull,T_max,err_PETSc); CHKERRQ(err_PETSc) if (solution%converged) & print'(/,1x,a)', '... thermal conduction converged ..................................' 
print'(/,1x,a,f8.4,2x,f8.4,2x,f8.4)', 'Minimum|Maximum|Delta Temperature / K = ', T_min, T_max, stagNorm @@ -217,7 +227,7 @@ subroutine grid_thermal_spectral_forward(cutBack) integer :: i, j, k, ce DM :: dm_local PetscScalar, dimension(:,:,:), pointer :: x_scal - PetscErrorCode :: ierr + PetscErrorCode :: err_PETSc if (cutBack) then T_current = T_lastInc @@ -225,10 +235,13 @@ subroutine grid_thermal_spectral_forward(cutBack) !-------------------------------------------------------------------------------------------------- ! reverting thermal field state - call SNESGetDM(thermal_snes,dm_local,ierr); CHKERRQ(ierr) - call DMDAVecGetArrayF90(dm_local,solution_vec,x_scal,ierr); CHKERRQ(ierr) !< get the data out of PETSc to work with + call SNESGetDM(thermal_snes,dm_local,err_PETSc) + CHKERRQ(err_PETSc) + call DMDAVecGetArrayF90(dm_local,solution_vec,x_scal,err_PETSc) !< get the data out of PETSc to work with + CHKERRQ(err_PETSc) x_scal(xstart:xend,ystart:yend,zstart:zend) = T_current - call DMDAVecRestoreArrayF90(dm_local,solution_vec,x_scal,ierr); CHKERRQ(ierr) + call DMDAVecRestoreArrayF90(dm_local,solution_vec,x_scal,err_PETSc) + CHKERRQ(err_PETSc) ce = 0 do k = 1, grid3; do j = 1, grid(2); do i = 1,grid(1) ce = ce + 1 @@ -245,7 +258,7 @@ end subroutine grid_thermal_spectral_forward !-------------------------------------------------------------------------------------------------- !> @brief forms the spectral thermal residual vector !-------------------------------------------------------------------------------------------------- -subroutine formResidual(in,x_scal,f_scal,dummy,ierr) +subroutine formResidual(in,x_scal,f_scal,dummy,dummy_err) DMDALocalInfo, dimension(DMDA_LOCAL_INFO_SIZE) :: & in @@ -256,7 +269,7 @@ subroutine formResidual(in,x_scal,f_scal,dummy,ierr) X_RANGE,Y_RANGE,Z_RANGE), intent(out) :: & f_scal PetscObject :: dummy - PetscErrorCode :: ierr + PetscErrorCode :: dummy_err integer :: i, j, k, ce T_current = x_scal @@ -301,7 +314,8 @@ end subroutine formResidual !-------------------------------------------------------------------------------------------------- subroutine updateReference() - integer :: ce,ierr + integer :: ce + integer(MPI_INTEGER_KIND) :: err_MPI K_ref = 0.0_pReal @@ -312,9 +326,11 @@ subroutine updateReference() end do K_ref = K_ref*wgt - call MPI_Allreduce(MPI_IN_PLACE,K_ref,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,K_ref,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' mu_ref = mu_ref*wgt - call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,ierr) + call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) + if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' end subroutine updateReference diff --git a/src/quit.f90 b/src/quit.f90 index 3f9aabdf5..3c5f5e6f2 100644 --- a/src/quit.f90 +++ b/src/quit.f90 @@ -20,16 +20,18 @@ subroutine quit(stop_id) PetscErrorCode :: err_PETSc call h5open_f(err_HDF5) - if (err_HDF5 /= 0) write(6,'(a,i5)') ' Error in h5open_f ',err_HDF5 ! prevents error if not opened yet + if (err_HDF5 /= 0_MPI_INTEGER_KIND) write(6,'(a,i5)') ' Error in h5open_f ',err_HDF5 ! 
prevents error if not opened yet call h5close_f(err_HDF5) - if (err_HDF5 /= 0) write(6,'(a,i5)') ' Error in h5close_f ',err_HDF5 + if (err_HDF5 /= 0_MPI_INTEGER_KIND) write(6,'(a,i5)') ' Error in h5close_f ',err_HDF5 call PetscFinalize(err_PETSc) CHKERRQ(err_PETSc) #ifdef _OPENMP call MPI_finalize(err_MPI) - if (err_MPI /= 0) write(6,'(a,i5)') ' Error in MPI_finalize',err_MPI + if (err_MPI /= 0_MPI_INTEGER_KIND) write(6,'(a,i5)') ' Error in MPI_finalize',err_MPI +#else + err_MPI = 0_MPI_INTEGER_KIND #endif call date_and_time(values = dateAndTime) From 29530da579ec1ec28b4c75ca8fff88ac2def098e Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Thu, 13 Jan 2022 13:50:30 +0100 Subject: [PATCH 38/58] use correct kind of constants for calls to MPI/PETSc --- src/grid/discretization_grid.f90 | 6 ++-- src/grid/grid_damage_spectral.f90 | 4 +-- src/grid/grid_mech_FEM.f90 | 8 ++--- src/grid/grid_mech_spectral_basic.f90 | 16 ++++----- src/grid/grid_mech_spectral_polarisation.f90 | 18 +++++----- src/grid/grid_thermal_spectral.f90 | 8 ++--- src/grid/spectral_utilities.f90 | 35 ++++++++++---------- src/mesh/FEM_utilities.f90 | 2 +- src/mesh/discretization_mesh.f90 | 16 ++++----- 9 files changed, 57 insertions(+), 56 deletions(-) diff --git a/src/grid/discretization_grid.f90 b/src/grid/discretization_grid.f90 index 93b3cbc91..738206f9a 100644 --- a/src/grid/discretization_grid.f90 +++ b/src/grid/discretization_grid.f90 @@ -88,12 +88,12 @@ subroutine discretization_grid_init(restart) end if - call MPI_Bcast(grid,3,MPI_INTEGER,0,MPI_COMM_WORLD, err_MPI) + call MPI_Bcast(grid,3_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD, err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' if (grid(1) < 2) call IO_error(844, ext_msg='cells(1) must be larger than 1') - call MPI_Bcast(geomSize,3,MPI_DOUBLE,0,MPI_COMM_WORLD, err_MPI) + call MPI_Bcast(geomSize,3_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD, err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' - call MPI_Bcast(origin,3,MPI_DOUBLE,0,MPI_COMM_WORLD, err_MPI) + call MPI_Bcast(origin,3_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD, err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' print'(/,1x,a,3(i12,1x))', 'cells a b c: ', grid diff --git a/src/grid/grid_damage_spectral.f90 b/src/grid/grid_damage_spectral.f90 index 480f3c944..8955f9d66 100644 --- a/src/grid/grid_damage_spectral.f90 +++ b/src/grid/grid_damage_spectral.f90 @@ -194,10 +194,10 @@ function grid_damage_spectral_solution(Delta_t) result(solution) solution%iterationsNeeded = totalIter end if stagNorm = maxval(abs(phi_current - phi_stagInc)) - call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD,err_MPI) + call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' solution%stagConverged = stagNorm < max(num%eps_damage_atol, num%eps_damage_rtol*maxval(phi_current)) - call MPI_Allreduce(MPI_IN_PLACE,solution%stagConverged,1,MPI_LOGICAL,MPI_LAND,MPI_COMM_WORLD,err_MPI) + call MPI_Allreduce(MPI_IN_PLACE,solution%stagConverged,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LAND,MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' phi_stagInc = phi_current diff --git a/src/grid/grid_mech_FEM.f90 b/src/grid/grid_mech_FEM.f90 index dbb87440b..010f11cea 100644 --- a/src/grid/grid_mech_FEM.f90 +++ b/src/grid/grid_mech_FEM.f90 @@ -241,16 +241,16 @@ subroutine grid_mechanical_FEM_init 
groupHandle = HDF5_openGroup(fileHandle,'solver') call HDF5_read(P_aim,groupHandle,'P_aim',.false.) - call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,err_MPI) + call MPI_Bcast(P_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) if(err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_read(F_aim,groupHandle,'F_aim',.false.) - call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,err_MPI) + call MPI_Bcast(F_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) if(err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.) - call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,MPI_COMM_WORLD,err_MPI) + call MPI_Bcast(F_aim_lastInc,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) if(err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.) - call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,MPI_COMM_WORLD,err_MPI) + call MPI_Bcast(F_aimDot,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) if(err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_read(F,groupHandle,'F') call HDF5_read(F_lastInc,groupHandle,'F_lastInc') diff --git a/src/grid/grid_mech_spectral_basic.f90 b/src/grid/grid_mech_spectral_basic.f90 index 2d5376781..74400f160 100644 --- a/src/grid/grid_mech_spectral_basic.f90 +++ b/src/grid/grid_mech_spectral_basic.f90 @@ -194,16 +194,16 @@ subroutine grid_mechanical_spectral_basic_init groupHandle = HDF5_openGroup(fileHandle,'solver') call HDF5_read(P_aim,groupHandle,'P_aim',.false.) - call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,err_MPI) + call MPI_Bcast(P_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_read(F_aim,groupHandle,'F_aim',.false.) - call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,err_MPI) + call MPI_Bcast(F_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.) - call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,MPI_COMM_WORLD,err_MPI) + call MPI_Bcast(F_aim_lastInc,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.) - call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,MPI_COMM_WORLD,err_MPI) + call MPI_Bcast(F_aimDot,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_read(F,groupHandle,'F') call HDF5_read(F_lastInc,groupHandle,'F_lastInc') @@ -223,10 +223,10 @@ subroutine grid_mechanical_spectral_basic_init restartRead2: if (interface_restartInc > 0) then print'(1x,a,i0,a)', 'reading more restart data of increment ', interface_restartInc, ' from file' call HDF5_read(C_volAvg,groupHandle,'C_volAvg',.false.) - call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,MPI_COMM_WORLD,err_MPI) + call MPI_Bcast(C_volAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_read(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.) 
- call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,MPI_COMM_WORLD,err_MPI) + call MPI_Bcast(C_volAvgLastInc,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_closeGroup(groupHandle) @@ -235,7 +235,7 @@ subroutine grid_mechanical_spectral_basic_init call MPI_File_open(MPI_COMM_WORLD, trim(getSolverJobName())//'.C_ref', & MPI_MODE_RDONLY,MPI_INFO_NULL,fileUnit,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' - call MPI_File_read(fileUnit,C_minMaxAvg,81,MPI_DOUBLE,MPI_STATUS_IGNORE,err_MPI) + call MPI_File_read(fileUnit,C_minMaxAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_STATUS_IGNORE,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call MPI_File_close(fileUnit,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' @@ -503,7 +503,7 @@ subroutine formResidual(in, F, & call utilities_constitutiveResponse(residuum, & ! "residuum" gets field of first PK stress (to save memory) P_av,C_volAvg,C_minMaxAvg, & F,params%Delta_t,params%rotation_BC) - call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI) + call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' !-------------------------------------------------------------------------------------------------- diff --git a/src/grid/grid_mech_spectral_polarisation.f90 b/src/grid/grid_mech_spectral_polarisation.f90 index a6ce8d173..765a1a5a7 100644 --- a/src/grid/grid_mech_spectral_polarisation.f90 +++ b/src/grid/grid_mech_spectral_polarisation.f90 @@ -216,16 +216,16 @@ subroutine grid_mechanical_spectral_polarisation_init groupHandle = HDF5_openGroup(fileHandle,'solver') call HDF5_read(P_aim,groupHandle,'P_aim',.false.) - call MPI_Bcast(P_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,err_MPI) + call MPI_Bcast(P_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_read(F_aim,groupHandle,'F_aim',.false.) - call MPI_Bcast(F_aim,9,MPI_DOUBLE,0,MPI_COMM_WORLD,err_MPI) + call MPI_Bcast(F_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.) - call MPI_Bcast(F_aim_lastInc,9,MPI_DOUBLE,0,MPI_COMM_WORLD,err_MPI) + call MPI_Bcast(F_aim_lastInc,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.) - call MPI_Bcast(F_aimDot,9,MPI_DOUBLE,0,MPI_COMM_WORLD,err_MPI) + call MPI_Bcast(F_aimDot,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_read(F,groupHandle,'F') call HDF5_read(F_lastInc,groupHandle,'F_lastInc') @@ -250,10 +250,10 @@ subroutine grid_mechanical_spectral_polarisation_init restartRead2: if (interface_restartInc > 0) then print'(1x,a,i0,a)', 'reading more restart data of increment ', interface_restartInc, ' from file' call HDF5_read(C_volAvg,groupHandle,'C_volAvg',.false.) 
- call MPI_Bcast(C_volAvg,81,MPI_DOUBLE,0,MPI_COMM_WORLD,err_MPI) + call MPI_Bcast(C_volAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_read(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.) - call MPI_Bcast(C_volAvgLastInc,81,MPI_DOUBLE,0,MPI_COMM_WORLD,err_MPI) + call MPI_Bcast(C_volAvgLastInc,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call HDF5_closeGroup(groupHandle) @@ -262,7 +262,7 @@ subroutine grid_mechanical_spectral_polarisation_init call MPI_File_open(MPI_COMM_WORLD, trim(getSolverJobName())//'.C_ref', & MPI_MODE_RDONLY,MPI_INFO_NULL,fileUnit,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' - call MPI_File_read(fileUnit,C_minMaxAvg,81,MPI_DOUBLE,MPI_STATUS_IGNORE,err_MPI) + call MPI_File_read(fileUnit,C_minMaxAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_STATUS_IGNORE,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call MPI_File_close(fileUnit,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' @@ -564,7 +564,7 @@ subroutine formResidual(in, FandF_tau, & X_RANGE, Y_RANGE, Z_RANGE) F_av = sum(sum(sum(F,dim=5),dim=4),dim=3) * wgt - call MPI_Allreduce(MPI_IN_PLACE,F_av,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) + call MPI_Allreduce(MPI_IN_PLACE,F_av,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call SNESGetNumberFunctionEvals(snes,nfuncs,err_PETSc); CHKERRQ(err_PETSc) @@ -609,7 +609,7 @@ subroutine formResidual(in, FandF_tau, & call utilities_constitutiveResponse(residual_F, & ! "residuum" gets field of first PK stress (to save memory) P_av,C_volAvg,C_minMaxAvg, & F - residual_F_tau/num%beta,params%Delta_t,params%rotation_BC) - call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI) + call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI) !-------------------------------------------------------------------------------------------------- ! 
stress BC handling diff --git a/src/grid/grid_thermal_spectral.f90 b/src/grid/grid_thermal_spectral.f90 index edc703858..8862df725 100644 --- a/src/grid/grid_thermal_spectral.f90 +++ b/src/grid/grid_thermal_spectral.f90 @@ -192,10 +192,10 @@ function grid_thermal_spectral_solution(Delta_t) result(solution) solution%iterationsNeeded = totalIter end if stagNorm = maxval(abs(T_current - T_stagInc)) - call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD,err_MPI) + call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' solution%stagConverged = stagNorm < max(num%eps_thermal_atol, num%eps_thermal_rtol*maxval(T_current)) - call MPI_Allreduce(MPI_IN_PLACE,solution%stagConverged,1,MPI_LOGICAL,MPI_LAND,MPI_COMM_WORLD,err_MPI) + call MPI_Allreduce(MPI_IN_PLACE,solution%stagConverged,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LAND,MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' T_stagInc = T_current @@ -326,10 +326,10 @@ subroutine updateReference() end do K_ref = K_ref*wgt - call MPI_Allreduce(MPI_IN_PLACE,K_ref,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) + call MPI_Allreduce(MPI_IN_PLACE,K_ref,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' mu_ref = mu_ref*wgt - call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) + call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' end subroutine updateReference diff --git a/src/grid/spectral_utilities.f90 b/src/grid/spectral_utilities.f90 index 35803481c..80cde388f 100644 --- a/src/grid/spectral_utilities.f90 +++ b/src/grid/spectral_utilities.f90 @@ -590,7 +590,7 @@ real(pReal) function utilities_divergenceRMS() conjg(-xi1st(1:3,grid1Red,j,k))*rescaledGeom))**2) enddo; enddo if (grid(1) == 1) utilities_divergenceRMS = utilities_divergenceRMS * 0.5_pReal ! counted twice in case of grid(1) == 1 - call MPI_Allreduce(MPI_IN_PLACE,utilities_divergenceRMS,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) + call MPI_Allreduce(MPI_IN_PLACE,utilities_divergenceRMS,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' utilities_divergenceRMS = sqrt(utilities_divergenceRMS) * wgt ! RMS in real space calculated with Parsevals theorem from Fourier space @@ -651,7 +651,7 @@ real(pReal) function utilities_curlRMS() + sum(curl_fourier%re**2 + curl_fourier%im**2) ! this layer (Nyquist) does not have a conjugate complex counterpart (if grid(1) /= 1) enddo; enddo - call MPI_Allreduce(MPI_IN_PLACE,utilities_curlRMS,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) + call MPI_Allreduce(MPI_IN_PLACE,utilities_curlRMS,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' utilities_curlRMS = sqrt(utilities_curlRMS) * wgt if (grid(1) == 1) utilities_curlRMS = utilities_curlRMS * 0.5_pReal ! 
counted twice in case of grid(1) == 1 @@ -820,7 +820,7 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,& P = reshape(homogenization_P, [3,3,grid(1),grid(2),grid3]) P_av = sum(sum(sum(P,dim=5),dim=4),dim=3) * wgt - call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) + call MPI_Allreduce(MPI_IN_PLACE,P_av,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' if (debugRotation) print'(/,1x,a,/,2(3(2x,f12.4,1x)/),3(2x,f12.4,1x))', & 'Piola--Kirchhoff stress (lab) / MPa =', transpose(P_av)*1.e-6_pReal @@ -845,21 +845,21 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,& enddo valueAndRank = [dPdF_norm_max,real(worldrank,pReal)] - call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MAXLOC, MPI_COMM_WORLD, err_MPI) + call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1_MPI_INTEGER_KIND,MPI_2DOUBLE_PRECISION,MPI_MAXLOC,MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' - call MPI_Bcast(dPdF_max,81,MPI_DOUBLE,int(valueAndRank(2)),MPI_COMM_WORLD, err_MPI) + call MPI_Bcast(dPdF_max,81_MPI_INTEGER_KIND,MPI_DOUBLE,int(valueAndRank(2),MPI_INTEGER_KIND),MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' valueAndRank = [dPdF_norm_min,real(worldrank,pReal)] - call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1, MPI_2DOUBLE_PRECISION, MPI_MINLOC, MPI_COMM_WORLD, err_MPI) + call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1_MPI_INTEGER_KIND,MPI_2DOUBLE_PRECISION,MPI_MINLOC,MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' - call MPI_Bcast(dPdF_min,81,MPI_DOUBLE,int(valueAndRank(2)),MPI_COMM_WORLD, err_MPI) + call MPI_Bcast(dPdF_min,81_MPI_INTEGER_KIND,MPI_DOUBLE,int(valueAndRank(2),MPI_INTEGER_KIND),MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' C_minmaxAvg = 0.5_pReal*(dPdF_max + dPdF_min) C_volAvg = sum(homogenization_dPdF,dim=5) - call MPI_Allreduce(MPI_IN_PLACE,C_volAvg,81,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) + call MPI_Allreduce(MPI_IN_PLACE,C_volAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' C_volAvg = C_volAvg * wgt @@ -914,7 +914,7 @@ function utilities_forwardField(Delta_t,field_lastInc,rate,aim) utilities_forwardField = field_lastInc + rate*Delta_t if (present(aim)) then !< correct to match average fieldDiff = sum(sum(sum(utilities_forwardField,dim=5),dim=4),dim=3)*wgt - call MPI_Allreduce(MPI_IN_PLACE,fieldDiff,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) + call MPI_Allreduce(MPI_IN_PLACE,fieldDiff,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' fieldDiff = fieldDiff - aim utilities_forwardField = utilities_forwardField - & @@ -985,8 +985,9 @@ subroutine utilities_updateCoords(F) real(pReal), dimension(3, grid(1)+1,grid(2)+1,grid3+1) :: nodeCoords integer :: & i,j,k,n, & - rank_t, rank_b, & c + integer(MPI_INTEGER_KIND) :: & + rank_t, rank_b integer(MPI_INTEGER_KIND) :: err_MPI #if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY) type(MPI_Request), dimension(4) :: request @@ -1029,26 +1030,26 @@ subroutine utilities_updateCoords(F) !-------------------------------------------------------------------------------------------------- ! 
average F if (grid3Offset == 0) Favg = real(tensorField_fourier(1:3,1:3,1,1,1),pReal)*wgt - call MPI_Bcast(Favg,9,MPI_DOUBLE,0,MPI_COMM_WORLD,err_MPI) + call MPI_Bcast(Favg,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' !-------------------------------------------------------------------------------------------------- ! pad cell center fluctuations along z-direction (needed when running MPI simulation) IPfluct_padded(1:3,1:grid(1),1:grid(2),2:grid3+1) = vectorField_real(1:3,1:grid(1),1:grid(2),1:grid3) c = product(shape(IPfluct_padded(:,:,:,1))) !< amount of data to transfer - rank_t = modulo(worldrank+1,worldsize) - rank_b = modulo(worldrank-1,worldsize) + rank_t = modulo(worldrank+1_MPI_INTEGER_KIND,worldsize) + rank_b = modulo(worldrank-1_MPI_INTEGER_KIND,worldsize) ! send bottom layer to process below - call MPI_Isend(IPfluct_padded(:,:,:,2), c,MPI_DOUBLE,rank_b,0,MPI_COMM_WORLD,request(1),err_MPI) + call MPI_Isend(IPfluct_padded(:,:,:,2), c,MPI_DOUBLE,rank_b,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(1),err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' - call MPI_Irecv(IPfluct_padded(:,:,:,grid3+2),c,MPI_DOUBLE,rank_t,0,MPI_COMM_WORLD,request(2),err_MPI) + call MPI_Irecv(IPfluct_padded(:,:,:,grid3+2),c,MPI_DOUBLE,rank_t,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(2),err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' ! send top layer to process above - call MPI_Isend(IPfluct_padded(:,:,:,grid3+1),c,MPI_DOUBLE,rank_t,1,MPI_COMM_WORLD,request(3),err_MPI) + call MPI_Isend(IPfluct_padded(:,:,:,grid3+1),c,MPI_DOUBLE,rank_t,1_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(3),err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' - call MPI_Irecv(IPfluct_padded(:,:,:,1), c,MPI_DOUBLE,rank_b,1,MPI_COMM_WORLD,request(4),err_MPI) + call MPI_Irecv(IPfluct_padded(:,:,:,1), c,MPI_DOUBLE,rank_b,1_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(4),err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call MPI_Waitall(4,request,status,err_MPI) diff --git a/src/mesh/FEM_utilities.f90 b/src/mesh/FEM_utilities.f90 index ce97e7776..724a5520e 100644 --- a/src/mesh/FEM_utilities.f90 +++ b/src/mesh/FEM_utilities.f90 @@ -156,7 +156,7 @@ subroutine utilities_constitutiveResponse(timeinc,P_av,forwardData) cutBack = .false. P_av = sum(homogenization_P,dim=3) * wgt - call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) + call MPI_Allreduce(MPI_IN_PLACE,P_av,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' diff --git a/src/mesh/discretization_mesh.f90 b/src/mesh/discretization_mesh.f90 index 172101976..7e12cfd7e 100644 --- a/src/mesh/discretization_mesh.f90 +++ b/src/mesh/discretization_mesh.f90 @@ -114,17 +114,17 @@ subroutine discretization_mesh_init(restart) ! get number of IDs in face sets (for boundary conditions?) 
call DMGetLabelSize(globalMesh,'Face Sets',mesh_Nboundaries,err_PETSc) CHKERRQ(err_PETSc) - call MPI_Bcast(mesh_Nboundaries,1,MPI_INTEGER,0,MPI_COMM_WORLD,err_MPI) + call MPI_Bcast(mesh_Nboundaries,1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' - call MPI_Bcast(mesh_NcpElemsGlobal,1,MPI_INTEGER,0,MPI_COMM_WORLD,err_MPI) + call MPI_Bcast(mesh_NcpElemsGlobal,1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' - call MPI_Bcast(dimPlex,1,MPI_INTEGER,0,MPI_COMM_WORLD,err_MPI) + call MPI_Bcast(dimPlex,1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' if (worldrank == 0) then call DMClone(globalMesh,geomMesh,err_PETSc) else - call DMPlexDistribute(globalMesh,0,sf,geomMesh,err_PETSc) + call DMPlexDistribute(globalMesh,0_pPETSCINT,sf,geomMesh,err_PETSc) endif CHKERRQ(err_PETSc) @@ -140,14 +140,14 @@ subroutine discretization_mesh_init(restart) CHKERRQ(err_PETSc) call ISRestoreIndicesF90(faceSetIS,pFaceSets,err_PETSc) endif - call MPI_Bcast(mesh_boundaries,mesh_Nboundaries,MPI_INTEGER,0,MPI_COMM_WORLD,err_MPI) + call MPI_Bcast(mesh_boundaries,mesh_Nboundaries,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call DMDestroy(globalMesh,err_PETSc); CHKERRQ(err_PETSc) call DMGetStratumSize(geomMesh,'depth',dimPlex,mesh_NcpElems,err_PETSc) CHKERRQ(err_PETSc) - call DMGetStratumSize(geomMesh,'depth',0,mesh_Nnodes,err_PETSc) + call DMGetStratumSize(geomMesh,'depth',0_pPETSCINT,mesh_Nnodes,err_PETSc) CHKERRQ(err_PETSc) ! Get initial nodal coordinates @@ -197,7 +197,7 @@ subroutine mesh_FEM_build_ipVolumes(dimPlex) allocate(mesh_ipVolume(mesh_maxNips,mesh_NcpElems),source=0.0_pReal) - call DMPlexGetHeightStratum(geomMesh,0,cellStart,cellEnd,err_PETSc) + call DMPlexGetHeightStratum(geomMesh,0_pPETSCINT,cellStart,cellEnd,err_PETSc) CHKERRQ(err_PETSc) allocate(pCent(dimPlex)) allocate(pNorm(dimPlex)) @@ -229,7 +229,7 @@ subroutine mesh_FEM_build_ipCoordinates(dimPlex,qPoints) allocate(pV0(dimPlex)) allocatE(pCellJ(dimPlex**2)) allocatE(pinvCellJ(dimPlex**2)) - call DMPlexGetHeightStratum(geomMesh,0,cellStart,cellEnd,err_PETSc) + call DMPlexGetHeightStratum(geomMesh,0_pPETSCINT,cellStart,cellEnd,err_PETSc) CHKERRQ(err_PETSc) do cell = cellStart, cellEnd-1 !< loop over all elements call DMPlexComputeCellGeometryAffineFEM(geomMesh,cell,pV0,pCellJ,pInvcellJ,detJ,err_PETSc) From 4562f4262cb85a08f4b89e53cab6273d6f7480c4 Mon Sep 17 00:00:00 2001 From: Philip Eisenlohr Date: Thu, 13 Jan 2022 10:14:10 -0500 Subject: [PATCH 39/58] add documentation to source code --- src/lattice.f90 | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/src/lattice.f90 b/src/lattice.f90 index 469bfa556..1f824cc98 100644 --- a/src/lattice.f90 +++ b/src/lattice.f90 @@ -788,7 +788,26 @@ function lattice_interaction_SlipBySlip(Nslip,interactionValues,lattice) result( 68,68,69, 66,66,67, 64,64,65,64,64,65, 60,63,63,60,62,62,60,61,61,60,62,62, 59,58,58,57,58,58, & ! 2. 
pyr
 68,69,68, 66,67,66, 65,64,64,65,64,64, 62,62,60,63,63,60,62,62,60,61,61,60, 58,59,58,58,57,58, &
 69,68,68, 67,66,66, 64,65,64,64,65,64, 61,60,62,62,60,63,63,60,62,62,60,61, 58,58,59,58,58,57 &
- ],shape(HEX_INTERACTIONSLIPSLIP)) !< Slip-slip interaction types for hex (onion peel naming scheme)
+ ],shape(HEX_INTERACTIONSLIPSLIP)) !< Slip-slip interaction types for hex (onion peel naming scheme and 10.1016/j.ijplas.2014.06.010 table 3)
+ !< 1: S1 basal self-interaction
+ !< 2: 1 coplanar basal/basal
+ !< 3: 3 collinear basal/prismatic
+ !< 4: 4 non-collinear basal/prismatic
+ !< 5: S2 prismatic self-interaction
+ !< 6: 2 prismatic/prismatic
+ !< 7: 5 collinear prismatic/basal
+ !< 8: 6 non-collinear prismatic/basal
+ !< 47: 8 non-collinear basal/pyramidal
+ !< 48: 7 collinear basal/pyramidal
+ !< 49: 10 non-collinear prismatic/pyramidal
+ !< 50: 9 collinear prismatic/pyramidal
+ !< 57: S3 pyramidal c+a self-interaction
+ !< 58: 16 non-collinear pyramidal/pyramidal
+ !< 59: 15 collinear pyramidal/pyramidal
+ !< 66: 14 non-collinear pyramidal/prismatic
+ !< 67: 13 semi-collinear pyramidal/prismatic
+ !< 68: 12 non-collinear pyramidal/basal
+ !< 69: 11 semi-collinear pyramidal/basal
 integer, dimension(BCT_NSLIP,BCT_NSLIP), parameter :: &
 BCT_INTERACTIONSLIPSLIP = reshape( [&
From 82dabe29c149072b3abf306eabcfdc622a05f2aa Mon Sep 17 00:00:00 2001
From: Martin Diehl
Date: Thu, 13 Jan 2022 16:01:06 +0100
Subject: [PATCH 40/58] mesh: separate kind for DAMASK and PETSc integers

---
 .gitlab-ci.yml | 7 ++++
 src/mesh/FEM_utilities.f90 | 2 +-
 src/mesh/discretization_mesh.f90 | 28 +++++++------
 src/mesh/mesh_mech_FEM.f90 | 72 +++++++++++++++++---------------
 4 files changed, 62 insertions(+), 47 deletions(-)

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 001c326a4..959cc961a 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -100,6 +100,13 @@ test_grid_GNU-64bit:
 - cd PRIVATE/testing/pytest
 - pytest -k 'compile and grid' --basetemp ${TESTROOT}/compile_grid_GNU-64bit

+test_mesh_GNU-64bit:
+ stage: compile
+ script:
+ - module load Compiler/GNU/10 Libraries/PETSc/3.16.2/64bit
+ - cd PRIVATE/testing/pytest
+ - pytest -k 'compile and mesh' --basetemp ${TESTROOT}/compile_mesh_GNU-64bit
+
 test_grid_IntelLLVM:
 stage: compile
 script:
diff --git a/src/mesh/FEM_utilities.f90 b/src/mesh/FEM_utilities.f90
index 724a5520e..703c6c818 100644
--- a/src/mesh/FEM_utilities.f90
+++ b/src/mesh/FEM_utilities.f90
@@ -50,7 +50,7 @@ module FEM_utilities
 type, public :: tSolutionState !< return type of solution from FEM solver variants
 logical :: converged = .true.
 logical :: stagConverged = .true. 
- integer :: iterationsNeeded = 0 + PetscInt :: iterationsNeeded = 0_pPETSCINT end type tSolutionState type, public :: tComponentBC diff --git a/src/mesh/discretization_mesh.f90 b/src/mesh/discretization_mesh.f90 index 7e12cfd7e..8623ebee4 100644 --- a/src/mesh/discretization_mesh.f90 +++ b/src/mesh/discretization_mesh.f90 @@ -71,22 +71,22 @@ subroutine discretization_mesh_init(restart) logical, intent(in) :: restart - integer :: dimPlex, & + PetscInt :: dimPlex, & mesh_Nnodes, & !< total number of nodes in mesh j, & debug_element, debug_ip PetscSF :: sf DM :: globalMesh - PetscInt :: nFaceSets + PetscInt :: nFaceSets, Nboundaries, NelemsGlobal, Nelems PetscInt, pointer, dimension(:) :: pFaceSets IS :: faceSetIS PetscErrorCode :: err_PETSc integer(MPI_INTEGER_KIND) :: err_MPI - integer, dimension(:), allocatable :: & + PetscInt, dimension(:), allocatable :: & materialAt class(tNode), pointer :: & num_mesh - integer :: p_i !< integration order (quadrature rule) + integer :: p_i, dim !< integration order (quadrature rule) type(tvec) :: coords_node0 print'(/,1x,a)', '<<<+- discretization_mesh init -+>>>' @@ -105,20 +105,23 @@ subroutine discretization_mesh_init(restart) CHKERRQ(err_PETSc) call DMGetDimension(globalMesh,dimPlex,err_PETSc) CHKERRQ(err_PETSc) - call DMGetStratumSize(globalMesh,'depth',dimPlex,mesh_NcpElemsGlobal,err_PETSc) + call DMGetStratumSize(globalMesh,'depth',dimPlex,NelemsGlobal,err_PETSc) CHKERRQ(err_PETSc) - print'()' + mesh_NcpElemsGlobal = int(NelemsGlobal) call DMView(globalMesh, PETSC_VIEWER_STDOUT_WORLD,err_PETSc) CHKERRQ(err_PETSc) ! get number of IDs in face sets (for boundary conditions?) - call DMGetLabelSize(globalMesh,'Face Sets',mesh_Nboundaries,err_PETSc) + call DMGetLabelSize(globalMesh,'Face Sets',Nboundaries,err_PETSc) CHKERRQ(err_PETSc) + mesh_Nboundaries = int(Nboundaries) call MPI_Bcast(mesh_Nboundaries,1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call MPI_Bcast(mesh_NcpElemsGlobal,1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' - call MPI_Bcast(dimPlex,1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) + dim = int(dimPlex) + call MPI_Bcast(dim,1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) + dimPlex = int(dim,pPETSCINT) if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' if (worldrank == 0) then @@ -128,7 +131,7 @@ subroutine discretization_mesh_init(restart) endif CHKERRQ(err_PETSc) - allocate(mesh_boundaries(mesh_Nboundaries), source = 0) + allocate(mesh_boundaries(mesh_Nboundaries), source = 0_pPETSCINT) call DMGetLabelSize(globalMesh,'Face Sets',nFaceSets,err_PETSc) CHKERRQ(err_PETSc) call DMGetLabelIdIS(globalMesh,'Face Sets',faceSetIS,err_PETSc) @@ -145,8 +148,9 @@ subroutine discretization_mesh_init(restart) call DMDestroy(globalMesh,err_PETSc); CHKERRQ(err_PETSc) - call DMGetStratumSize(geomMesh,'depth',dimPlex,mesh_NcpElems,err_PETSc) + call DMGetStratumSize(geomMesh,'depth',dimPlex,Nelems,err_PETSc) CHKERRQ(err_PETSc) + mesh_NcpElems = int(Nelems) call DMGetStratumSize(geomMesh,'depth',0_pPETSCINT,mesh_Nnodes,err_PETSc) CHKERRQ(err_PETSc) @@ -166,7 +170,7 @@ subroutine discretization_mesh_init(restart) call DMGetLabelValue(geomMesh,'Cell Sets',j-1,materialAt(j),err_PETSc) CHKERRQ(err_PETSc) enddo - materialAt = materialAt + 1 + materialAt = materialAt + 1_pPETSCINT if (debug_element < 1 .or. 
debug_element > mesh_NcpElems) call IO_error(602,ext_msg='element') if (debug_ip < 1 .or. debug_ip > mesh_maxNips) call IO_error(602,ext_msg='IP') @@ -175,7 +179,7 @@ subroutine discretization_mesh_init(restart) mesh_node0(1:dimPlex,:) = reshape(mesh_node0_temp,[dimPlex,mesh_Nnodes]) - call discretization_init(materialAt,& + call discretization_init(int(materialAt),& reshape(mesh_ipCoordinates,[3,mesh_maxNips*mesh_NcpElems]), & mesh_node0) diff --git a/src/mesh/mesh_mech_FEM.f90 b/src/mesh/mesh_mech_FEM.f90 index bb7bfaece..8cf9dbfa1 100644 --- a/src/mesh/mesh_mech_FEM.f90 +++ b/src/mesh/mesh_mech_FEM.f90 @@ -40,7 +40,7 @@ module mesh_mechanical_FEM type(tSolutionParams) :: params type, private :: tNumerics - integer :: & + PetscInt :: & p_i, & !< integration order (quadrature rule) itmax logical :: & @@ -55,7 +55,8 @@ module mesh_mechanical_FEM ! PETSc data SNES :: mechanical_snes Vec :: solution, solution_rate, solution_local - PetscInt :: dimPlex, cellDof, nQuadrature, nBasis + PetscInt :: dimPlex, cellDof, nBasis + integer :: nQuadrature PetscReal, allocatable, target :: qPoints(:), qWeights(:) MatNullSpace :: matnull @@ -104,8 +105,8 @@ subroutine FEM_mechanical_init(fieldBC) PetscReal :: detJ PetscReal, allocatable, target :: cellJMat(:,:) - PetscScalar, pointer :: px_scal(:) - PetscScalar, allocatable, target :: x_scal(:) + PetscScalar, pointer, dimension(:) :: px_scal + PetscScalar, allocatable, target, dimension(:) :: x_scal character(len=*), parameter :: prefix = 'mechFE_' PetscErrorCode :: err_PETSc @@ -118,8 +119,8 @@ subroutine FEM_mechanical_init(fieldBC) !----------------------------------------------------------------------------- ! read numerical parametes and do sanity checks num_mesh => config_numerics%get('mesh',defaultVal=emptyDict) - num%p_i = num_mesh%get_asInt('p_i',defaultVal = 2) - num%itmax = num_mesh%get_asInt('itmax',defaultVal=250) + num%p_i = int(num_mesh%get_asInt('p_i',defaultVal = 2),pPETSCINT) + num%itmax = int(num_mesh%get_asInt('itmax',defaultVal=250),pPETSCINT) num%BBarStabilisation = num_mesh%get_asBool('bbarstabilisation',defaultVal = .false.) 
num%eps_struct_atol = num_mesh%get_asFloat('eps_struct_atol', defaultVal = 1.0e-10_pReal) num%eps_struct_rtol = num_mesh%get_asFloat('eps_struct_rtol', defaultVal = 1.0e-4_pReal) @@ -143,7 +144,7 @@ subroutine FEM_mechanical_init(fieldBC) call PetscQuadratureCreate(PETSC_COMM_SELF,mechQuad,err_PETSc) CHKERRQ(err_PETSc) nc = dimPlex - call PetscQuadratureSetData(mechQuad,dimPlex,nc,nQuadrature,qPointsP,qWeightsP,err_PETSc) + call PetscQuadratureSetData(mechQuad,dimPlex,nc,int(nQuadrature,pPETSCINT),qPointsP,qWeightsP,err_PETSc) CHKERRQ(err_PETSc) call PetscFECreateDefault(PETSC_COMM_SELF,dimPlex,nc,PETSC_TRUE,prefix, & num%p_i,mechFE,err_PETSc); CHKERRQ(err_PETSc) @@ -165,7 +166,7 @@ subroutine FEM_mechanical_init(fieldBC) call DMPlexLabelComplete(mechanical_mesh,BCLabel,err_PETSc); CHKERRQ(err_PETSc) call DMGetLocalSection(mechanical_mesh,section,err_PETSc); CHKERRQ(err_PETSc) allocate(pnumComp(1), source=dimPlex) - allocate(pnumDof(0:dimPlex), source = 0) + allocate(pnumDof(0:dimPlex), source = 0_pPETSCINT) do topologDim = 0, dimPlex call DMPlexGetDepthStratum(mechanical_mesh,topologDim,cellStart,cellEnd,err_PETSc) CHKERRQ(err_PETSc) @@ -176,14 +177,14 @@ subroutine FEM_mechanical_init(fieldBC) do field = 1, dimPlex; do faceSet = 1, mesh_Nboundaries if (fieldBC%componentBC(field)%Mask(faceSet)) numBC = numBC + 1 enddo; enddo - allocate(pbcField(numBC), source=0) + allocate(pbcField(numBC), source=0_pPETSCINT) allocate(pbcComps(numBC)) allocate(pbcPoints(numBC)) numBC = 0 do field = 1, dimPlex; do faceSet = 1, mesh_Nboundaries if (fieldBC%componentBC(field)%Mask(faceSet)) then numBC = numBC + 1 - call ISCreateGeneral(PETSC_COMM_WORLD,1,[field-1],PETSC_COPY_VALUES,pbcComps(numBC),err_PETSc) + call ISCreateGeneral(PETSC_COMM_WORLD,1_pPETSCINT,[field-1],PETSC_COPY_VALUES,pbcComps(numBC),err_PETSc) CHKERRQ(err_PETSc) call DMGetStratumSize(mechanical_mesh,'Face Sets',mesh_boundaries(faceSet),bcSize,err_PETSc) CHKERRQ(err_PETSc) @@ -196,7 +197,7 @@ subroutine FEM_mechanical_init(fieldBC) call ISRestoreIndicesF90(bcPoint,pBcPoint,err_PETSc); CHKERRQ(err_PETSc) call ISDestroy(bcPoint,err_PETSc); CHKERRQ(err_PETSc) else - call ISCreateGeneral(PETSC_COMM_WORLD,0,[0],PETSC_COPY_VALUES,pbcPoints(numBC),err_PETSc) + call ISCreateGeneral(PETSC_COMM_WORLD,0_pPETSCINT,[0_pPETSCINT],PETSC_COPY_VALUES,pbcPoints(numBC),err_PETSc) CHKERRQ(err_PETSc) endif endif @@ -226,7 +227,7 @@ subroutine FEM_mechanical_init(fieldBC) CHKERRQ(err_PETSc) call DMSNESSetJacobianLocal(mechanical_mesh,FEM_mechanical_formJacobian,PETSC_NULL_VEC,err_PETSc) ! function to evaluate stiffness matrix CHKERRQ(err_PETSc) - call SNESSetMaxLinearSolveFailures(mechanical_snes, huge(1), err_PETSc) ! ignore linear solve failures CHKERRQ(err_PETSc) !< ignore linear solve failures + call SNESSetMaxLinearSolveFailures(mechanical_snes, huge(1_pPETSCINT), err_PETSc) ! 
ignore linear solve failures CHKERRQ(err_PETSc) call SNESSetConvergenceTest(mechanical_snes,FEM_mechanical_converged,PETSC_NULL_VEC,PETSC_NULL_FUNCTION,err_PETSc) CHKERRQ(err_PETSc) @@ -245,10 +246,10 @@ subroutine FEM_mechanical_init(fieldBC) allocate(pcellJ(dimPlex**2)) allocate(pinvcellJ(dimPlex**2)) allocate(cellJMat(dimPlex,dimPlex)) - call PetscDSGetDiscretization(mechDS,0,mechFE,err_PETSc) + call PetscDSGetDiscretization(mechDS,0_pPETSCINT,mechFE,err_PETSc) CHKERRQ(err_PETSc) call PetscFEGetDualSpace(mechFE,mechDualSpace,err_PETSc); CHKERRQ(err_PETSc) - call DMPlexGetHeightStratum(mechanical_mesh,0,cellStart,cellEnd,err_PETSc) + call DMPlexGetHeightStratum(mechanical_mesh,0_pPETSCINT,cellStart,cellEnd,err_PETSc) CHKERRQ(err_PETSc) do cell = cellStart, cellEnd-1 !< loop over all elements x_scal = 0.0_pReal @@ -352,19 +353,21 @@ subroutine FEM_mechanical_formResidual(dm_local,xx_local,f_local,dummy,err_PETSc call DMGetLocalSection(dm_local,section,err_PETSc); CHKERRQ(err_PETSc) call DMGetDS(dm_local,prob,err_PETSc); CHKERRQ(err_PETSc) - call PetscDSGetTabulation(prob,0,basisField,basisFieldDer,err_PETSc) + call PetscDSGetTabulation(prob,0_pPETSCINT,basisField,basisFieldDer,err_PETSc) CHKERRQ(err_PETSc) - call DMPlexGetHeightStratum(dm_local,0,cellStart,cellEnd,err_PETSc) + call DMPlexGetHeightStratum(dm_local,0_pPETSCINT,cellStart,cellEnd,err_PETSc) + CHKERRQ(err_PETSc) + call DMGetLocalVector(dm_local,x_local,err_PETSc) + CHKERRQ(err_PETSc) + call VecWAXPY(x_local,1.0,xx_local,solution_local,err_PETSc) CHKERRQ(err_PETSc) - call DMGetLocalVector(dm_local,x_local,err_PETSc); CHKERRQ(err_PETSc) - call VecWAXPY(x_local,1.0,xx_local,solution_local,err_PETSc); CHKERRQ(err_PETSc) do field = 1, dimPlex; do face = 1, mesh_Nboundaries if (params%fieldBC%componentBC(field)%Mask(face)) then call DMGetStratumSize(dm_local,'Face Sets',mesh_boundaries(face),bcSize,err_PETSc) if (bcSize > 0) then call DMGetStratumIS(dm_local,'Face Sets',mesh_boundaries(face),bcPoints,err_PETSc) CHKERRQ(err_PETSc) - call utilities_projectBCValues(x_local,section,0,field-1,bcPoints, & + call utilities_projectBCValues(x_local,section,0_pPETSCINT,field-1,bcPoints, & 0.0_pReal,params%fieldBC%componentBC(field)%Value(face),params%timeinc) call ISDestroy(bcPoints,err_PETSc); CHKERRQ(err_PETSc) endif @@ -515,7 +518,7 @@ subroutine FEM_mechanical_formJacobian(dm_local,xx_local,Jac_pre,Jac,dummy,err_P if (bcSize > 0) then call DMGetStratumIS(dm_local,'Face Sets',mesh_boundaries(face),bcPoints,err_PETSc) CHKERRQ(err_PETSc) - call utilities_projectBCValues(x_local,section,0,field-1,bcPoints, & + call utilities_projectBCValues(x_local,section,0_pPETSCINT,field-1,bcPoints, & 0.0_pReal,params%fieldBC%componentBC(field)%Value(face),params%timeinc) call ISDestroy(bcPoints,err_PETSc); CHKERRQ(err_PETSc) endif @@ -553,11 +556,11 @@ subroutine FEM_mechanical_formJacobian(dm_local,xx_local,Jac_pre,Jac,dummy,err_P FInv = math_inv33(F) K_eA = K_eA + matmul(transpose(BMat),MatA)*math_det33(FInv)**(1.0/real(dimPlex)) K_eB = K_eB - & - matmul(transpose(matmul(reshape(homogenization_F(1:dimPlex,1:dimPlex,m),shape=[dimPlex*dimPlex,1]), & + matmul(transpose(matmul(reshape(homogenization_F(1:dimPlex,1:dimPlex,m),shape=[dimPlex**2,1_pPETSCINT]), & matmul(reshape(FInv(1:dimPlex,1:dimPlex), & - shape=[1,dimPlex*dimPlex],order=[2,1]),BMat))),MatA) + shape=[1_pPETSCINT,dimPlex**2],order=[2,1]),BMat))),MatA) MatB = MatB & - + matmul(reshape(homogenization_F(1:dimPlex,1:dimPlex,m),shape=[1,dimPlex*dimPlex]),MatA) + + 
matmul(reshape(homogenization_F(1:dimPlex,1:dimPlex,m),shape=[1_pPETSCINT,dimPlex**2]),MatA) FAvg = FAvg + F BMatAvg = BMatAvg + BMat else @@ -568,12 +571,12 @@ subroutine FEM_mechanical_formJacobian(dm_local,xx_local,Jac_pre,Jac,dummy,err_P FInv = math_inv33(FAvg) K_e = K_eA*math_det33(FAvg/real(nQuadrature))**(1.0/real(dimPlex)) + & (matmul(matmul(transpose(BMatAvg), & - reshape(FInv(1:dimPlex,1:dimPlex),shape=[dimPlex*dimPlex,1],order=[2,1])),MatB) + & + reshape(FInv(1:dimPlex,1:dimPlex),shape=[dimPlex**2,1_pPETSCINT],order=[2,1])),MatB) + & K_eB)/real(dimPlex) else K_e = K_eA endif - K_e = (K_e + eps*math_eye(cellDof)) * abs(detJ) + K_e = (K_e + eps*math_eye(int(cellDof))) * abs(detJ) #ifndef __INTEL_COMPILER pK_e(1:cellDOF**2) => K_e #else @@ -596,7 +599,8 @@ subroutine FEM_mechanical_formJacobian(dm_local,xx_local,Jac_pre,Jac,dummy,err_P #if (PETSC_VERSION_MINOR < 14) call DMPlexCreateRigidBody(dm_local,matnull,err_PETSc); CHKERRQ(err_PETSc) #else - call DMPlexCreateRigidBody(dm_local,0,matnull,err_PETSc); CHKERRQ(err_PETSc) + call DMPlexCreateRigidBody(dm_local,0_pPETSCINT,matnull,err_PETSc) + CHKERRQ(err_PETSc) #endif call MatSetNullSpace(Jac,matnull,err_PETSc); CHKERRQ(err_PETSc) call MatSetNearNullSpace(Jac,matnull,err_PETSc); CHKERRQ(err_PETSc) @@ -645,7 +649,7 @@ subroutine FEM_mechanical_forward(guess,timeinc,timeinc_old,fieldBC) if (bcSize > 0) then call DMGetStratumIS(dm_local,'Face Sets',mesh_boundaries(face),bcPoints,err_PETSc) CHKERRQ(err_PETSc) - call utilities_projectBCValues(solution_local,section,0,field-1,bcPoints, & + call utilities_projectBCValues(solution_local,section,0_pPETSCINT,field-1,bcPoints, & 0.0_pReal,fieldBC%componentBC(field)%Value(face),timeinc_old) call ISDestroy(bcPoints,err_PETSc); CHKERRQ(err_PETSc) endif @@ -684,7 +688,7 @@ subroutine FEM_mechanical_converged(snes_local,PETScIter,xnorm,snorm,fnorm,reaso if (terminallyIll) reason = SNES_DIVERGED_FUNCTION_DOMAIN print'(/,1x,a,a,i0,a,i0,f0.3)', trim(incInfo), & ' @ Iteration ',PETScIter,' mechanical residual norm = ', & - int(fnorm/divTol),fnorm/divTol-int(fnorm/divTol) + int(fnorm/divTol),fnorm/divTol-int(fnorm/divTol) ! ToDo: int casting? print'(/,1x,a,/,2(3(2x,f12.4,1x)/),3(2x,f12.4,1x))', & 'Piola--Kirchhoff stress / MPa =',transpose(P_av)*1.e-6_pReal flush(IO_STDOUT) @@ -697,9 +701,7 @@ end subroutine FEM_mechanical_converged !-------------------------------------------------------------------------------------------------- subroutine FEM_mechanical_updateCoords() - real(pReal), pointer, dimension(:) :: & - nodeCoords_linear !< nodal coordinates (dimPlex*Nnodes) - real(pReal), pointer, dimension(:,:) :: & + PetscReal, pointer, dimension(:,:) :: & nodeCoords !< nodal coordinates (3,Nnodes) real(pReal), pointer, dimension(:,:,:) :: & ipCoords !< ip coordinates (3,nQuadrature,mesh_NcpElems) @@ -717,8 +719,9 @@ subroutine FEM_mechanical_updateCoords() cellStart, cellEnd, c, n PetscSection :: section PetscQuadrature :: mechQuad - PetscReal, dimension(:), pointer :: basisField, basisFieldDer - PetscScalar, dimension(:), pointer :: x_scal + PetscReal, dimension(:), pointer :: basisField, basisFieldDer, & + nodeCoords_linear !< nodal coordinates (dimPlex*Nnodes) + PetscScalar, dimension(:), pointer :: x_scal call SNESGetDM(mechanical_snes,dm_local,err_PETSc); CHKERRQ(err_PETSc) call DMGetDS(dm_local,mechQuad,err_PETSc); CHKERRQ(err_PETSc) @@ -727,7 +730,8 @@ subroutine FEM_mechanical_updateCoords() call DMGetDimension(dm_local,dimPlex,err_PETSc); CHKERRQ(err_PETSc) ! 
write cell vertex displacements - call DMPlexGetDepthStratum(dm_local,0,pStart,pEnd,err_PETSc); CHKERRQ(err_PETSc) + call DMPlexGetDepthStratum(dm_local,0_pPETSCINT,pStart,pEnd,err_PETSc) + CHKERRQ(err_PETSc) allocate(nodeCoords(3,pStart:pEnd-1),source=0.0_pReal) call VecGetArrayF90(x_local,nodeCoords_linear,err_PETSc); CHKERRQ(err_PETSc) do p=pStart, pEnd-1 From 45e33782e4e7ffaa1447adb78d76a3294f289968 Mon Sep 17 00:00:00 2001 From: Philip Eisenlohr Date: Thu, 13 Jan 2022 10:42:05 -0500 Subject: [PATCH 41/58] documentating more hex interactions --- src/lattice.f90 | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/src/lattice.f90 b/src/lattice.f90 index 1f824cc98..c5a237baf 100644 --- a/src/lattice.f90 +++ b/src/lattice.f90 @@ -788,15 +788,25 @@ function lattice_interaction_SlipBySlip(Nslip,interactionValues,lattice) result( 68,68,69, 66,66,67, 64,64,65,64,64,65, 60,63,63,60,62,62,60,61,61,60,62,62, 59,58,58,57,58,58, & ! 2. pyr 68,69,68, 66,67,66, 65,64,64,65,64,64, 62,62,60,63,63,60,62,62,60,61,61,60, 58,59,58,58,57,58, & 69,68,68, 67,66,66, 64,65,64,64,65,64, 61,60,62,62,60,63,63,60,62,62,60,61, 58,58,59,58,58,57 & - ],shape(HEX_INTERACTIONSLIPSLIP)) !< Slip-slip interaction types for hex (onion peel naming scheme and 10.1016/j.ijplas.2014.06.010 table 3) + ],shape(HEX_INTERACTIONSLIPSLIP)) !< Slip-slip interaction types for hex (onion peel naming scheme) + !< 10.1016/j.ijplas.2014.06.010 table 3 + !< 10.1080/14786435.2012.699689 table 2 and 3 !< 1: S1 basal self-interaction !< 2: 1 coplanar basal/basal !< 3: 3 collinear basal/prismatic - !< 4: 4 non-collinear basal prismatic + !< 4: 4 non-collinear basal/prismatic !< 5: S2 prismatic self-interaction !< 6: 2 prismatic/prismatic !< 7: 5 collinear prismatic/basal !< 8: 6 non-collinear prismatic/basal + !< 9: non-collinear basal/pyr a + !< 10: collinear basal/pyr a + !< 11: non-collinear prismatic/pyr a + !< 12: collinear prismatic/pyr a + !< 16: non-collinear pyr a/prismatic + !< 17: collinear pyr a/prismatic + !< 18: non-collinear pyr a/basal + !< 19: collinear pyr a/basal !< 47: 8 non-collinear basal/pyramidal !< 48: 7 collinear basal/pyramidal !< 49: 10 non-collinear prismatic/pyramidal From 2c054f2b38af36e8a40ac049bb355a9e3b84f816 Mon Sep 17 00:00:00 2001 From: Philip Eisenlohr Date: Thu, 13 Jan 2022 11:23:09 -0500 Subject: [PATCH 42/58] use same hex-hex interaction documentation as webpage --- src/lattice.f90 | 97 +++++++++++++++++++++++++++++++++++-------------- 1 file changed, 70 insertions(+), 27 deletions(-) diff --git a/src/lattice.f90 b/src/lattice.f90 index c5a237baf..e18c71edf 100644 --- a/src/lattice.f90 +++ b/src/lattice.f90 @@ -791,33 +791,76 @@ function lattice_interaction_SlipBySlip(Nslip,interactionValues,lattice) result( ],shape(HEX_INTERACTIONSLIPSLIP)) !< Slip-slip interaction types for hex (onion peel naming scheme) !< 10.1016/j.ijplas.2014.06.010 table 3 !< 10.1080/14786435.2012.699689 table 2 and 3 - !< 1: S1 basal self-interaction - !< 2: 1 coplanar basal/basal - !< 3: 3 collinear basal/prismatic - !< 4: 4 non-collinear basal/prismatic - !< 5: S2 prismatic self-interaction - !< 6: 2 prismatic/prismatic - !< 7: 5 collinear prismatic/basal - !< 8: 6 non-collinear prismatic/basal - !< 9: non-collinear basal/pyr a - !< 10: collinear basal/pyr a - !< 11: non-collinear prismatic/pyr a - !< 12: collinear prismatic/pyr a - !< 16: non-collinear pyr a/prismatic - !< 17: collinear pyr a/prismatic - !< 18: non-collinear pyr a/basal - !< 19: collinear pyr a/basal - !< 47: 8 non-collinear 
basal/pyramidal - !< 48: 7 collinear basal/pyramidal - !< 49: 10 non-collinear prismatic/pyramidal - !< 50: 9 collinear prismatic/pyramidal - !< 57: S3 pyramidal c+a self-interaction - !< 58: 16 non-collinear pyramidal/pyramidal - !< 59: 15 collinear pyramidal/pyramidal - !< 66: 14 non-collienar pyramidal/prismatic - !< 67: 13 semi-collienar pyramidal/prismatic - !< 68: 12 non-collinear pyramidal/basal - !< 69: 11 semi-collinear pyramidal/basal + !< index & label & description + !< 1 & S1 & basal self-interaction + !< 2 & 1 & basal/basal coplanar + !< 3 & 3 & basal/prismatic collinear + !< 4 & 4 & basal/prismatic non-collinear + !< 5 & S2 & prismatic self-interaction + !< 6 & 2 & prismatic/prismatic + !< 7 & 5 & prismatic/basal collinear + !< 8 & 6 & prismatic/basal non-collinear + !< 9 & - & basal/pyramidal non-collinear + !< 10 & - & basal/pyramidal collinear + !< 11 & - & prismatic/pyramidal non-collinear + !< 12 & - & prismatic/pyramidal collinear + !< 13 & - & pyramidal self-interaction + !< 14 & - & pyramidal non-collinear + !< 15 & - & pyramidal collinear + !< 16 & - & pyramidal /prismatic non-collinear + !< 17 & - & pyramidal /prismatic collinear + !< 18 & - & pyramidal /basal non-collinear + !< 19 & - & pyramidal /basal collinear + !< 20 & - & basal/1. order pyramidal semi-collinear + !< 21 & - & basal/1. order pyramidal + !< 22 & - & basal/1. order pyramidal + !< 23 & - & prismatic/1. order pyramidal semi-collinear + !< 24 & - & prismatic/1. order pyramidal + !< 25 & - & prismatic/1. order pyramidal semi-coplanar? + !< 26 & - & pyramidal /1. order pyramidal coplanar + !< 27 & - & pyramidal /1. order pyramidal + !< 28 & - & pyramidal /1. order pyramidal semi-collinear + !< 29 & - & pyramidal /1. order pyramidal semi-coplanar + !< 30 & - & 1. order pyramidal self-interaction + !< 31 & - & 1. order pyramidal coplanar + !< 32 & - & 1. order pyramidal + !< 33 & - & 1. order pyramidal + !< 34 & - & 1. order pyramidal semi-coplanar + !< 35 & - & 1. order pyramidal semi-coplanar + !< 36 & - & 1. order pyramidal collinear + !< 37 & - & 1. order pyramidal /pyramidal coplanar + !< 38 & - & 1. order pyramidal /pyramidal semi-collinear + !< 39 & - & 1. order pyramidal /pyramidal + !< 40 & - & 1. order pyramidal /pyramidal semi-coplanar + !< 41 & - & 1. order pyramidal /prismatic semi-collinear + !< 42 & - & 1. order pyramidal /prismatic semi-coplanar + !< 43 & - & 1. order pyramidal /prismatic + !< 44 & - & 1. order pyramidal /basal semi-collinear + !< 45 & - & 1. order pyramidal /basal + !< 46 & - & 1. order pyramidal /basal + !< 47 & 8 & basal/2. order pyramidal non-collinear + !< 48 & 7 & basal/2. order pyramidal semi-collinear + !< 49 & 10 & prismatic/2. order pyramidal + !< 50 & 9 & prismatic/2. order pyramidal semi-collinear + !< 51 & - & pyramidal /2. order pyramidal + !< 52 & - & pyramidal /2. order pyramidal semi collinear + !< 53 & - & 1. order pyramidal /2. order pyramidal + !< 54 & - & 1. order pyramidal /2. order pyramidal + !< 55 & - & 1. order pyramidal /2. order pyramidal + !< 56 & - & 1. order pyramidal /2. order pyramidal collinear + !< 57 & S3 & 2. order pyramidal self-interaction + !< 58 & 16 & 2. order pyramidal non-collinear + !< 59 & 15 & 2. order pyramidal semi-collinear + !< 60 & - & 2. order pyramidal /1. order pyramidal + !< 61 & - & 2. order pyramidal /1. order pyramidal collinear + !< 62 & - & 2. order pyramidal /1. order pyramidal + !< 63 & - & 2. order pyramidal /1. order pyramidal + !< 64 & - & 2. order pyramidal /pyramidal non-collinear + !< 65 & - & 2. 
order pyramidal /pyramidal semi-collinear + !< 66 & 14 & 2. order pyramidal /prismatic non-collinear + !< 67 & 13 & 2. order pyramidal /prismatic semi-collinear + !< 68 & 12 & 2. order pyramidal /basal non-collinear + !< 69 & 11 & 2. order pyramidal /basal semi-collinear integer, dimension(BCT_NSLIP,BCT_NSLIP), parameter :: & BCT_INTERACTIONSLIPSLIP = reshape( [& From 6324ffcb9e9d2abe11764f4c98f4b7ba5302ccd5 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Fri, 14 Jan 2022 15:51:02 +0100 Subject: [PATCH 43/58] more robust update of VERSION file update version only if no push to development has occured in the meantime. Less error prone. Very small change for race condition remains --- .gitlab-ci.yml | 25 ++++++++----------------- 1 file changed, 8 insertions(+), 17 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 1bf5cf8b1..c27c90bce 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -172,6 +172,10 @@ Marc: ################################################################################################### grid_runtime: stage: performance + before_script: + - ${LOCAL_HOME}/bin/queue ${CI_JOB_ID} --blocking + - source env/DAMASK.sh + - echo Job start:" $(date)" script: - module load ${COMPILER_INTEL} ${MPI_INTEL} ${PETSC_INTEL} - cd $(mktemp -d) @@ -186,10 +190,6 @@ grid_runtime: --output_dir ./ --tag ${CI_COMMIT_SHA} - if [ ${CI_COMMIT_BRANCH} == development ]; then git commit -am ${CI_PIPELINE_ID}_${CI_COMMIT_SHA}; git push; fi - before_script: - - ${LOCAL_HOME}/bin/queue ${CI_JOB_ID} --blocking - - source env/DAMASK.sh - - echo Job start:" $(date)" ################################################################################################### @@ -209,19 +209,10 @@ update_revision: script: - cd $(mktemp -d) - git clone -q git@git.damask.mpie.de:damask/DAMASK.git . - - git checkout ${CI_COMMIT_SHA} - - export VERSION=$(git describe) + - git pull + - export VERSION=$(git describe ${CI_COMMIT_SHA}) - echo ${VERSION} > python/damask/VERSION - - git add python/damask/VERSION - - git commit -m "[skip ci] updated version information after successful test of $VERSION" - - export UPDATEDREV=$(git describe) # tested state + 1 commit - - git checkout master - - git pull - - git merge $UPDATEDREV -s recursive -X ours # conflicts occur only for inconsistent state - - git push - - git checkout development - - git pull - - git merge master -s recursive -X ours -m "[skip ci] Merge branch 'master' into development" # only possible conflict is in VERSION file - - git push origin development # development is unchanged (as master is based on it) but has updated VERSION file + - git commit python/damask/VERSION -m "[skip ci] updated version information after successful test of $VERSION" + - if [ ${CI_COMMIT_SHA} == $(git rev-parse HEAD^) ]; then git push origin HEAD:master HEAD:development; fi only: - development From d1cd125a5bda0ce92a52979676fb06a1efabec1b Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Sat, 15 Jan 2022 12:19:53 +0100 Subject: [PATCH 44/58] direct array reversal, don't duplicate FFTW manual --- src/grid/spectral_utilities.f90 | 66 ++++++++++++++++----------------- 1 file changed, 33 insertions(+), 33 deletions(-) diff --git a/src/grid/spectral_utilities.f90 b/src/grid/spectral_utilities.f90 index f40025039..3dfb87243 100644 --- a/src/grid/spectral_utilities.f90 +++ b/src/grid/spectral_utilities.f90 @@ -37,7 +37,7 @@ module spectral_utilities !-------------------------------------------------------------------------------------------------- ! 
variables storing information for spectral method and FFTW - real (C_DOUBLE), public, dimension(:,:,:,:,:), pointer :: tensorField_real !< real representation (some stress or deformation) of field_fourier + real(C_DOUBLE), public, dimension(:,:,:,:,:), pointer :: tensorField_real !< real representation (some stress or deformation) of field_fourier complex(C_DOUBLE_COMPLEX),public, dimension(:,:,:,:,:), pointer :: tensorField_fourier !< field on which the Fourier transform operates real(C_DOUBLE), public, dimension(:,:,:,:), pointer :: vectorField_real !< vector field real representation for fftw complex(C_DOUBLE_COMPLEX),public, dimension(:,:,:,:), pointer :: vectorField_fourier !< vector field fourier representation for fftw @@ -156,7 +156,7 @@ subroutine spectral_utilities_init integer(C_INTPTR_T) :: alloc_local, local_K, local_K_offset integer(C_INTPTR_T), parameter :: & scalarSize = 1_C_INTPTR_T, & - vecSize = 3_C_INTPTR_T, & + vectorSize = 3_C_INTPTR_T, & tensorSize = 9_C_INTPTR_T character(len=*), parameter :: & PETSCDEBUG = ' -snes_view -snes_monitor ' @@ -274,7 +274,7 @@ subroutine spectral_utilities_init call c_f_pointer(tensorField, tensorField_fourier, [3_C_INTPTR_T,3_C_INTPTR_T, & gridFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T , gridFFTW(2),local_K]) ! place a pointer for a fourier tensor representation - vectorField = fftw_alloc_complex(vecSize*alloc_local) + vectorField = fftw_alloc_complex(vectorSize*alloc_local) call c_f_pointer(vectorField, vectorField_real, [3_C_INTPTR_T,& 2_C_INTPTR_T*(gridFFTW(1)/2_C_INTPTR_T + 1_C_INTPTR_T),gridFFTW(2),local_K]) ! place a pointer for a real vector representation call c_f_pointer(vectorField, vectorField_fourier,[3_C_INTPTR_T,& @@ -288,42 +288,42 @@ subroutine spectral_utilities_init !-------------------------------------------------------------------------------------------------- ! tensor MPI fftw plans - planTensorForth = fftw_mpi_plan_many_dft_r2c(3, [gridFFTW(3),gridFFTW(2),gridFFTW(1)], & ! dimension, logical length in each dimension in reversed order - tensorSize, FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, &! no. of transforms, default iblock and oblock - tensorField_real, tensorField_fourier, & ! input data, output data - PETSC_COMM_WORLD, FFTW_planner_flag) ! use all processors, planer precision - if (.not. C_ASSOCIATED(planTensorForth)) error stop 'FFTW error' - planTensorBack = fftw_mpi_plan_many_dft_c2r(3, [gridFFTW(3),gridFFTW(2),gridFFTW(1)], & ! dimension, logical length in each dimension in reversed order - tensorSize, FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, &! no. of transforms, default iblock and oblock - tensorField_fourier,tensorField_real, & ! input data, output data - PETSC_COMM_WORLD, FFTW_planner_flag) ! all processors, planer precision - if (.not. C_ASSOCIATED(planTensorBack)) error stop 'FFTW error' + planTensorForth = fftw_mpi_plan_many_dft_r2c(3,gridFFTW(3:1:-1),tensorSize, & + FFTW_MPI_DEFAULT_BLOCK,FFTW_MPI_DEFAULT_BLOCK, & + tensorField_real,tensorField_fourier, & + PETSC_COMM_WORLD,FFTW_planner_flag) + if (.not. c_associated(planTensorForth)) error stop 'FFTW error' + planTensorBack = fftw_mpi_plan_many_dft_c2r(3,gridFFTW(3:1:-1),tensorSize, & + FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, & + tensorField_fourier,tensorField_real, & + PETSC_COMM_WORLD, FFTW_planner_flag) + if (.not. c_associated(planTensorBack)) error stop 'FFTW error' !-------------------------------------------------------------------------------------------------- ! 
vector MPI fftw plans - planVectorForth = fftw_mpi_plan_many_dft_r2c(3, [gridFFTW(3),gridFFTW(2),gridFFTW(1)], & ! dimension, logical length in each dimension in reversed order - vecSize, FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK,&! no. of transforms, default iblock and oblock - vectorField_real, vectorField_fourier, & ! input data, output data - PETSC_COMM_WORLD, FFTW_planner_flag) ! use all processors, planer precision - if (.not. C_ASSOCIATED(planVectorForth)) error stop 'FFTW error' - planVectorBack = fftw_mpi_plan_many_dft_c2r(3, [gridFFTW(3),gridFFTW(2),gridFFTW(1)], & ! dimension, logical length in each dimension in reversed order - vecSize, FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, & ! no. of transforms, default iblock and oblock - vectorField_fourier,vectorField_real, & ! input data, output data - PETSC_COMM_WORLD, FFTW_planner_flag) ! all processors, planer precision - if (.not. C_ASSOCIATED(planVectorBack)) error stop 'FFTW error' + planVectorForth = fftw_mpi_plan_many_dft_r2c(3,gridFFTW(3:1:-1),vectorSize, & + FFTW_MPI_DEFAULT_BLOCK,FFTW_MPI_DEFAULT_BLOCK, & + vectorField_real,vectorField_fourier, & + PETSC_COMM_WORLD,FFTW_planner_flag) + if (.not. c_associated(planVectorForth)) error stop 'FFTW error' + planVectorBack = fftw_mpi_plan_many_dft_c2r(3,gridFFTW(3:1:-1),vectorSize, & + FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, & + vectorField_fourier,vectorField_real, & + PETSC_COMM_WORLD, FFTW_planner_flag) + if (.not. c_associated(planVectorBack)) error stop 'FFTW error' !-------------------------------------------------------------------------------------------------- ! scalar MPI fftw plans - planScalarForth = fftw_mpi_plan_many_dft_r2c(3, [gridFFTW(3),gridFFTW(2),gridFFTW(1)], & ! dimension, logical length in each dimension in reversed order - scalarSize, FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, &! no. of transforms, default iblock and oblock - scalarField_real, scalarField_fourier, & ! input data, output data - PETSC_COMM_WORLD, FFTW_planner_flag) ! use all processors, planer precision - if (.not. C_ASSOCIATED(planScalarForth)) error stop 'FFTW error' - planScalarBack = fftw_mpi_plan_many_dft_c2r(3, [gridFFTW(3),gridFFTW(2),gridFFTW(1)], & ! dimension, logical length in each dimension in reversed order, no. of transforms - scalarSize, FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, &! no. of transforms, default iblock and oblock - scalarField_fourier,scalarField_real, & ! input data, output data - PETSC_COMM_WORLD, FFTW_planner_flag) ! use all processors, planer precision - if (.not. C_ASSOCIATED(planScalarBack)) error stop 'FFTW error' + planScalarForth = fftw_mpi_plan_many_dft_r2c(3,gridFFTW(3:1:-1),scalarSize, & + FFTW_MPI_DEFAULT_BLOCK,FFTW_MPI_DEFAULT_BLOCK, & + scalarField_real,scalarField_fourier, & + PETSC_COMM_WORLD,FFTW_planner_flag) + if (.not. c_associated(planScalarForth)) error stop 'FFTW error' + planScalarBack = fftw_mpi_plan_many_dft_c2r(3,gridFFTW(3:1:-1),scalarSize, & + FFTW_MPI_DEFAULT_BLOCK, FFTW_MPI_DEFAULT_BLOCK, & + scalarField_fourier,scalarField_real, & + PETSC_COMM_WORLD, FFTW_planner_flag) + if (.not. c_associated(planScalarBack)) error stop 'FFTW error' !-------------------------------------------------------------------------------------------------- ! calculation of discrete angular frequencies, ordered as in FFTW (wrap around) From a37178ddeea46c8f4ab44dadcd1d1c0c22a12eb0 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Sat, 15 Jan 2022 12:28:39 +0100 Subject: [PATCH 45/58] handle case of -0.0 -0.0 < 0.0 ! 
false, so sign() is needed to change direction also for the corner case of -0.0 --- src/rotations.f90 | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/rotations.f90 b/src/rotations.f90 index b4728e38b..7629060cb 100644 --- a/src/rotations.f90 +++ b/src/rotations.f90 @@ -270,7 +270,7 @@ pure elemental subroutine standardize(self) class(rotation), intent(inout) :: self - if (self%q(1) < 0.0_pReal) self%q = - self%q + if (sign(1.0_pReal,self%q(1)) < 0.0_pReal) self%q = - self%q end subroutine standardize @@ -450,7 +450,7 @@ pure function qu2om(qu) result(om) om(3,2) = 2.0_pReal*(qu(4)*qu(3)+qu(1)*qu(2)) om(1,3) = 2.0_pReal*(qu(2)*qu(4)+qu(1)*qu(3)) - if (P < 0.0_pReal) om = transpose(om) + if (sign(1.0_pReal,P) < 0.0_pReal) om = transpose(om) end function qu2om @@ -480,7 +480,7 @@ pure function qu2eu(qu) result(eu) atan2( 2.0_pReal*chi, q03-q12 ), & atan2(( P*qu(1)*qu(3)+qu(2)*qu(4))*chi, (-P*qu(1)*qu(2)+qu(3)*qu(4))*chi )] endif degenerated - where(eu<0.0_pReal) eu = mod(eu+2.0_pReal*PI,[2.0_pReal*PI,PI,2.0_pReal*PI]) + where(sign(1.0_pReal,eu)<0.0_pReal) eu = mod(eu+2.0_pReal*PI,[2.0_pReal*PI,PI,2.0_pReal*PI]) end function qu2eu @@ -602,7 +602,7 @@ pure function om2qu(om) result(qu) qu = [ (om(2,1) - om(1,2)) /s,(om(1,3) + om(3,1)) / s,(om(2,3) + om(3,2)) / s,0.25_pReal * s] endif endif - if(qu(1)<0._pReal) qu =-1.0_pReal * qu + if(sign(1.0_pReal,qu(1))<0.0_pReal) qu =-1.0_pReal * qu qu = qu*[1.0_pReal,P,P,P] end function om2qu @@ -628,7 +628,7 @@ pure function om2eu(om) result(eu) eu = [atan2(om(1,2),om(1,1)), 0.5_pReal*PI*(1.0_pReal-om(3,3)),0.0_pReal ] end if where(abs(eu) < 1.e-8_pReal) eu = 0.0_pReal - where(eu<0.0_pReal) eu = mod(eu+2.0_pReal*PI,[2.0_pReal*PI,PI,2.0_pReal*PI]) + where(sign(1.0_pReal,eu)<0.0_pReal) eu = mod(eu+2.0_pReal*PI,[2.0_pReal*PI,PI,2.0_pReal*PI]) end function om2eu @@ -735,7 +735,7 @@ pure function eu2qu(eu) result(qu) -P*sPhi*cos(ee(1)-ee(3)), & -P*sPhi*sin(ee(1)-ee(3)), & -P*cPhi*sin(ee(1)+ee(3))] - if(qu(1) < 0.0_pReal) qu = qu * (-1.0_pReal) + if(sign(1.0_pReal,qu(1)) < 0.0_pReal) qu = qu * (-1.0_pReal) end function eu2qu @@ -792,7 +792,7 @@ pure function eu2ax(eu) result(ax) else ax(1:3) = -P/tau * [ t*cos(delta), t*sin(delta), sin(sigma) ] ! passive axis-angle pair so a minus sign in front ax(4) = alpha - if (alpha < 0.0_pReal) ax = -ax ! 
ensure alpha is positive end if end function eu2ax From 6657e5c4ebbbfa997cbbf6a5a121dbba90abc4b4 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Sat, 15 Jan 2022 13:22:15 +0100 Subject: [PATCH 46/58] testing a few corner cases behavior for __eq__ is now following python standard, comparing to wrong class gives False --- python/damask/_result.py | 2 +- python/tests/test_Colormap.py | 1 + python/tests/test_Crystal.py | 3 +++ python/tests/test_Grid.py | 1 + python/tests/test_Orientation.py | 9 +++++++++ python/tests/test_Result.py | 3 +++ python/tests/test_Rotation.py | 9 +++++++++ 7 files changed, 27 insertions(+), 1 deletion(-) diff --git a/python/damask/_result.py b/python/damask/_result.py index c87b51a89..02d4174c9 100644 --- a/python/damask/_result.py +++ b/python/damask/_result.py @@ -981,7 +981,7 @@ class Result: t = 'tensor' if o is None: o = 'fro' else: - raise ValueError(f'invalid norm order {ord}') + raise ValueError(f'invalid shape of {x["label"]}') return { 'data': np.linalg.norm(x['data'],ord=o,axis=axis,keepdims=True), diff --git a/python/tests/test_Colormap.py b/python/tests/test_Colormap.py index 156907670..2321745aa 100644 --- a/python/tests/test_Colormap.py +++ b/python/tests/test_Colormap.py @@ -81,6 +81,7 @@ class TestColormap: assert Colormap.from_predefined('strain') == Colormap.from_predefined('strain') assert Colormap.from_predefined('strain') != Colormap.from_predefined('stress') assert Colormap.from_predefined('strain',N=128) != Colormap.from_predefined('strain',N=64) + assert not Colormap.from_predefined('strain',N=128) == 1 @pytest.mark.parametrize('low,high',[((0,0,0),(1,1,1)), ([0,0,0],[1,1,1])]) diff --git a/python/tests/test_Crystal.py b/python/tests/test_Crystal.py index 9c6de965d..235ed554a 100644 --- a/python/tests/test_Crystal.py +++ b/python/tests/test_Crystal.py @@ -40,6 +40,9 @@ class TestCrystal: alpha=alpha,beta=beta,gamma=gamma) assert np.allclose(np.eye(3),np.einsum('ik,jk',c.basis_real,c.basis_reciprocal)) + def test_basis_invalid(self): + with pytest.raises(KeyError): + Crystal(family='cubic').basis_real @pytest.mark.parametrize('keyFrame,keyLattice',[('uvw','direction'),('hkl','plane'),]) @pytest.mark.parametrize('vector',np.array([ diff --git a/python/tests/test_Grid.py b/python/tests/test_Grid.py index 2b9ca22f4..6dd94f4bb 100644 --- a/python/tests/test_Grid.py +++ b/python/tests/test_Grid.py @@ -44,6 +44,7 @@ class TestGrid: def test_equal(self,default): assert default == default + assert not default == 42 def test_repr(self,default): print(default) diff --git a/python/tests/test_Orientation.py b/python/tests/test_Orientation.py index 9f623fc6c..2a32adea4 100644 --- a/python/tests/test_Orientation.py +++ b/python/tests/test_Orientation.py @@ -364,6 +364,11 @@ class TestOrientation: table.save(reference) assert np.allclose(P,Table.load(reference).get('Schmid')) + def test_Schmid_invalid(self): + with pytest.raises(KeyError): + Orientation(lattice='fcc').Schmid() + + ### vectorization tests ### @pytest.mark.parametrize('lattice',['hP','cI','cF']) # tI not included yet @@ -505,3 +510,7 @@ class TestOrientation: for loc in np.random.randint(0,blend,(10,len(blend))): assert np.allclose(o[tuple(loc[:len(o.shape)])].to_pole(uvw=v[tuple(loc[-len(v.shape[:-1]):])]), o.to_pole(uvw=v)[tuple(loc)]) + + def test_mul_invalid(self): + with pytest.raises(TypeError): + Orientation.from_random(lattice='cF')*np.ones(3) diff --git a/python/tests/test_Result.py b/python/tests/test_Result.py index 4fea08eb3..1da56dedf 100644 --- a/python/tests/test_Result.py +++ 
b/python/tests/test_Result.py @@ -102,6 +102,9 @@ class TestResult: with pytest.raises(AttributeError): default.view('invalid',True) + def test_add_invalid(self,default): + default.add_absolute('xxxx') + def test_add_absolute(self,default): default.add_absolute('F_e') in_memory = np.abs(default.place('F_e')) diff --git a/python/tests/test_Rotation.py b/python/tests/test_Rotation.py index 2d623adf5..a431bc64b 100644 --- a/python/tests/test_Rotation.py +++ b/python/tests/test_Rotation.py @@ -792,6 +792,11 @@ class TestRotation: R = Rotation.from_random(shape,rng_seed=1) assert R == R if shape is None else (R == R).all() + @pytest.mark.parametrize('shape',[None,5,(4,6)]) + def test_allclose(self,shape): + R = Rotation.from_random(shape,rng_seed=1) + assert R.allclose(R) + @pytest.mark.parametrize('shape',[None,5,(4,6)]) def test_unequal(self,shape): R = Rotation.from_random(shape,rng_seed=1) @@ -1124,3 +1129,7 @@ class TestRotation: weights_r = np.histogramdd(Eulers_r,steps,rng)[0].flatten(order='F')/N * np.sum(weights) assert np.sqrt(((weights_r - weights) ** 2).mean()) < 5 + + def test_mul_invalid(self): + with pytest.raises(TypeError): + Rotation.from_random()*np.ones(3) From a545baebbf07fd53e2b3a5db3bd7d3a827d90faa Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Sun, 16 Jan 2022 08:18:06 +0100 Subject: [PATCH 47/58] VTK polishing avoid segmentation fault (also allows testing) --- python/damask/__init__.py | 2 +- python/damask/_grid.py | 2 +- python/damask/_vtk.py | 18 ++++++++++-------- python/tests/test_VTK.py | 4 ++++ 4 files changed, 16 insertions(+), 10 deletions(-) diff --git a/python/damask/__init__.py b/python/damask/__init__.py index 584a97e87..aa3c36b07 100644 --- a/python/damask/__init__.py +++ b/python/damask/__init__.py @@ -21,8 +21,8 @@ from ._rotation import Rotation # noqa from ._crystal import Crystal # noqa from ._orientation import Orientation # noqa from ._table import Table # noqa -from ._vtk import VTK # noqa from ._colormap import Colormap # noqa +from ._vtk import VTK # noqa from ._config import Config # noqa from ._configmaterial import ConfigMaterial # noqa from ._grid import Grid # noqa diff --git a/python/damask/_grid.py b/python/damask/_grid.py index 5006e1b40..002d00259 100644 --- a/python/damask/_grid.py +++ b/python/damask/_grid.py @@ -674,7 +674,7 @@ class Grid: def show(self) -> None: """Show on screen.""" - VTK.from_rectilinear_grid(self.cells,self.size,self.origin).show() + VTK.from_image_data(self.cells,self.size,self.origin).show() def add_primitive(self, diff --git a/python/damask/_vtk.py b/python/damask/_vtk.py index dbbfb1b10..6bb7a5d1c 100644 --- a/python/damask/_vtk.py +++ b/python/damask/_vtk.py @@ -442,22 +442,24 @@ class VTK: mapper = vtk.vtkDataSetMapper() mapper.SetInputData(self.vtk_data) + actor = vtk.vtkActor() actor.SetMapper(mapper) + actor.GetProperty().SetColor(230/255,150/255,68/255) ren = vtk.vtkRenderer() + ren.AddActor(actor) + ren.SetBackground(67/255,128/255,208/255) window = vtk.vtkRenderWindow() window.AddRenderer(ren) - - ren.AddActor(actor) - ren.SetBackground(0.2,0.2,0.2) - window.SetSize(width,height) + window.SetWindowName(util.execution_stamp('VTK','show')) iren = vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(window) - - iren.Initialize() - window.Render() - iren.Start() + if os.name == 'posix' and 'DISPLAY' not in os.environ: + print('Found no rendering device') + else: + window.Render() + iren.Start() diff --git a/python/tests/test_VTK.py b/python/tests/test_VTK.py index 68224ff33..e59409a20 100644 --- 
a/python/tests/test_VTK.py +++ b/python/tests/test_VTK.py @@ -28,6 +28,10 @@ class TestVTK: def _patch_execution_stamp(self, patch_execution_stamp): print('patched damask.util.execution_stamp') + def test_show(sef,default,monkeypatch): + monkeypatch.delenv('DISPLAY',raising=False) + default.show() + def test_rectilinearGrid(self,tmp_path): cells = np.random.randint(5,10,3)*2 size = np.random.random(3) + 1.0 From 4cd260881aa7c7a2b5eb64b700ca49770b4b45e6 Mon Sep 17 00:00:00 2001 From: Test User Date: Mon, 17 Jan 2022 23:52:17 +0100 Subject: [PATCH 48/58] [skip ci] updated version information after successful test of v3.0.0-alpha5-441-g8b5122f52 --- python/damask/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/damask/VERSION b/python/damask/VERSION index 11b9d06bd..278b0bc78 100644 --- a/python/damask/VERSION +++ b/python/damask/VERSION @@ -1 +1 @@ -v3.0.0-alpha5-389-ga000e477c +v3.0.0-alpha5-441-g8b5122f52 From c5fd6a36777e68013ba6d7839d720bc99db90193 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Mon, 17 Jan 2022 07:41:17 +0100 Subject: [PATCH 49/58] random seed should contain some entropy setting all elements to the same value does not make sense. Moreover, negative values are allowed --- PRIVATE | 2 +- src/math.f90 | 32 ++++++++++++++------------------ 2 files changed, 15 insertions(+), 19 deletions(-) diff --git a/PRIVATE b/PRIVATE index b898a8b55..d570b4a1f 160000 --- a/PRIVATE +++ b/PRIVATE @@ -1 +1 @@ -Subproject commit b898a8b5552bd9d1c555edc3d8134564dd32fe53 +Subproject commit d570b4a1fac5b9b99d026d3ff9bf593615e22ce5 diff --git a/src/math.f90 b/src/math.f90 index 3ea4d6a08..db0666ab0 100644 --- a/src/math.f90 +++ b/src/math.f90 @@ -84,38 +84,34 @@ contains subroutine math_init real(pReal), dimension(4) :: randTest - integer :: & - randSize, & - randomSeed !< fixed seeding for pseudo-random number generator, Default 0: use random seed - integer, dimension(:), allocatable :: randInit + integer :: randSize + integer, dimension(:), allocatable :: seed class(tNode), pointer :: & num_generic + print'(/,1x,a)', '<<<+- math init -+>>>'; flush(IO_STDOUT) num_generic => config_numerics%get('generic',defaultVal=emptyDict) - randomSeed = num_generic%get_asInt('random_seed', defaultVal = 0) call random_seed(size=randSize) - allocate(randInit(randSize)) - if (randomSeed > 0) then - randInit = randomSeed + allocate(seed(randSize)) + + if (num_generic%contains('random_seed')) then + seed = num_generic%get_as1dInt('random_seed',requiredSize=randSize) else call random_seed() - call random_seed(get = randInit) - randInit(2:randSize) = randInit(1) - endif + call random_seed(get = seed) + end if - call random_seed(put = randInit) + call random_seed(put = seed) call random_number(randTest) - print'(/,a,i2)', ' size of random seed: ', randSize - print'( a,i0)', ' value of random seed: ', randInit(1) - print'( a,4(/,26x,f17.14),/)', ' start of random sequence: ', randTest + print'(/,a,i2)', ' size of random seed: ', randSize + print*, 'value of random seed: ', seed + print'( a,4(/,26x,f17.14))', ' start of random sequence: ', randTest - call random_seed(put = randInit) - - call selfTest + call selfTest() end subroutine math_init From 632180d67e347225eaba26f7e03d5380cc30242b Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Tue, 18 Jan 2022 14:27:47 +0100 Subject: [PATCH 50/58] not needed --- python/damask/_rotation.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/python/damask/_rotation.py b/python/damask/_rotation.py index fd19ec31b..8e5f7f7f9 
100644 --- a/python/damask/_rotation.py +++ b/python/damask/_rotation.py @@ -812,8 +812,7 @@ class Rotation: return Rotation.from_basis(R) @staticmethod - def from_parallel(a,b, - **kwargs): + def from_parallel(a,b): """ Initialize from pairs of two orthogonal lattice basis vectors. @@ -963,8 +962,7 @@ class Rotation: N = 500, degrees = True, fractions = True, - rng_seed = None, - **kwargs): + rng_seed = None): """ Sample discrete values from a binned orientation distribution function (ODF). From c3b492c5da9b3ca75dd4b0c8c8235270563b193c Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Tue, 18 Jan 2022 21:06:39 +0100 Subject: [PATCH 51/58] new PETSc version with backported fix for ML --- .gitlab-ci.yml | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 6da114938..f3e74e030 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -46,7 +46,7 @@ variables: # ++++++++++++ PETSc ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ PETSC_GNU: "Libraries/PETSc/3.16.1/GNU-10-OpenMPI-4.1.1" PETSC_INTELLLVM: "Libraries/PETSc/3.16.3/oneAPI-2022.0.1-IntelMPI-2021.5.0" - PETSC_INTEL: "Libraries/PETSc/3.16.2/Intel-2022.0.1-IntelMPI-2021.5.0" + PETSC_INTEL: "Libraries/PETSc/3.16.3/Intel-2022.0.1-IntelMPI-2021.5.0" # ++++++++++++ MSC Marc +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ MSC: "FEM/MSC/2021.3.1" IntelMarc: "Compiler/Intel/19.1.2 Libraries/IMKL/2020" @@ -79,63 +79,63 @@ mypy: ################################################################################################### -test_grid_GNU: +grid_GNU: stage: compile script: - module load ${COMPILER_GNU} ${MPI_GNU} ${PETSC_GNU} - cd PRIVATE/testing/pytest - pytest -k 'compile and grid' --basetemp ${TESTROOT}/compile_grid_GNU -test_mesh_GNU: +mesh_GNU: stage: compile script: - module load ${COMPILER_GNU} ${MPI_GNU} ${PETSC_GNU} - cd PRIVATE/testing/pytest - pytest -k 'compile and mesh' --basetemp ${TESTROOT}/compile_mesh_GNU -test_grid_GNU-64bit: +grid_GNU-64bit: stage: compile script: - module load Compiler/GNU/10 Libraries/PETSc/3.16.2/64bit - cd PRIVATE/testing/pytest - pytest -k 'compile and grid' --basetemp ${TESTROOT}/compile_grid_GNU-64bit -test_mesh_GNU-64bit: +mesh_GNU-64bit: stage: compile script: - module load Compiler/GNU/10 Libraries/PETSc/3.16.2/64bit - cd PRIVATE/testing/pytest - pytest -k 'compile and mesh' --basetemp ${TESTROOT}/compile_mesh_GNU-64bit -test_grid_IntelLLVM: +grid_IntelLLVM: stage: compile script: - module load ${COMPILER_INTELLLVM} ${MPI_INTELLLVM} ${PETSC_INTELLLVM} - cd PRIVATE/testing/pytest - pytest -k 'compile and grid' --basetemp ${TESTROOT}/compile_grid_IntelLLVM -test_mesh_IntelLLVM: +mesh_IntelLLVM: stage: compile script: - module load ${COMPILER_INTELLLVM} ${MPI_INTELLLVM} ${PETSC_INTELLLVM} - cd PRIVATE/testing/pytest - pytest -k 'compile and mesh' --basetemp ${TESTROOT}/compile_mesh_IntelLLVM -test_grid_Intel: +grid_Intel: stage: compile script: - module load ${COMPILER_INTEL} ${MPI_INTEL} ${PETSC_INTEL} - cd PRIVATE/testing/pytest - pytest -k 'compile and grid' --basetemp ${TESTROOT}/compile_grid_Intel -test_mesh_Intel: +mesh_Intel: stage: compile script: - module load ${COMPILER_INTEL} ${MPI_INTEL} ${PETSC_INTEL} - cd PRIVATE/testing/pytest - pytest -k 'compile and mesh' --basetemp ${TESTROOT}/compile_mesh_Intel -test_Marc: +Marc_Intel: stage: compile script: - module load $IntelMarc $HDF5Marc $MSC @@ -158,7 +158,7 @@ setup_mesh: - cmake -DDAMASK_SOLVER=MESH 
-DCMAKE_INSTALL_PREFIX=${TESTROOT} ${CI_PROJECT_DIR} - make -j2 all install -compile_Marc: +setup_Marc: stage: compile script: - module load $IntelMarc $HDF5Marc $MSC From 6fb4320fc29078f287e65ffcbf05eec4cb1a6de8 Mon Sep 17 00:00:00 2001 From: Test User Date: Wed, 19 Jan 2022 17:02:22 +0100 Subject: [PATCH 52/58] [skip ci] updated version information after successful test of v3.0.0-alpha5-451-g3fbf1459d --- python/damask/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/damask/VERSION b/python/damask/VERSION index 278b0bc78..75b174809 100644 --- a/python/damask/VERSION +++ b/python/damask/VERSION @@ -1 +1 @@ -v3.0.0-alpha5-441-g8b5122f52 +v3.0.0-alpha5-451-g3fbf1459d From 77f22318a885cdf2f564b054530b0f6bd7a1f29d Mon Sep 17 00:00:00 2001 From: Daniel Otto de Mentock Date: Thu, 20 Jan 2022 11:52:56 +0000 Subject: [PATCH 53/58] Typehint improvements --- python/damask/_colormap.py | 2 +- python/damask/_grid.py | 26 +++++++++++--------------- 2 files changed, 12 insertions(+), 16 deletions(-) diff --git a/python/damask/_colormap.py b/python/damask/_colormap.py index 2c7f01d94..1ccc292e6 100644 --- a/python/damask/_colormap.py +++ b/python/damask/_colormap.py @@ -250,7 +250,7 @@ class Colormap(mpl.colors.ListedColormap): np.logical_or (np.isnan(field), field == gap)) # mask NaN (and gap if present) l,r = (field[mask].min(),field[mask].max()) if bounds is None else \ - np.array(bounds,float)[:2] + (bounds[0],bounds[1]) delta,avg = r-l,0.5*abs(r+l) diff --git a/python/damask/_grid.py b/python/damask/_grid.py index 002d00259..eefd1e0be 100644 --- a/python/damask/_grid.py +++ b/python/damask/_grid.py @@ -428,7 +428,7 @@ class Grid: seeds_p = np.vstack((seeds_p-np.array([0.,size[1],0.]),seeds_p,seeds_p+np.array([0.,size[1],0.]))) seeds_p = np.vstack((seeds_p-np.array([0.,0.,size[2]]),seeds_p,seeds_p+np.array([0.,0.,size[2]]))) else: - weights_p = weights + weights_p = np.array(weights,float) seeds_p = seeds coords = grid_filters.coordinates0_point(cells,size).reshape(-1,3) @@ -841,7 +841,8 @@ class Grid: if not set(directions).issubset(valid): raise ValueError(f'invalid direction {set(directions).difference(valid)} specified') - mat = np.flip(self.material, (valid.index(d) for d in directions if d in valid)) + + mat = np.flip(self.material, [valid.index(d) for d in directions if d in valid]) return Grid(material = mat, size = self.size, @@ -973,14 +974,13 @@ class Grid: Updated grid-based geometry. """ - if fill is None: fill = np.nanmax(self.material) + 1 - dtype = float if isinstance(fill,float) or self.material.dtype in np.sctypes['float'] else int - material = self.material # These rotations are always applied in the reference coordinate system, i.e. 
(z,x,z) not (z,x',z'') # see https://www.cs.utexas.edu/~theshark/courses/cs354/lectures/cs354-14.pdf for angle,axes in zip(R.as_Euler_angles(degrees=True)[::-1], [(0,1),(1,2),(0,1)]): - material_temp = ndimage.rotate(material,angle,axes,order=0,prefilter=False,output=dtype,cval=fill) + material_temp = ndimage.rotate(material,angle,axes,order=0,prefilter=False, + output=self.material.dtype, + cval=np.nanmax(self.material) + 1 if fill is None else fill) # avoid scipy interpolation errors for rotations close to multiples of 90° material = material_temp if np.prod(material_temp.shape) != np.prod(material.shape) else \ np.rot90(material,k=np.rint(angle/90.).astype(int),axes=axes) @@ -1031,10 +1031,8 @@ class Grid: """ offset_ = np.array(offset,int) if offset is not None else np.zeros(3,int) cells_ = np.array(cells,int) if cells is not None else self.cells - if fill is None: fill = np.nanmax(self.material) + 1 - dtype = float if int(fill) != fill or self.material.dtype in np.sctypes['float'] else int - canvas = np.full(cells_,fill,dtype) + canvas = np.full(cells_,np.nanmax(self.material) + 1 if fill is None else fill,self.material.dtype) LL = np.clip( offset_, 0,np.minimum(self.cells, cells_+offset_)) UR = np.clip( offset_+cells_, 0,np.minimum(self.cells, cells_+offset_)) @@ -1067,13 +1065,11 @@ class Grid: Updated grid-based geometry. """ - def mp(entry, mapper): - return mapper[entry] if entry in mapper else entry + material = self.material.copy() + for f,t in zip(from_material,to_material): # ToDo Python 3.10 has strict mode for zip + material[self.material==f] = t - mp = np.vectorize(mp) - mapper = dict(zip(from_material,to_material)) - - return Grid(material = mp(self.material,mapper).reshape(self.cells), + return Grid(material = material, size = self.size, origin = self.origin, comments = self.comments+[util.execution_stamp('Grid','substitute')], From 48f38e74d71c0d259fa99d70d6f00a1ed0b43693 Mon Sep 17 00:00:00 2001 From: Test User Date: Thu, 20 Jan 2022 15:11:14 +0100 Subject: [PATCH 54/58] [skip ci] updated version information after successful test of v3.0.0-alpha5-454-gb64a603ef --- python/damask/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/damask/VERSION b/python/damask/VERSION index 75b174809..208571663 100644 --- a/python/damask/VERSION +++ b/python/damask/VERSION @@ -1 +1 @@ -v3.0.0-alpha5-451-g3fbf1459d +v3.0.0-alpha5-454-gb64a603ef From c2a822d8fd36a17f67e1d0fc31b9c514a7a633a1 Mon Sep 17 00:00:00 2001 From: Sharan Roongta Date: Thu, 20 Jan 2022 23:45:47 +0000 Subject: [PATCH 55/58] keys() function for dictionary as in Python. Does not work for gfortran which fails with an ICE. 
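A minimal usage sketch of the new Keys() accessor (illustration only, not part of the patch itself; the variable names follow the material.f90 hunk below, and on gfortran the getKeys() helper introduced there is used instead because of the compiler ICE):

    class(tNode), pointer :: phases
    character(len=:), dimension(:), allocatable :: names
    integer :: p

    phases => config_material%get('phases')   ! dict of all phases parsed from material.yaml
    names = phases%Keys()                     ! deferred-length array holding every key of the dict
    do p = 1, size(names)
      print'(1x,a)', names(p)                 ! e.g. the phase names defined by the user
    end do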
--- src/YAML_types.f90 | 28 ++++++++++++++++++++++++++++ src/material.f90 | 11 +++++++++-- 2 files changed, 37 insertions(+), 2 deletions(-) diff --git a/src/YAML_types.f90 b/src/YAML_types.f90 index 5884c2850..6e926b39e 100644 --- a/src/YAML_types.f90 +++ b/src/YAML_types.f90 @@ -62,6 +62,8 @@ module YAML_types tNode_get_byKey_as1dString => tNode_get_byKey_as1dString procedure :: & getKey => tNode_get_byIndex_asKey + procedure :: & + Keys => tNode_getKeys procedure :: & getIndex => tNode_get_byKey_asIndex procedure :: & @@ -625,6 +627,32 @@ function tNode_get_byIndex_asKey(self,i) result(key) end function tNode_get_byIndex_asKey +!-------------------------------------------------------------------------------------------------- +!> @brief Get all keys from a dictionary +!-------------------------------------------------------------------------------------------------- +function tNode_getKeys(self) result(keys) + + class(tNode), intent(in) :: self + character(len=:), dimension(:), allocatable :: keys + + character(len=pStringLen), dimension(:), allocatable :: temp + integer :: j, l + + allocate(temp(self%length)) + l = 0 + do j = 1, self%length + temp(j) = self%getKey(j) + l = max(len_trim(temp(j)),l) + end do + + allocate(character(l)::keys(self%length)) + do j = 1, self%length + keys(j) = trim(temp(j)) + end do + +end function tNode_getKeys + + !------------------------------------------------------------------------------------------------- !> @brief Checks if a given key/item is present in the dict/list !------------------------------------------------------------------------------------------------- diff --git a/src/material.f90 b/src/material.f90 index 52940ee4a..1e3a4b4ec 100644 --- a/src/material.f90 +++ b/src/material.f90 @@ -108,8 +108,14 @@ subroutine parse() homogenizations => config_material%get('homogenization') call sanityCheck(materials, homogenizations) + +#if defined (__GFORTRAN__) material_name_phase = getKeys(phases) material_name_homogenization = getKeys(homogenizations) +#else + material_name_phase = phases%Keys() + material_name_homogenization = homogenizations%Keys() +#endif allocate(homogenization_Nconstituents(homogenizations%length)) do h=1, homogenizations%length @@ -203,9 +209,9 @@ subroutine sanityCheck(materials,homogenizations) end subroutine sanityCheck - +#if defined (__GFORTRAN__) !-------------------------------------------------------------------------------------------------- -!> @brief Get all keys from a dictionary +!> @brief %keys() is broken on gfortran !-------------------------------------------------------------------------------------------------- function getKeys(dict) @@ -228,5 +234,6 @@ function getKeys(dict) end do end function getKeys +#endif end module material From 4b601a38e7f2904f95f3550c511b94627c1d58bc Mon Sep 17 00:00:00 2001 From: Test User Date: Fri, 21 Jan 2022 03:05:10 +0100 Subject: [PATCH 56/58] [skip ci] updated version information after successful test of v3.0.0-alpha5-457-g75040047c --- python/damask/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/damask/VERSION b/python/damask/VERSION index 208571663..beee5f0d7 100644 --- a/python/damask/VERSION +++ b/python/damask/VERSION @@ -1 +1 @@ -v3.0.0-alpha5-454-gb64a603ef +v3.0.0-alpha5-457-g75040047c From f1051ec28f58723c73bbef8b45090d1fc02e63f3 Mon Sep 17 00:00:00 2001 From: Martin Diehl Date: Fri, 21 Jan 2022 23:34:42 +0000 Subject: [PATCH 57/58] polishing --- src/YAML_types.f90 | 258 ++++++++++++++++++++++----------------------- 1 file changed, 
129 insertions(+), 129 deletions(-) diff --git a/src/YAML_types.f90 b/src/YAML_types.f90 index 6e926b39e..c57f602cf 100644 --- a/src/YAML_types.f90 +++ b/src/YAML_types.f90 @@ -119,7 +119,7 @@ module YAML_types type, extends(tNode), public :: tList - class(tItem), pointer :: first => null() + class(tItem), pointer :: first => NULL() contains procedure :: asFormattedString => tList_asFormattedString @@ -146,8 +146,8 @@ module YAML_types type :: tItem character(len=:), allocatable :: key - class(tNode), pointer :: node => null() - class(tItem), pointer :: next => null() + class(tNode), pointer :: node => NULL() + class(tItem), pointer :: next => NULL() contains final :: tItem_finalize @@ -223,22 +223,22 @@ subroutine selfTest select type(s1) class is(tScalar) s1 = '2' - endselect + end select select type(s2) class is(tScalar) s2 = '3' - endselect + end select select type(s3) class is(tScalar) s3 = '4' - endselect + end select select type(s4) class is(tScalar) s4 = '5' - endselect + end select allocate(tList::l1) @@ -251,14 +251,14 @@ subroutine selfTest if (any(dNeq(l1%as1dFloat(),[2.0_pReal,3.0_pReal]))) error stop 'tList_as1dFloat' if (n%get_asInt(1) /= 2) error stop 'byIndex_asInt' if (dNeq(n%get_asFloat(2),3.0_pReal)) error stop 'byIndex_asFloat' - endselect + end select allocate(tList::l3) select type(l3) class is(tList) call l3%append(s3) call l3%append(s4) - endselect + end select allocate(tList::l2) select type(l2) @@ -334,9 +334,12 @@ function tNode_asScalar(self) result(scalar) class(tNode), intent(in), target :: self class(tScalar), pointer :: scalar + select type(self) class is(tScalar) scalar => self + class default + nullify(scalar) end select end function tNode_asScalar @@ -350,9 +353,12 @@ function tNode_asList(self) result(list) class(tNode), intent(in), target :: self class(tList), pointer :: list + select type(self) class is(tList) list => self + class default + nullify(list) end select end function tNode_asList @@ -366,9 +372,12 @@ function tNode_asDict(self) result(dict) class(tNode), intent(in), target :: self class(tDict), pointer :: dict + select type(self) class is(tDict) dict => self + class default + nullify(dict) end select end function tNode_asDict @@ -387,12 +396,13 @@ function tNode_get_byIndex(self,i) result(node) class(tItem), pointer :: item integer :: j + select type(self) class is(tList) self_ => self%asList() class default call IO_error(706,ext_msg='Expected list') - endselect + end select item => self_%first @@ -411,15 +421,14 @@ end function tNode_get_byIndex !-------------------------------------------------------------------------------------------------- function tNode_get_byIndex_asFloat(self,i) result(nodeAsFloat) - class(tNode), intent(in), target :: self - integer, intent(in) :: i + class(tNode), intent(in) :: self + integer, intent(in) :: i real(pReal) :: nodeAsFloat - class(tNode), pointer :: node type(tScalar), pointer :: scalar - node => self%get(i) - select type(node) + + select type(node => self%get(i)) class is(tScalar) scalar => node%asScalar() nodeAsFloat = scalar%asFloat() @@ -435,15 +444,15 @@ end function tNode_get_byIndex_asFloat !-------------------------------------------------------------------------------------------------- function tNode_get_byIndex_asInt(self,i) result(nodeAsInt) - class(tNode), intent(in), target :: self - integer, intent(in) :: i + class(tNode), intent(in) :: self + integer, intent(in) :: i integer :: nodeAsInt class(tNode), pointer :: node type(tScalar), pointer :: scalar - node => self%get(i) - select 
type(node) + + select type(node => self%get(i)) class is(tScalar) scalar => node%asScalar() nodeAsInt = scalar%asInt() @@ -459,21 +468,20 @@ end function tNode_get_byIndex_asInt !-------------------------------------------------------------------------------------------------- function tNode_get_byIndex_asBool(self,i) result(nodeAsBool) - class(tNode), intent(in), target :: self - integer, intent(in) :: i + class(tNode), intent(in) :: self + integer, intent(in) :: i logical :: nodeAsBool - class(tNode), pointer :: node type(tScalar), pointer :: scalar - node => self%get(i) - select type(node) + + select type(node => self%get(i)) class is(tScalar) scalar => node%asScalar() nodeAsBool = scalar%asBool() class default call IO_error(706,ext_msg='Expected scalar Boolean') - endselect + end select end function tNode_get_byIndex_asBool @@ -483,21 +491,20 @@ end function tNode_get_byIndex_asBool !-------------------------------------------------------------------------------------------------- function tNode_get_byIndex_asString(self,i) result(nodeAsString) - class(tNode), intent(in), target :: self - integer, intent(in) :: i + class(tNode), intent(in) :: self + integer, intent(in) :: i character(len=:), allocatable :: nodeAsString - class(tNode), pointer :: node type(tScalar), pointer :: scalar - node => self%get(i) - select type(node) + + select type(node => self%get(i)) class is(tScalar) scalar => node%asScalar() nodeAsString = scalar%asString() class default call IO_error(706,ext_msg='Expected scalar string') - endselect + end select end function tNode_get_byIndex_asString @@ -507,21 +514,20 @@ end function tNode_get_byIndex_asString !-------------------------------------------------------------------------------------------------- function tNode_get_byIndex_as1dFloat(self,i) result(nodeAs1dFloat) - class(tNode), intent(in), target :: self - integer, intent(in) :: i + class(tNode), intent(in) :: self + integer, intent(in) :: i real(pReal), dimension(:), allocatable :: nodeAs1dFloat - class(tNode), pointer :: node class(tList), pointer :: list - node => self%get(i) - select type(node) + + select type(node => self%get(i)) class is(tList) list => node%asList() nodeAs1dFloat = list%as1dFloat() class default call IO_error(706,ext_msg='Expected list of floats') - endselect + end select end function tNode_get_byIndex_as1dFloat @@ -531,21 +537,20 @@ end function tNode_get_byIndex_as1dFloat !-------------------------------------------------------------------------------------------------- function tNode_get_byIndex_as1dInt(self,i) result(nodeAs1dInt) - class(tNode), intent(in), target :: self - integer, intent(in) :: i + class(tNode), intent(in) :: self + integer, intent(in) :: i integer, dimension(:), allocatable :: nodeAs1dInt - class(tNode), pointer :: node class(tList), pointer :: list - node => self%get(i) - select type(node) + + select type(node => self%get(i)) class is(tList) list => node%asList() nodeAs1dInt = list%as1dInt() class default call IO_error(706,ext_msg='Expected list of integers') - endselect + end select end function tNode_get_byIndex_as1dInt @@ -555,21 +560,20 @@ end function tNode_get_byIndex_as1dInt !-------------------------------------------------------------------------------------------------- function tNode_get_byIndex_as1dBool(self,i) result(nodeAs1dBool) - class(tNode), intent(in), target :: self - integer, intent(in) :: i + class(tNode), intent(in) :: self + integer, intent(in) :: i logical, dimension(:), allocatable :: nodeAs1dBool - class(tNode), pointer :: node 
class(tList), pointer :: list - node => self%get(i) - select type(node) + + select type(node => self%get(i)) class is(tList) list => node%asList() nodeAs1dBool = list%as1dBool() class default call IO_error(706,ext_msg='Expected list of Booleans') - endselect + end select end function tNode_get_byIndex_as1dBool @@ -579,21 +583,20 @@ end function tNode_get_byIndex_as1dBool !-------------------------------------------------------------------------------------------------- function tNode_get_byIndex_as1dString(self,i) result(nodeAs1dString) - class(tNode), intent(in), target :: self - integer, intent(in) :: i + class(tNode), intent(in) :: self + integer, intent(in) :: i character(len=:), allocatable, dimension(:) :: nodeAs1dString - class(tNode), pointer :: node type(tList), pointer :: list - node => self%get(i) - select type(node) + + select type(node => self%get(i)) class is(tList) list => node%asList() nodeAs1dString = list%as1dString() class default call IO_error(706,ext_msg='Expected list of strings') - endselect + end select end function tNode_get_byIndex_as1dString @@ -611,16 +614,17 @@ function tNode_get_byIndex_asKey(self,i) result(key) type(tDict), pointer :: dict type(tItem), pointer :: item + select type(self) class is(tDict) dict => self%asDict() item => dict%first do j = 1, min(i,dict%length)-1 item => item%next - enddo + end do class default call IO_error(706,ext_msg='Expected dict') - endselect + end select key = item%key @@ -638,6 +642,7 @@ function tNode_getKeys(self) result(keys) character(len=pStringLen), dimension(:), allocatable :: temp integer :: j, l + allocate(temp(self%length)) l = 0 do j = 1, self%length @@ -686,7 +691,7 @@ function tNode_contains(self,k) result(exists) enddo class default call IO_error(706,ext_msg='Expected list or dict') - endselect + end select end function tNode_contains @@ -714,7 +719,7 @@ function tNode_get_byKey(self,k,defaultVal) result(node) self_ => self%asDict() class default call IO_error(706,ext_msg='Expected dict for key '//k) - endselect + end select j = 1 item => self_%first @@ -741,23 +746,22 @@ end function tNode_get_byKey !-------------------------------------------------------------------------------------------------- function tNode_get_byKey_asFloat(self,k,defaultVal) result(nodeAsFloat) - class(tNode), intent(in), target :: self - character(len=*), intent(in) :: k - real(pReal), intent(in),optional :: defaultVal + class(tNode), intent(in) :: self + character(len=*), intent(in) :: k + real(pReal), intent(in), optional :: defaultVal real(pReal) :: nodeAsFloat - class(tNode), pointer :: node type(tScalar), pointer :: scalar + if (self%contains(k)) then - node => self%get(k) - select type(node) + select type(node => self%get(k)) class is(tScalar) scalar => node%asScalar() nodeAsFloat = scalar%asFloat() class default call IO_error(706,ext_msg='Expected scalar float for key '//k) - endselect + end select elseif (present(defaultVal)) then nodeAsFloat = defaultVal else @@ -772,23 +776,22 @@ end function tNode_get_byKey_asFloat !-------------------------------------------------------------------------------------------------- function tNode_get_byKey_asInt(self,k,defaultVal) result(nodeAsInt) - class(tNode), intent(in), target :: self - character(len=*), intent(in) :: k - integer, intent(in),optional :: defaultVal + class(tNode), intent(in) :: self + character(len=*), intent(in) :: k + integer, intent(in), optional :: defaultVal integer :: nodeAsInt - class(tNode), pointer :: node type(tScalar), pointer :: scalar + if (self%contains(k)) 
then - node => self%get(k) - select type(node) + select type(node => self%get(k)) class is(tScalar) scalar => node%asScalar() nodeAsInt = scalar%asInt() class default call IO_error(706,ext_msg='Expected scalar integer for key '//k) - endselect + end select elseif (present(defaultVal)) then nodeAsInt = defaultVal else @@ -803,23 +806,22 @@ end function tNode_get_byKey_asInt !-------------------------------------------------------------------------------------------------- function tNode_get_byKey_asBool(self,k,defaultVal) result(nodeAsBool) - class(tNode), intent(in), target :: self - character(len=*), intent(in) :: k - logical, intent(in),optional :: defaultVal + class(tNode), intent(in) :: self + character(len=*), intent(in) :: k + logical, intent(in), optional :: defaultVal logical :: nodeAsBool - class(tNode), pointer :: node type(tScalar), pointer :: scalar + if (self%contains(k)) then - node => self%get(k) - select type(node) + select type(node => self%get(k)) class is(tScalar) scalar => node%asScalar() nodeAsBool = scalar%asBool() class default call IO_error(706,ext_msg='Expected scalar Boolean for key '//k) - endselect + end select elseif (present(defaultVal)) then nodeAsBool = defaultVal else @@ -834,23 +836,22 @@ end function tNode_get_byKey_asBool !-------------------------------------------------------------------------------------------------- function tNode_get_byKey_asString(self,k,defaultVal) result(nodeAsString) - class(tNode), intent(in), target :: self - character(len=*), intent(in) :: k - character(len=*), intent(in),optional :: defaultVal + class(tNode), intent(in) :: self + character(len=*), intent(in) :: k + character(len=*), intent(in), optional :: defaultVal character(len=:), allocatable :: nodeAsString - class(tNode), pointer :: node type(tScalar), pointer :: scalar + if (self%contains(k)) then - node => self%get(k) - select type(node) + select type(node => self%get(k)) class is(tScalar) scalar => node%asScalar() nodeAsString = scalar%asString() class default call IO_error(706,ext_msg='Expected scalar string for key '//k) - endselect + end select elseif (present(defaultVal)) then nodeAsString = defaultVal else @@ -865,25 +866,24 @@ end function tNode_get_byKey_asString !-------------------------------------------------------------------------------------------------- function tNode_get_byKey_as1dFloat(self,k,defaultVal,requiredSize) result(nodeAs1dFloat) - class(tNode), intent(in), target :: self - character(len=*), intent(in) :: k + class(tNode), intent(in) :: self + character(len=*), intent(in) :: k real(pReal), intent(in), dimension(:), optional :: defaultVal integer, intent(in), optional :: requiredSize real(pReal), dimension(:), allocatable :: nodeAs1dFloat - class(tNode), pointer :: node type(tList), pointer :: list + if (self%contains(k)) then - node => self%get(k) - select type(node) + select type(node => self%get(k)) class is(tList) list => node%asList() nodeAs1dFloat = list%as1dFloat() class default call IO_error(706,ext_msg='Expected 1D float array for key '//k) - endselect + end select elseif (present(defaultVal)) then nodeAs1dFloat = defaultVal else @@ -902,25 +902,24 @@ end function tNode_get_byKey_as1dFloat !-------------------------------------------------------------------------------------------------- function tNode_get_byKey_as2dFloat(self,k,defaultVal,requiredShape) result(nodeAs2dFloat) - class(tNode), intent(in), target :: self - character(len=*), intent(in) :: k + class(tNode), intent(in) :: self + character(len=*), intent(in) :: k 
real(pReal), intent(in), dimension(:,:), optional :: defaultVal integer, intent(in), dimension(2), optional :: requiredShape real(pReal), dimension(:,:), allocatable :: nodeAs2dFloat - class(tNode), pointer :: node type(tList), pointer :: rows + if(self%contains(k)) then - node => self%get(k) - select type(node) + select type(node => self%get(k)) class is(tList) rows => node%asList() nodeAs2dFloat = rows%as2dFloat() class default call IO_error(706,ext_msg='Expected 2D float array for key '//k) - endselect + end select elseif(present(defaultVal)) then nodeAs2dFloat = defaultVal else @@ -939,24 +938,22 @@ end function tNode_get_byKey_as2dFloat !-------------------------------------------------------------------------------------------------- function tNode_get_byKey_as1dInt(self,k,defaultVal,requiredSize) result(nodeAs1dInt) - class(tNode), intent(in), target :: self - character(len=*), intent(in) :: k + class(tNode), intent(in) :: self + character(len=*), intent(in) :: k integer, dimension(:), intent(in), optional :: defaultVal integer, intent(in), optional :: requiredSize integer, dimension(:), allocatable :: nodeAs1dInt - class(tNode), pointer :: node type(tList), pointer :: list if (self%contains(k)) then - node => self%get(k) - select type(node) + select type(node => self%get(k)) class is(tList) list => node%asList() nodeAs1dInt = list%as1dInt() class default call IO_error(706,ext_msg='Expected 1D integer array for key '//k) - endselect + end select elseif (present(defaultVal)) then nodeAs1dInt = defaultVal else @@ -975,23 +972,22 @@ end function tNode_get_byKey_as1dInt !-------------------------------------------------------------------------------------------------- function tNode_get_byKey_as1dBool(self,k,defaultVal) result(nodeAs1dBool) - class(tNode), intent(in), target :: self - character(len=*), intent(in) :: k + class(tNode), intent(in) :: self + character(len=*), intent(in) :: k logical, dimension(:), intent(in), optional :: defaultVal logical, dimension(:), allocatable :: nodeAs1dBool - class(tNode), pointer :: node type(tList), pointer :: list + if (self%contains(k)) then - node => self%get(k) - select type(node) + select type(node => self%get(k)) class is(tList) list => node%asList() nodeAs1dBool = list%as1dBool() class default call IO_error(706,ext_msg='Expected 1D Boolean array for key '//k) - endselect + end select elseif (present(defaultVal)) then nodeAs1dBool = defaultVal else @@ -1006,23 +1002,22 @@ end function tNode_get_byKey_as1dBool !-------------------------------------------------------------------------------------------------- function tNode_get_byKey_as1dString(self,k,defaultVal) result(nodeAs1dString) - class(tNode), intent(in), target :: self - character(len=*), intent(in) :: k + class(tNode), intent(in) :: self + character(len=*), intent(in) :: k character(len=*), intent(in), dimension(:), optional :: defaultVal character(len=:), allocatable, dimension(:) :: nodeAs1dString - class(tNode), pointer :: node type(tList), pointer :: list + if (self%contains(k)) then - node => self%get(k) - select type(node) + select type(node => self%get(k)) class is(tList) list => node%asList() nodeAs1dString = list%as1dString() class default call IO_error(706,ext_msg='Expected 1D string array for key '//k) - endselect + end select elseif (present(defaultVal)) then nodeAs1dString = defaultVal else @@ -1043,11 +1038,11 @@ function output_as1dString(self) result(output) !ToDo: SR: Re class(tNode), pointer :: output_list integer :: o - output_list => 
self%get('output',defaultVal=emptyList) + output_list => self%get('output',defaultVal=emptyList) allocate(output(output_list%length)) do o = 1, output_list%length output(o) = output_list%get_asString(o) - enddo + end do end function output_as1dString @@ -1070,7 +1065,7 @@ function tNode_get_byKey_asIndex(self,key) result(keyIndex) do while (associated(item%next) .and. item%key /= key) item => item%next keyIndex = keyIndex+1 - enddo + end do if (item%key /= key) call IO_error(140,ext_msg=key) @@ -1115,7 +1110,7 @@ recursive function tList_asFormattedString(self,indent) result(str) if (i /= 1) str = str//repeat(' ',indent_) str = str//'- '//item%node%asFormattedString(indent_+2) item => item%next - enddo + end do end function tList_asFormattedString @@ -1147,9 +1142,9 @@ recursive function tDict_asFormattedString(self,indent) result(str) str = str//trim(item%key)//': '//item%node%asFormattedString(indent_+len_trim(item%key)+2) class default str = str//trim(item%key)//':'//IO_EOL//repeat(' ',indent_+2)//item%node%asFormattedString(indent_+2) - endselect + end select item => item%next - enddo + end do end function tDict_asFormattedString @@ -1218,13 +1213,14 @@ function tList_as1dFloat(self) type(tItem), pointer :: item type(tScalar), pointer :: scalar + allocate(tList_as1dFloat(self%length)) item => self%first do i = 1, self%length scalar => item%node%asScalar() tList_as1dFloat(i) = scalar%asFloat() item => item%next - enddo + end do end function tList_as1dFloat @@ -1241,6 +1237,7 @@ function tList_as2dFloat(self) class(tNode), pointer :: row type(tList), pointer :: row_data + row => self%get(1) row_data => row%asList() allocate(tList_as2dFloat(self%length,row_data%length)) @@ -1248,9 +1245,9 @@ function tList_as2dFloat(self) do i=1,self%length row => self%get(i) row_data => row%asList() - if(row_data%length /= size(tList_as2dFloat,2)) call IO_error(709,ext_msg='Varying number of columns') + if (row_data%length /= size(tList_as2dFloat,2)) call IO_error(709,ext_msg='Varying number of columns') tList_as2dFloat(i,:) = self%get_as1dFloat(i) - enddo + end do end function tList_as2dFloat @@ -1267,13 +1264,14 @@ function tList_as1dInt(self) type(tItem), pointer :: item type(tScalar), pointer :: scalar + allocate(tList_as1dInt(self%length)) item => self%first do i = 1, self%length scalar => item%node%asScalar() tList_as1dInt(i) = scalar%asInt() item => item%next - enddo + end do end function tList_as1dInt @@ -1290,13 +1288,14 @@ function tList_as1dBool(self) type(tItem), pointer :: item type(tScalar), pointer :: scalar + allocate(tList_as1dBool(self%length)) item => self%first do i = 1, self%length scalar => item%node%asScalar() tList_as1dBool(i) = scalar%asBool() item => item%next - enddo + end do end function tList_as1dBool @@ -1313,13 +1312,14 @@ function tList_as1dString(self) type(tItem), pointer :: item type(tScalar), pointer :: scalar + len_max = 0 item => self%first do i = 1, self%length scalar => item%node%asScalar() len_max = max(len_max, len_trim(scalar%asString())) item => item%next - enddo + end do allocate(character(len=len_max) :: tList_as1dString(self%length)) item => self%first From beb62e1c0aa237eb08f1e5bb3e27b9b376891a11 Mon Sep 17 00:00:00 2001 From: Test User Date: Sat, 22 Jan 2022 02:46:29 +0100 Subject: [PATCH 58/58] [skip ci] updated version information after successful test of v3.0.0-alpha5-460-gfc7c165af --- python/damask/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/damask/VERSION b/python/damask/VERSION index beee5f0d7..189f07735 100644 
--- a/python/damask/VERSION
+++ b/python/damask/VERSION
@@ -1 +1 @@
-v3.0.0-alpha5-457-g75040047c
+v3.0.0-alpha5-460-gfc7c165af