Merge remote-tracking branch 'origin/development' into 42-new-coding-style-for-homogenization

Martin Diehl 2018-10-13 17:27:25 +02:00
commit 06d71d9d2c
79 changed files with 335967 additions and 11428 deletions


@ -29,6 +29,11 @@ before_script:
done
- source $DAMASKROOT/env/DAMASK.sh
- cd $DAMASKROOT/PRIVATE/testing
- echo Job start:" $(date)"
###################################################################################################
after_script:
- echo Job end:" $(date)"
###################################################################################################
variables:
@ -47,6 +52,7 @@ variables:
# ===============================================================================================
# ++++++++++++ Compiler ++++++++++++++++++++++++++++++++++++++++++++++
IntelCompiler16_0: "Compiler/Intel/16.0 Libraries/IMKL/2016"
IntelCompiler16_4: "Compiler/Intel/16.4 Libraries/IMKL/2016-4"
IntelCompiler17_0: "Compiler/Intel/17.0 Libraries/IMKL/2017"
IntelCompiler18_1: "Compiler/Intel/18.1 Libraries/IMKL/2018"
GNUCompiler7_3: "Compiler/GNU/7.3"
@ -54,18 +60,17 @@ variables:
IntelCompiler: "$IntelCompiler18_1"
GNUCompiler: "$GNUCompiler7_3"
# ++++++++++++ MPI +++++++++++++++++++++++++++++++++++++++++++++++++++
MPICH3_2Intel17_0: "MPI/Intel/17.0/MPICH/3.2"
MPICH3_2Intel18_1: "MPI/Intel/18.1/MPICH/3.2.1"
MPICH3_2GNU7_3: "MPI/GNU/7.3/MPICH/3.2.1"
# ------------ Defaults ----------------------------------------------
MPICH_GNU: "$MPICH3_2GNU7_3"
MPICH_Intel: "$MPICH3_2Intel18_1"
MPICH_GNU: "$MPICH3_2GNU7_3"
# ++++++++++++ PETSc +++++++++++++++++++++++++++++++++++++++++++++++++
PETSc3_9_1MPICH3_2Intel18_1: "Libraries/PETSc/3.9.1/Intel-18.1-MPICH-3.2.1"
PETSc3_9_1MPICH3_2GNU7_3: "Libraries/PETSc/3.9.1/GNU-7.3-MPICH-3.2.1"
PETSc3_10_0MPICH3_2Intel18_1: "Libraries/PETSc/3.10.0/Intel-18.1-MPICH-3.2.1"
PETSc3_10_0MPICH3_2GNU7_3: "Libraries/PETSc/3.10.0/GNU-7.3-MPICH-3.2.1"
# ------------ Defaults ----------------------------------------------
PETSc_MPICH_Intel: "$PETSc3_9_1MPICH3_2Intel18_1"
PETSc_MPICH_GNU: "$PETSc3_9_1MPICH3_2GNU7_3"
PETSc_MPICH_Intel: "$PETSc3_10_0MPICH3_2Intel18_1"
PETSc_MPICH_GNU: "$PETSc3_10_0MPICH3_2GNU7_3"
# ++++++++++++ FEM +++++++++++++++++++++++++++++++++++++++++++++++++++
Abaqus2017: "FEM/Abaqus/2017"
MSC2017: "FEM/MSC/2017"
@ -347,7 +352,7 @@ TextureComponents:
Marc_compileIfort2017:
stage: compileMarc2017
script:
- module load $IntelCompiler17_0 $MSC2017
- module load $IntelCompiler16_4 $MSC2017
- Marc_compileIfort/test.py -m 2017
except:
- master
@ -357,7 +362,7 @@ Marc_compileIfort2017:
Hex_elastic:
stage: marc
script:
- module load $IntelCompiler17_0 $MSC
- module load $IntelCompiler16_4 $MSC
- Hex_elastic/test.py
except:
- master
@ -366,7 +371,7 @@ Hex_elastic:
CubicFCC_elastic:
stage: marc
script:
- module load $IntelCompiler17_0 $MSC
- module load $IntelCompiler16_4 $MSC
- CubicFCC_elastic/test.py
except:
- master
@ -375,7 +380,7 @@ CubicFCC_elastic:
CubicBCC_elastic:
stage: marc
script:
- module load $IntelCompiler17_0 $MSC
- module load $IntelCompiler16_4 $MSC
- CubicBCC_elastic/test.py
except:
- master
@ -384,7 +389,7 @@ CubicBCC_elastic:
J2_plasticBehavior:
stage: marc
script:
- module load $IntelCompiler17_0 $MSC
- module load $IntelCompiler16_4 $MSC
- J2_plasticBehavior/test.py
except:
- master
@ -394,7 +399,7 @@ J2_plasticBehavior:
Abaqus_compile2017:
stage: compileAbaqus2017
script:
- module load $IntelCompiler16_0 $Abaqus2017
- module load $IntelCompiler16_4 $Abaqus2017
- Abaqus_compileIfort/test.py -a 2017
except:
- master
@ -410,7 +415,7 @@ SpectralExample:
AbaqusExample:
stage: example
script:
- module load $IntelCompiler16_0 $Abaqus
- module load $IntelCompiler16_4 $Abaqus
- Abaqus_example/test.py
only:
- development
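The default tool chain selected by the variables above can be reproduced outside the CI runner by loading the same modules by hand. A minimal sketch, assuming an Environment Modules/Lmod setup like the one the CI uses and that $DAMASKROOT points at a DAMASK checkout; module names are copied verbatim from the Intel defaults above:

    # Intel tool chain matching IntelCompiler18_1 / MPICH3_2Intel18_1 / PETSc3_10_0MPICH3_2Intel18_1
    module load Compiler/Intel/18.1 Libraries/IMKL/2018 \
                MPI/Intel/18.1/MPICH/3.2.1 \
                Libraries/PETSc/3.10.0/Intel-18.1-MPICH-3.2.1
    source $DAMASKROOT/env/DAMASK.sh   # same environment setup as in before_script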

@ -1 +1 @@
Subproject commit fa02113fa7a0af3376648e4320318ec337fe79aa
Subproject commit 70f135395370b06c424e0dcf163afbd9d8ad24a1


@ -1 +1 @@
v2.0.2-488-ge0cecd4c
v2.0.2-756-g7217cdac

examples/.gitignore

@ -1 +1,9 @@
*.C_ref
*.mesh
*.outputConstitutive
*.outputCrystallite
*.outputHomogenization
*.spectralOut
*.sta
*.vt*
postProc


@ -5,15 +5,10 @@
(output) orientation # quaternion
(output) eulerangles # orientation as Bunge triple in degree
(output) grainrotation # deviation from initial orientation as axis (1-3) and angle in degree (4) in crystal reference coordinates
(output) grainrotationx # deviation from initial orientation as angle in degrees around sample reference x axis
(output) grainrotationy # deviation from initial orientation as angle in degrees around sample reference y axis
(output) grainrotationz # deviation from initial orientation as angle in degrees around sample reference z axis
(output) f # deformation gradient tensor; synonyms: "defgrad"
(output) f # deformation gradient tensor
(output) fe # elastic deformation gradient tensor
(output) fp # plastic deformation gradient tensor
(output) e # total strain as Green-Lagrange tensor
(output) ee # elastic strain as Green-Lagrange tensor
(output) p # first Piola-Kirchhoff stress tensor; synonyms: "firstpiola", "1stpiola"
(output) s # second Piola-Kirchhoff stress tensor; synonyms: "tstar", "secondpiola", "2ndpiola"
(output) p # first Piola-Kirchhoff stress tensor
(output) s # second Piola-Kirchhoff stress tensor
(output) lp # plastic velocity gradient tensor
(output) elasmatrix # elastic stiffness matrix


@ -0,0 +1,759 @@
#-------------------#
<homogenization>
#-------------------#
[SX]
mech none
#-------------------#
<crystallite>
#-------------------#
[aLittleSomething]
(output) f
(output) p
#-------------------#
<phase>
#-------------------#
[Aluminum]
elasticity hooke
plasticity phenopowerlaw
lattice_structure fcc
Nslip 12 # per family
Ntwin 0 # per family
c11 106.75e9
c12 60.41e9
c44 28.34e9
gdot0_slip 0.001
n_slip 20
tau0_slip 31e6 # per family
tausat_slip 63e6 # per family
a_slip 2.25
gdot0_twin 0.001
n_twin 20
tau0_twin 31e6 # per family
s_pr 0 # push-up factor for slip saturation due to twinning
twin_b 0
twin_c 0
twin_d 0
twin_e 0
h0_slipslip 75e6
h0_twinslip 0
h0_twintwin 0
interaction_slipslip 1 1 1.4 1.4 1.4 1.4
interaction_sliptwin 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
interaction_twinslip 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
interaction_twintwin 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
atol_resistance 1
#-------------------#
<microstructure>
#-------------------#
[Grain001]
crystallite 1
(constituent) phase 1 texture 1 fraction 1.0
[Grain002]
crystallite 1
(constituent) phase 1 texture 2 fraction 1.0
[Grain003]
crystallite 1
(constituent) phase 1 texture 3 fraction 1.0
[Grain004]
crystallite 1
(constituent) phase 1 texture 4 fraction 1.0
[Grain005]
crystallite 1
(constituent) phase 1 texture 5 fraction 1.0
[Grain006]
crystallite 1
(constituent) phase 1 texture 6 fraction 1.0
[Grain007]
crystallite 1
(constituent) phase 1 texture 7 fraction 1.0
[Grain008]
crystallite 1
(constituent) phase 1 texture 8 fraction 1.0
[Grain009]
crystallite 1
(constituent) phase 1 texture 9 fraction 1.0
[Grain010]
crystallite 1
(constituent) phase 1 texture 10 fraction 1.0
[Grain011]
crystallite 1
(constituent) phase 1 texture 11 fraction 1.0
[Grain012]
crystallite 1
(constituent) phase 1 texture 12 fraction 1.0
[Grain013]
crystallite 1
(constituent) phase 1 texture 13 fraction 1.0
[Grain014]
crystallite 1
(constituent) phase 1 texture 14 fraction 1.0
[Grain015]
crystallite 1
(constituent) phase 1 texture 15 fraction 1.0
[Grain016]
crystallite 1
(constituent) phase 1 texture 16 fraction 1.0
[Grain017]
crystallite 1
(constituent) phase 1 texture 17 fraction 1.0
[Grain018]
crystallite 1
(constituent) phase 1 texture 18 fraction 1.0
[Grain019]
crystallite 1
(constituent) phase 1 texture 19 fraction 1.0
[Grain020]
crystallite 1
(constituent) phase 1 texture 20 fraction 1.0
[Grain021]
crystallite 1
(constituent) phase 1 texture 21 fraction 1.0
[Grain022]
crystallite 1
(constituent) phase 1 texture 22 fraction 1.0
[Grain023]
crystallite 1
(constituent) phase 1 texture 23 fraction 1.0
[Grain024]
crystallite 1
(constituent) phase 1 texture 24 fraction 1.0
[Grain025]
crystallite 1
(constituent) phase 1 texture 25 fraction 1.0
[Grain026]
crystallite 1
(constituent) phase 1 texture 26 fraction 1.0
[Grain027]
crystallite 1
(constituent) phase 1 texture 27 fraction 1.0
[Grain028]
crystallite 1
(constituent) phase 1 texture 28 fraction 1.0
[Grain029]
crystallite 1
(constituent) phase 1 texture 29 fraction 1.0
[Grain030]
crystallite 1
(constituent) phase 1 texture 30 fraction 1.0
[Grain031]
crystallite 1
(constituent) phase 1 texture 31 fraction 1.0
[Grain032]
crystallite 1
(constituent) phase 1 texture 32 fraction 1.0
[Grain033]
crystallite 1
(constituent) phase 1 texture 33 fraction 1.0
[Grain034]
crystallite 1
(constituent) phase 1 texture 34 fraction 1.0
[Grain035]
crystallite 1
(constituent) phase 1 texture 35 fraction 1.0
[Grain036]
crystallite 1
(constituent) phase 1 texture 36 fraction 1.0
[Grain037]
crystallite 1
(constituent) phase 1 texture 37 fraction 1.0
[Grain038]
crystallite 1
(constituent) phase 1 texture 38 fraction 1.0
[Grain039]
crystallite 1
(constituent) phase 1 texture 39 fraction 1.0
[Grain040]
crystallite 1
(constituent) phase 1 texture 40 fraction 1.0
[Grain041]
crystallite 1
(constituent) phase 1 texture 41 fraction 1.0
[Grain042]
crystallite 1
(constituent) phase 1 texture 42 fraction 1.0
[Grain043]
crystallite 1
(constituent) phase 1 texture 43 fraction 1.0
[Grain044]
crystallite 1
(constituent) phase 1 texture 44 fraction 1.0
[Grain045]
crystallite 1
(constituent) phase 1 texture 45 fraction 1.0
[Grain046]
crystallite 1
(constituent) phase 1 texture 46 fraction 1.0
[Grain047]
crystallite 1
(constituent) phase 1 texture 47 fraction 1.0
[Grain048]
crystallite 1
(constituent) phase 1 texture 48 fraction 1.0
[Grain049]
crystallite 1
(constituent) phase 1 texture 49 fraction 1.0
[Grain050]
crystallite 1
(constituent) phase 1 texture 50 fraction 1.0
[Grain051]
crystallite 1
(constituent) phase 1 texture 51 fraction 1.0
[Grain052]
crystallite 1
(constituent) phase 1 texture 52 fraction 1.0
[Grain053]
crystallite 1
(constituent) phase 1 texture 53 fraction 1.0
[Grain054]
crystallite 1
(constituent) phase 1 texture 54 fraction 1.0
[Grain055]
crystallite 1
(constituent) phase 1 texture 55 fraction 1.0
[Grain056]
crystallite 1
(constituent) phase 1 texture 56 fraction 1.0
[Grain057]
crystallite 1
(constituent) phase 1 texture 57 fraction 1.0
[Grain058]
crystallite 1
(constituent) phase 1 texture 58 fraction 1.0
[Grain059]
crystallite 1
(constituent) phase 1 texture 59 fraction 1.0
[Grain060]
crystallite 1
(constituent) phase 1 texture 60 fraction 1.0
[Grain061]
crystallite 1
(constituent) phase 1 texture 61 fraction 1.0
[Grain062]
crystallite 1
(constituent) phase 1 texture 62 fraction 1.0
[Grain063]
crystallite 1
(constituent) phase 1 texture 63 fraction 1.0
[Grain064]
crystallite 1
(constituent) phase 1 texture 64 fraction 1.0
[Grain065]
crystallite 1
(constituent) phase 1 texture 65 fraction 1.0
[Grain066]
crystallite 1
(constituent) phase 1 texture 66 fraction 1.0
[Grain067]
crystallite 1
(constituent) phase 1 texture 67 fraction 1.0
[Grain068]
crystallite 1
(constituent) phase 1 texture 68 fraction 1.0
[Grain069]
crystallite 1
(constituent) phase 1 texture 69 fraction 1.0
[Grain070]
crystallite 1
(constituent) phase 1 texture 70 fraction 1.0
[Grain071]
crystallite 1
(constituent) phase 1 texture 71 fraction 1.0
[Grain072]
crystallite 1
(constituent) phase 1 texture 72 fraction 1.0
[Grain073]
crystallite 1
(constituent) phase 1 texture 73 fraction 1.0
[Grain074]
crystallite 1
(constituent) phase 1 texture 74 fraction 1.0
[Grain075]
crystallite 1
(constituent) phase 1 texture 75 fraction 1.0
[Grain076]
crystallite 1
(constituent) phase 1 texture 76 fraction 1.0
[Grain077]
crystallite 1
(constituent) phase 1 texture 77 fraction 1.0
[Grain078]
crystallite 1
(constituent) phase 1 texture 78 fraction 1.0
[Grain079]
crystallite 1
(constituent) phase 1 texture 79 fraction 1.0
[Grain080]
crystallite 1
(constituent) phase 1 texture 80 fraction 1.0
[Grain081]
crystallite 1
(constituent) phase 1 texture 81 fraction 1.0
[Grain082]
crystallite 1
(constituent) phase 1 texture 82 fraction 1.0
[Grain083]
crystallite 1
(constituent) phase 1 texture 83 fraction 1.0
[Grain084]
crystallite 1
(constituent) phase 1 texture 84 fraction 1.0
[Grain085]
crystallite 1
(constituent) phase 1 texture 85 fraction 1.0
[Grain086]
crystallite 1
(constituent) phase 1 texture 86 fraction 1.0
[Grain087]
crystallite 1
(constituent) phase 1 texture 87 fraction 1.0
[Grain088]
crystallite 1
(constituent) phase 1 texture 88 fraction 1.0
[Grain089]
crystallite 1
(constituent) phase 1 texture 89 fraction 1.0
[Grain090]
crystallite 1
(constituent) phase 1 texture 90 fraction 1.0
[Grain091]
crystallite 1
(constituent) phase 1 texture 91 fraction 1.0
[Grain092]
crystallite 1
(constituent) phase 1 texture 92 fraction 1.0
[Grain093]
crystallite 1
(constituent) phase 1 texture 93 fraction 1.0
[Grain094]
crystallite 1
(constituent) phase 1 texture 94 fraction 1.0
[Grain095]
crystallite 1
(constituent) phase 1 texture 95 fraction 1.0
[Grain096]
crystallite 1
(constituent) phase 1 texture 96 fraction 1.0
[Grain097]
crystallite 1
(constituent) phase 1 texture 97 fraction 1.0
[Grain098]
crystallite 1
(constituent) phase 1 texture 98 fraction 1.0
[Grain099]
crystallite 1
(constituent) phase 1 texture 99 fraction 1.0
[Grain100]
crystallite 1
(constituent) phase 1 texture 100 fraction 1.0
#-------------------#
<texture>
#-------------------#
[Grain001]
(gauss) phi1 172.344 Phi 114.046 phi2 294.669 scatter 0.0 fraction 1.0
[Grain002]
(gauss) phi1 186.013 Phi 94.7338 phi2 329.683 scatter 0.0 fraction 1.0
[Grain003]
(gauss) phi1 162.41 Phi 98.9455 phi2 130.322 scatter 0.0 fraction 1.0
[Grain004]
(gauss) phi1 355.272 Phi 140.621 phi2 125.567 scatter 0.0 fraction 1.0
[Grain005]
(gauss) phi1 21.7641 Phi 143.388 phi2 240.373 scatter 0.0 fraction 1.0
[Grain006]
(gauss) phi1 88.1966 Phi 92.3358 phi2 194.78 scatter 0.0 fraction 1.0
[Grain007]
(gauss) phi1 161.137 Phi 78.0062 phi2 111.948 scatter 0.0 fraction 1.0
[Grain008]
(gauss) phi1 169.792 Phi 89.5333 phi2 159.265 scatter 0.0 fraction 1.0
[Grain009]
(gauss) phi1 264.847 Phi 130.291 phi2 180.604 scatter 0.0 fraction 1.0
[Grain010]
(gauss) phi1 70.6323 Phi 84.1754 phi2 341.162 scatter 0.0 fraction 1.0
[Grain011]
(gauss) phi1 67.7751 Phi 36.1662 phi2 139.898 scatter 0.0 fraction 1.0
[Grain012]
(gauss) phi1 111.621 Phi 19.1089 phi2 228.338 scatter 0.0 fraction 1.0
[Grain013]
(gauss) phi1 129.9 Phi 139.011 phi2 238.735 scatter 0.0 fraction 1.0
[Grain014]
(gauss) phi1 221.405 Phi 129.743 phi2 99.6471 scatter 0.0 fraction 1.0
[Grain015]
(gauss) phi1 241.783 Phi 98.3729 phi2 260.615 scatter 0.0 fraction 1.0
[Grain016]
(gauss) phi1 72.5592 Phi 122.403 phi2 165.046 scatter 0.0 fraction 1.0
[Grain017]
(gauss) phi1 64.8818 Phi 82.6384 phi2 236.305 scatter 0.0 fraction 1.0
[Grain018]
(gauss) phi1 201.096 Phi 65.9312 phi2 330.745 scatter 0.0 fraction 1.0
[Grain019]
(gauss) phi1 192.994 Phi 81.9371 phi2 239.326 scatter 0.0 fraction 1.0
[Grain020]
(gauss) phi1 125.335 Phi 90.4527 phi2 207.982 scatter 0.0 fraction 1.0
[Grain021]
(gauss) phi1 55.8848 Phi 26.4455 phi2 100.921 scatter 0.0 fraction 1.0
[Grain022]
(gauss) phi1 40.722 Phi 95.6415 phi2 269.174 scatter 0.0 fraction 1.0
[Grain023]
(gauss) phi1 250.487 Phi 69.6035 phi2 201.732 scatter 0.0 fraction 1.0
[Grain024]
(gauss) phi1 204.199 Phi 84.983 phi2 20.3469 scatter 0.0 fraction 1.0
[Grain025]
(gauss) phi1 12.7416 Phi 128.589 phi2 271.553 scatter 0.0 fraction 1.0
[Grain026]
(gauss) phi1 299.704 Phi 85.3961 phi2 217.359 scatter 0.0 fraction 1.0
[Grain027]
(gauss) phi1 48.8232 Phi 83.6209 phi2 200.361 scatter 0.0 fraction 1.0
[Grain028]
(gauss) phi1 336.395 Phi 97.3059 phi2 187.071 scatter 0.0 fraction 1.0
[Grain029]
(gauss) phi1 274.354 Phi 78.2424 phi2 320.308 scatter 0.0 fraction 1.0
[Grain030]
(gauss) phi1 320.776 Phi 149.72 phi2 163.862 scatter 0.0 fraction 1.0
[Grain031]
(gauss) phi1 179.549 Phi 106.548 phi2 345.498 scatter 0.0 fraction 1.0
[Grain032]
(gauss) phi1 163.508 Phi 24.4238 phi2 127.809 scatter 0.0 fraction 1.0
[Grain033]
(gauss) phi1 193.405 Phi 157.012 phi2 321.342 scatter 0.0 fraction 1.0
[Grain034]
(gauss) phi1 9.09886 Phi 95.9453 phi2 102.32 scatter 0.0 fraction 1.0
[Grain035]
(gauss) phi1 353.876 Phi 150.824 phi2 174.886 scatter 0.0 fraction 1.0
[Grain036]
(gauss) phi1 138.914 Phi 76.5811 phi2 167.927 scatter 0.0 fraction 1.0
[Grain037]
(gauss) phi1 262.655 Phi 76.2738 phi2 12.4459 scatter 0.0 fraction 1.0
[Grain038]
(gauss) phi1 121.849 Phi 65.5254 phi2 192.601 scatter 0.0 fraction 1.0
[Grain039]
(gauss) phi1 275.824 Phi 81.6788 phi2 164.228 scatter 0.0 fraction 1.0
[Grain040]
(gauss) phi1 68.9202 Phi 160.5 phi2 210.862 scatter 0.0 fraction 1.0
[Grain041]
(gauss) phi1 51.0398 Phi 82.7291 phi2 74.016 scatter 0.0 fraction 1.0
[Grain042]
(gauss) phi1 338.746 Phi 62.7854 phi2 129.362 scatter 0.0 fraction 1.0
[Grain043]
(gauss) phi1 204.51 Phi 151.256 phi2 178.89 scatter 0.0 fraction 1.0
[Grain044]
(gauss) phi1 122.098 Phi 104.003 phi2 323.04 scatter 0.0 fraction 1.0
[Grain045]
(gauss) phi1 106.693 Phi 108.61 phi2 336.935 scatter 0.0 fraction 1.0
[Grain046]
(gauss) phi1 118.856 Phi 160.992 phi2 316.152 scatter 0.0 fraction 1.0
[Grain047]
(gauss) phi1 177.962 Phi 114.868 phi2 13.6918 scatter 0.0 fraction 1.0
[Grain048]
(gauss) phi1 330.273 Phi 174.495 phi2 231.249 scatter 0.0 fraction 1.0
[Grain049]
(gauss) phi1 7.31937 Phi 94.7313 phi2 17.8461 scatter 0.0 fraction 1.0
[Grain050]
(gauss) phi1 74.3385 Phi 49.9546 phi2 286.482 scatter 0.0 fraction 1.0
[Grain051]
(gauss) phi1 326.388 Phi 76.9547 phi2 214.758 scatter 0.0 fraction 1.0
[Grain052]
(gauss) phi1 276.024 Phi 72.1242 phi2 275.884 scatter 0.0 fraction 1.0
[Grain053]
(gauss) phi1 137.681 Phi 116.99 phi2 6.87047 scatter 0.0 fraction 1.0
[Grain054]
(gauss) phi1 200.213 Phi 123.618 phi2 268.84 scatter 0.0 fraction 1.0
[Grain055]
(gauss) phi1 7.13702 Phi 56.2015 phi2 119.65 scatter 0.0 fraction 1.0
[Grain056]
(gauss) phi1 72.1783 Phi 81.0906 phi2 6.06213 scatter 0.0 fraction 1.0
[Grain057]
(gauss) phi1 184.565 Phi 110.01 phi2 239.546 scatter 0.0 fraction 1.0
[Grain058]
(gauss) phi1 210.124 Phi 128.631 phi2 8.61611 scatter 0.0 fraction 1.0
[Grain059]
(gauss) phi1 290.326 Phi 170.412 phi2 144.269 scatter 0.0 fraction 1.0
[Grain060]
(gauss) phi1 204.748 Phi 76.7343 phi2 200.385 scatter 0.0 fraction 1.0
[Grain061]
(gauss) phi1 54.3015 Phi 65.9143 phi2 117.373 scatter 0.0 fraction 1.0
[Grain062]
(gauss) phi1 261.263 Phi 52.255 phi2 95.9146 scatter 0.0 fraction 1.0
[Grain063]
(gauss) phi1 328.054 Phi 51.0778 phi2 24.2782 scatter 0.0 fraction 1.0
[Grain064]
(gauss) phi1 163.03 Phi 154.894 phi2 64.126 scatter 0.0 fraction 1.0
[Grain065]
(gauss) phi1 183.87 Phi 80.1848 phi2 18.7438 scatter 0.0 fraction 1.0
[Grain066]
(gauss) phi1 219.91 Phi 113.727 phi2 126.67 scatter 0.0 fraction 1.0
[Grain067]
(gauss) phi1 1.43844 Phi 87.6365 phi2 217.342 scatter 0.0 fraction 1.0
[Grain068]
(gauss) phi1 16.6245 Phi 162.07 phi2 43.7899 scatter 0.0 fraction 1.0
[Grain069]
(gauss) phi1 16.86 Phi 53.8682 phi2 256.917 scatter 0.0 fraction 1.0
[Grain070]
(gauss) phi1 1.01921 Phi 118.449 phi2 307.223 scatter 0.0 fraction 1.0
[Grain071]
(gauss) phi1 19.0397 Phi 83.8885 phi2 262.687 scatter 0.0 fraction 1.0
[Grain072]
(gauss) phi1 99.799 Phi 77.2307 phi2 84.9727 scatter 0.0 fraction 1.0
[Grain073]
(gauss) phi1 234.292 Phi 63.5029 phi2 250.315 scatter 0.0 fraction 1.0
[Grain074]
(gauss) phi1 315.529 Phi 106.015 phi2 103.711 scatter 0.0 fraction 1.0
[Grain075]
(gauss) phi1 235.595 Phi 110.152 phi2 210.277 scatter 0.0 fraction 1.0
[Grain076]
(gauss) phi1 341.907 Phi 17.1839 phi2 332.75 scatter 0.0 fraction 1.0
[Grain077]
(gauss) phi1 352.166 Phi 88.6049 phi2 114.964 scatter 0.0 fraction 1.0
[Grain078]
(gauss) phi1 342.33 Phi 117.777 phi2 180.346 scatter 0.0 fraction 1.0
[Grain079]
(gauss) phi1 224.952 Phi 70.5702 phi2 148.486 scatter 0.0 fraction 1.0
[Grain080]
(gauss) phi1 7.71702 Phi 23.6124 phi2 131.591 scatter 0.0 fraction 1.0
[Grain081]
(gauss) phi1 65.1024 Phi 138.774 phi2 247.344 scatter 0.0 fraction 1.0
[Grain082]
(gauss) phi1 37.6181 Phi 51.5209 phi2 8.4169 scatter 0.0 fraction 1.0
[Grain083]
(gauss) phi1 245.335 Phi 53.4543 phi2 52.5205 scatter 0.0 fraction 1.0
[Grain084]
(gauss) phi1 259.572 Phi 87.7026 phi2 272.065 scatter 0.0 fraction 1.0
[Grain085]
(gauss) phi1 269.39 Phi 103.379 phi2 132.506 scatter 0.0 fraction 1.0
[Grain086]
(gauss) phi1 175.156 Phi 119.338 phi2 355.51 scatter 0.0 fraction 1.0
[Grain087]
(gauss) phi1 248.11 Phi 39.4772 phi2 310.371 scatter 0.0 fraction 1.0
[Grain088]
(gauss) phi1 121.809 Phi 141.465 phi2 10.0736 scatter 0.0 fraction 1.0
[Grain089]
(gauss) phi1 2.4357 Phi 47.118 phi2 274.654 scatter 0.0 fraction 1.0
[Grain090]
(gauss) phi1 314.188 Phi 134.146 phi2 250.673 scatter 0.0 fraction 1.0
[Grain091]
(gauss) phi1 114.815 Phi 121.132 phi2 275.124 scatter 0.0 fraction 1.0
[Grain092]
(gauss) phi1 126.699 Phi 99.0325 phi2 320.537 scatter 0.0 fraction 1.0
[Grain093]
(gauss) phi1 184.138 Phi 20.1663 phi2 159.314 scatter 0.0 fraction 1.0
[Grain094]
(gauss) phi1 296.502 Phi 15.2389 phi2 39.382 scatter 0.0 fraction 1.0
[Grain095]
(gauss) phi1 167.8 Phi 151.764 phi2 192.568 scatter 0.0 fraction 1.0
[Grain096]
(gauss) phi1 257.822 Phi 133.446 phi2 257.108 scatter 0.0 fraction 1.0
[Grain097]
(gauss) phi1 71.6923 Phi 74.5726 phi2 342.575 scatter 0.0 fraction 1.0
[Grain098]
(gauss) phi1 176.748 Phi 28.39 phi2 327.375 scatter 0.0 fraction 1.0
[Grain099]
(gauss) phi1 121.822 Phi 141.836 phi2 22.6349 scatter 0.0 fraction 1.0
[Grain100]
(gauss) phi1 180.151 Phi 109.246 phi2 146.177 scatter 0.0 fraction 1.0
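The <microstructure> block above is fully regular: grain i references texture i with fraction 1.0. A minimal shell sketch (an illustration only, not part of DAMASK) that regenerates those 100 entries, handy when the grain count changes:

    i=1
    while [ "$i" -le 100 ]; do
      printf '[Grain%03d]\ncrystallite 1\n(constituent) phase 1 texture %d fraction 1.0\n' "$i" "$i"
      i=$((i+1))
    done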


@ -0,0 +1,3 @@
residualStiffness 0.001
charLength 0.02
petsc_options -mech_snes_type newtonls -mech_ksp_type fgmres -mech_pc_type ml -mech_ksp_monitor

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -0,0 +1,9 @@
#initial elastic step
$Loadcase 1 time 0.0005 incs 1 frequency 5
Face 1 X 0.01
Face 2 X 0.00
$EndLoadcase
$Loadcase 2 time 10.0 incs 200 frequency 5
Face 1 X 0.01
Face 2 X 0.00
$EndLoadcase


@ -1,8 +0,0 @@
*.C_ref
*.mesh
*.outputConstitutive
*.outputCrystallite
*.outputHomogenization
*.spectralOut
*.sta
*.vtk


@ -18,8 +18,6 @@ mech none
(output) f # deformation gradient tensor; synonyms: "defgrad"
(output) fe # elastic deformation gradient tensor
(output) fp # plastic deformation gradient tensor
(output) e # total strain as Green-Lagrange tensor
(output) ee # elastic strain as Green-Lagrange tensor
(output) p # first Piola-Kirchhoff stress tensor; synonyms: "firstpiola", "1stpiola"
(output) lp # plastic velocity gradient tensor
@ -56,7 +54,6 @@ h0_slipslip 75e6
interaction_slipslip 1 1 1.4 1.4 1.4 1.4
atol_resistance 1
#-------------------#
<microstructure>
#-------------------#


@ -1,52 +0,0 @@
#!/bin/ksh
# 1st arg: $DIR
# 2nd arg: $DIRJOB
# 3rd arg: $user
# 4th arg: $program
DIR=$1
user=$3
program=$4
usernoext=$user
usernoext=`dirname $usernoext`/`$BASENAME $usernoext .f`
usernoext=`dirname $usernoext`/`$BASENAME $usernoext .F`
usernoext=`dirname $usernoext`/`$BASENAME $usernoext .for`
usernoext=`dirname $usernoext`/`$BASENAME $usernoext .f90`
# add BLAS options for linking
BLAS="%BLAS%"
. $DIR/tools/include
DIRJOB=$2
cd $DIRJOB
echo "Compiling and linking user subroutine $user on host `hostname`"
echo "program: $program"
$DFORTLOW $user || \
{
echo "$0: compile failed for $user"
exit 1
}
/bin/rm $program 2>/dev/null
userobj=$usernoext.o
$LOAD ${program} $DIR/lib/main.o\
$DIR/lib/blkdta.o $DIR/lib/comm?.o \
${userobj-} \
$DIR/lib/srclib.a \
$MNFLIBS \
$MDUSER \
../lib/mdsrc.a \
../lib/mcvfit.a \
$STUBS \
${SOLVERLIBS} \
$TKLIBS \
$MRCLIBS \
$METISLIBS \
$BLAS \
$SYSLIBS || \
{
echo "$0: link failed for $usernoext.o on host `hostname`"
exit 1
}
/bin/rm $userobj
/bin/rm $DIRJOB/*.mod
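The wrapper above takes four positional arguments, as documented in its header comments: the Marc installation directory, the job directory, the user subroutine source, and the name of the executable to link. A hypothetical invocation; the script name and all paths are placeholders, only the argument order is taken from the script:

    WRAPPER=./comp_damask_wrapper      # placeholder for this script's file name
    $WRAPPER /path/to/marc /path/to/jobdir DAMASK_marc.f90 DAMASK_marc.marc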


@ -481,23 +481,14 @@ else
fi
# DAMASK compiler calls: additional flags are in line 2 OpenMP flags in line 3
DFORTLOW="$FCOMP $FORT_OPT $PROFILE -O0 $I8FFLAGS -I$MARC_SOURCE/common \
-fpp -ftz -diag-enable sc3 -diag-disable 5268 -warn declarations -warn general -warn usage -warn interfaces -warn ignore_loc -warn alignments -DMarc4DAMASK=2016 -DDAMASKVERSION=$DAMASKVERSION \
-I$MARC_SOURCE/${BCS_DIR}/common -I$MARC_SOURCE/mumpssolver/include $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS"
DFORTLOWMP="$FCOMP -c -implicitnone -stand f08 -standard-semantics -assume nostd_mod_proc_name -safe_cray_ptr $PROFILE -zero -mp1 -WB -O0 $I8FFLAGS -I$MARC_SOURCE/common \
-fpp -ftz -diag-disable 5268 -warn declarations -warn general -warn usage -warn interfaces -warn ignore_loc -warn alignments -DMarc4DAMASK=2016 -DDAMASKVERSION=$DAMASKVERSION \
-qopenmp -qopenmp-threadprivate=compat\
-I$MARC_SOURCE/${BCS_DIR}/common -I$MARC_SOURCE/mumpssolver/include $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS"
DFORTRAN="$FCOMP $FORT_OPT $PROFILE -O1 $I8FFLAGS -I$MARC_SOURCE/common \
-fpp -ftz -diag-enable sc3 -diag-disable 5268 -warn declarations -warn general -warn usage -warn interfaces -warn ignore_loc -warn alignments -DMarc4DAMASK=2016 -DDAMASKVERSION=$DAMASKVERSION \
-I$MARC_SOURCE/${BCS_DIR}/common -I$MARC_SOURCE/mumpssolver/include $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS"
DFORTRANMP="$FCOMP -c -implicitnone -stand f08 -standard-semantics -assume nostd_mod_proc_name -safe_cray_ptr $PROFILE -zero -mp1 -WB -O1 $I8FFLAGS -I$MARC_SOURCE/common \
-fpp -ftz -diag-disable 5268 -warn declarations -warn general -warn usage -warn interfaces -warn ignore_loc -warn alignments -DMarc4DAMASK=2016 -DDAMASKVERSION=$DAMASKVERSION \
-qopenmp -qopenmp-threadprivate=compat\
-I$MARC_SOURCE/${BCS_DIR}/common -I$MARC_SOURCE/mumpssolver/include $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS"
DFORTHIGH="$FCOMP $FORT_OPT $PROFILE -fno-alias -O2 $I8FFLAGS -I$MARC_SOURCE/common \
-fpp -ftz -diag-enable sc3 -diag-disable 5268 -warn declarations -warn general -warn usage -warn interfaces -warn ignore_loc -warn alignments -DMarc4DAMASK=2016 -DDAMASKVERSION=$DAMASKVERSION \
-I$MARC_SOURCE/${BCS_DIR}/common -I$MARC_SOURCE/mumpssolver/include $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS"
DFORTHIGHMP="$FCOMP -c -implicitnone -stand f08 -standard-semantics -assume nostd_mod_proc_name -safe_cray_ptr $PROFILE -zero -mp1 -WB -fno-alias -O2 $I8FFLAGS -I$MARC_SOURCE/common \
-fpp -ftz -diag-disable 5268 -warn declarations -warn general -warn usage -warn interfaces -warn ignore_loc -warn alignments -DMarc4DAMASK=2016 -DDAMASKVERSION=$DAMASKVERSION \
-qopenmp -qopenmp-threadprivate=compat\
@ -515,23 +506,14 @@ then
-I$MARC_SOURCE/${BCS_DIR}/common -I$MARC_SOURCE/mumpssolver/include $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM"
# DAMASK compiler calls: additional flags are in line 2 OpenMP flags in line 3
DFORTLOW="$FCOMP $FORT_OPT $PROFILE $I8FFLAGS -I$MARC_SOURCE/common \
-fpp -ftz -diag-enable sc3 -diag-disable 5268 -warn declarations -warn general -warn usage -warn interfaces -warn ignore_loc -warn alignments -DMarc4DAMASK=2016 -DDAMASKVERSION=$DAMASKVERSION \
-I$MARC_SOURCE/${BCS_DIR}/common -I$MARC_SOURCE/mumpssolver/include $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS"
DFORTLOWMP="$FCOMP -c -implicitnone -stand f08 -standard-semantics -assume nostd_mod_proc_name -safe_cray_ptr $PROFILE -zero -mp1 -WB $I8FFLAGS -I$MARC_SOURCE/common \
-fpp -ftz -diag-disable 5268 -warn declarations -warn general -warn usage -warn interfaces -warn ignore_loc -warn alignments -DMarc4DAMASK=2016 -DDAMASKVERSION=$DAMASKVERSION \
-qopenmp -qopenmp-threadprivate=compat\
-I$MARC_SOURCE/${BCS_DIR}/common -I$MARC_SOURCE/mumpssolver/include $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS"
DFORTRAN="$FCOMP $FORT_OPT $PROFILE $I8FFLAGS -I$MARC_SOURCE/common \
-fpp -ftz -diag-enable sc3 -diag-disable 5268 -warn declarations -warn general -warn usage -warn interfaces -warn ignore_loc -warn alignments -DMarc4DAMASK=2016 -DDAMASKVERSION=$DAMASKVERSION \
-I$MARC_SOURCE/${BCS_DIR}/common -I$MARC_SOURCE/mumpssolver/include $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS"
DFORTRANMP="$FCOMP -c -implicitnone -stand f08 -standard-semantics -assume nostd_mod_proc_name -safe_cray_ptr $PROFILE -zero -mp1 -WB $I8FFLAGS -I$MARC_SOURCE/common \
-fpp -ftz -diag-disable 5268 -warn declarations -warn general -warn usage -warn interfaces -warn ignore_loc -warn alignments -DMarc4DAMASK=2016 -DDAMASKVERSION=$DAMASKVERSION \
-qopenmp -qopenmp-threadprivate=compat\
-I$MARC_SOURCE/${BCS_DIR}/common -I$MARC_SOURCE/mumpssolver/include $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS"
DFORTHIGH="$FCOMP $FORT_OPT $PROFILE -fno-alias $I8FFLAGS -I$MARC_SOURCE/common \
-fpp -ftz -diag-enable sc3 -diag-disable 5268 -warn declarations -warn general -warn usage -warn interfaces -warn ignore_loc -warn alignments -DMarc4DAMASK=2016 -DDAMASKVERSION=$DAMASKVERSION \
-I$MARC_SOURCE/${BCS_DIR}/common -I$MARC_SOURCE/mumpssolver/include $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS"
DFORTHIGHMP="$FCOMP -c -implicitnone -stand f08 -standard-semantics -assume nostd_mod_proc_name -safe_cray_ptr $PROFILE -zero -mp1 -WB -fno-alias $I8FFLAGS -I$MARC_SOURCE/common \
-fpp -ftz -diag-disable 5268 -warn declarations -warn general -warn usage -warn interfaces -warn ignore_loc -warn alignments -DMarc4DAMASK=2016 -DDAMASKVERSION=$DAMASKVERSION \
-qopenmp -qopenmp-threadprivate=compat\

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -1,52 +0,0 @@
#!/bin/ksh
# 1st arg: $DIR
# 2nd arg: $DIRJOB
# 3rd arg: $user
# 4th arg: $program
DIR=$1
user=$3
program=$4
usernoext=$user
usernoext=`dirname $usernoext`/`$BASENAME $usernoext .f`
usernoext=`dirname $usernoext`/`$BASENAME $usernoext .F`
usernoext=`dirname $usernoext`/`$BASENAME $usernoext .for`
usernoext=`dirname $usernoext`/`$BASENAME $usernoext .f90`
# add BLAS options for linking
BLAS="%BLAS%"
. $DIR/tools/include
DIRJOB=$2
cd $DIRJOB
echo "Compiling and linking user subroutine $user on host `hostname`"
echo "program: $program"
$DFORTHIGH $user || \
{
echo "$0: compile failed for $user"
exit 1
}
/bin/rm $program 2>/dev/null
userobj=$usernoext.o
$LOAD ${program} $DIR/lib/main.o\
$DIR/lib/blkdta.o $DIR/lib/comm?.o \
${userobj-} \
$DIR/lib/srclib.a \
$MNFLIBS \
$MDUSER \
../lib/mdsrc.a \
../lib/mcvfit.a \
$STUBS \
${SOLVERLIBS} \
$TKLIBS \
$MRCLIBS \
$METISLIBS \
$BLAS \
$SYSLIBS || \
{
echo "$0: link failed for $usernoext.o on host `hostname`"
exit 1
}
/bin/rm $userobj
/bin/rm $DIRJOB/*.mod


@ -491,25 +491,16 @@ else
fi
# DAMASK compiler calls: additional flags are in line 2 OpenMP flags in line 3
DFORTLOW="$FCOMP $FORT_OPT $PROFILE -O0 $I8FFLAGS -I$MARC_SOURCE/common \
-fpp -ftz -diag-enable sc3 -diag-disable 5268 -warn declarations -warn general -warn usage -warn interfaces -warn ignore_loc -warn alignments -DMarc4DAMASK=2016 -DDAMASKVERSION=$DAMASKVERSION \
-I$MARC_SOURCE/${BCS_DIR}/common -I$MARC_SOURCE/mumpssolver/include $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS -I$KDTREE2_MOD"
DFORTLOWMP="$FCOMP -c -implicitnone -stand f08 -standard-semantics -assume nostd_mod_proc_name -safe_cray_ptr $PROFILE -zero -mp1 -WB -O0 $I8FFLAGS -I$MARC_SOURCE/common \
-fpp -ftz -diag-disable 5268 -warn declarations -warn general -warn usage -warn interfaces -warn ignore_loc -warn alignments -DMarc4DAMASK=2016 -DDAMASKVERSION=$DAMASKVERSION \
-fpp -ftz -diag-disable 5268 -warn declarations -warn general -warn usage -warn interfaces -warn ignore_loc -warn alignments -DMarc4DAMASK=2017 -DDAMASKVERSION=$DAMASKVERSION \
-qopenmp -qopenmp-threadprivate=compat\
-I$MARC_SOURCE/${BCS_DIR}/common -I$MARC_SOURCE/mumpssolver/include $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS -I$KDTREE2_MOD"
DFORTRAN="$FCOMP $FORT_OPT $PROFILE -O1 $I8FFLAGS -I$MARC_SOURCE/common \
-fpp -ftz -diag-enable sc3 -diag-disable 5268 -warn declarations -warn general -warn usage -warn interfaces -warn ignore_loc -warn alignments -DMarc4DAMASK=2016 -DDAMASKVERSION=$DAMASKVERSION \
-I$MARC_SOURCE/${BCS_DIR}/common -I$MARC_SOURCE/mumpssolver/include $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS -I$KDTREE2_MOD"
DFORTRANMP="$FCOMP -c -implicitnone -stand f08 -standard-semantics -assume nostd_mod_proc_name -safe_cray_ptr $PROFILE -zero -mp1 -WB -O1 $I8FFLAGS -I$MARC_SOURCE/common \
-fpp -ftz -diag-disable 5268 -warn declarations -warn general -warn usage -warn interfaces -warn ignore_loc -warn alignments -DMarc4DAMASK=2016 -DDAMASKVERSION=$DAMASKVERSION \
-fpp -ftz -diag-disable 5268 -warn declarations -warn general -warn usage -warn interfaces -warn ignore_loc -warn alignments -DMarc4DAMASK=2017 -DDAMASKVERSION=$DAMASKVERSION \
-qopenmp -qopenmp-threadprivate=compat\
-I$MARC_SOURCE/${BCS_DIR}/common -I$MARC_SOURCE/mumpssolver/include $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS -I$KDTREE2_MOD"
DFORTHIGH="$FCOMP $FORT_OPT $PROFILE -fno-alias -O2 $I8FFLAGS -I$MARC_SOURCE/common \
-fpp -ftz -diag-enable sc3 -diag-disable 5268 -warn declarations -warn general -warn usage -warn interfaces -warn ignore_loc -warn alignments -DMarc4DAMASK=2016 -DDAMASKVERSION=$DAMASKVERSION \
-I$MARC_SOURCE/${BCS_DIR}/common -I$MARC_SOURCE/mumpssolver/include $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS -I$KDTREE2_MOD"
DFORTHIGHMP="$FCOMP -c -implicitnone -stand f08 -standard-semantics -assume nostd_mod_proc_name -safe_cray_ptr $PROFILE -zero -mp1 -WB -fno-alias -O2 $I8FFLAGS -I$MARC_SOURCE/common \
-fpp -ftz -diag-disable 5268 -warn declarations -warn general -warn usage -warn interfaces -warn ignore_loc -warn alignments -DMarc4DAMASK=2016 -DDAMASKVERSION=$DAMASKVERSION \
-fpp -ftz -diag-disable 5268 -warn declarations -warn general -warn usage -warn interfaces -warn ignore_loc -warn alignments -DMarc4DAMASK=2017 -DDAMASKVERSION=$DAMASKVERSION \
-qopenmp -qopenmp-threadprivate=compat\
-I$MARC_SOURCE/${BCS_DIR}/common -I$MARC_SOURCE/mumpssolver/include $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS -I$KDTREE2_MOD"
@ -525,25 +516,16 @@ then
-I$MARC_SOURCE/${BCS_DIR}/common -I$MARC_SOURCE/mumpssolver/include $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM"
# DAMASK compiler calls: additional flags are in line 2 OpenMP flags in line 3
DFORTLOW="$FCOMP $FORT_OPT $PROFILE $I8FFLAGS -I$MARC_SOURCE/common \
-fpp -ftz -diag-enable sc3 -diag-disable 5268 -warn declarations -warn general -warn usage -warn interfaces -warn ignore_loc -warn alignments -DMarc4DAMASK=2016 -DDAMASKVERSION=$DAMASKVERSION \
-I$MARC_SOURCE/${BCS_DIR}/common -I$MARC_SOURCE/mumpssolver/include $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS -I$KDTREE2_MOD"
DFORTLOWMP="$FCOMP -c -implicitnone -stand f08 -standard-semantics -assume nostd_mod_proc_name -safe_cray_ptr $PROFILE -zero -mp1 -WB $I8FFLAGS -I$MARC_SOURCE/common \
-fpp -ftz -diag-disable 5268 -warn declarations -warn general -warn usage -warn interfaces -warn ignore_loc -warn alignments -DMarc4DAMASK=2016 -DDAMASKVERSION=$DAMASKVERSION \
-fpp -ftz -diag-disable 5268 -warn declarations -warn general -warn usage -warn interfaces -warn ignore_loc -warn alignments -DMarc4DAMASK=2017 -DDAMASKVERSION=$DAMASKVERSION \
-qopenmp -qopenmp-threadprivate=compat\
-I$MARC_SOURCE/${BCS_DIR}/common -I$MARC_SOURCE/mumpssolver/include $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS -I$KDTREE2_MOD"
DFORTRAN="$FCOMP $FORT_OPT $PROFILE $I8FFLAGS -I$MARC_SOURCE/common \
-fpp -ftz -diag-enable sc3 -diag-disable 5268 -warn declarations -warn general -warn usage -warn interfaces -warn ignore_loc -warn alignments -DMarc4DAMASK=2016 -DDAMASKVERSION=$DAMASKVERSION \
-I$MARC_SOURCE/${BCS_DIR}/common -I$MARC_SOURCE/mumpssolver/include $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS -I$KDTREE2_MOD"
DFORTRANMP="$FCOMP -c -implicitnone -stand f08 -standard-semantics -assume nostd_mod_proc_name -safe_cray_ptr $PROFILE -zero -mp1 -WB $I8FFLAGS -I$MARC_SOURCE/common \
-fpp -ftz -diag-disable 5268 -warn declarations -warn general -warn usage -warn interfaces -warn ignore_loc -warn alignments -DMarc4DAMASK=2016 -DDAMASKVERSION=$DAMASKVERSION \
-fpp -ftz -diag-disable 5268 -warn declarations -warn general -warn usage -warn interfaces -warn ignore_loc -warn alignments -DMarc4DAMASK=2017 -DDAMASKVERSION=$DAMASKVERSION \
-qopenmp -qopenmp-threadprivate=compat\
-I$MARC_SOURCE/${BCS_DIR}/common -I$MARC_SOURCE/mumpssolver/include $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS -I$KDTREE2_MOD"
DFORTHIGH="$FCOMP $FORT_OPT $PROFILE -fno-alias $I8FFLAGS -I$MARC_SOURCE/common \
-fpp -ftz -diag-enable sc3 -diag-disable 5268 -warn declarations -warn general -warn usage -warn interfaces -warn ignore_loc -warn alignments -DMarc4DAMASK=2016 -DDAMASKVERSION=$DAMASKVERSION \
-I$MARC_SOURCE/${BCS_DIR}/common -I$MARC_SOURCE/mumpssolver/include $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS -I$KDTREE2_MOD"
DFORTHIGHMP="$FCOMP -c -implicitnone -stand f08 -standard-semantics -assume nostd_mod_proc_name -safe_cray_ptr $PROFILE -zero -mp1 -WB -fno-alias $I8FFLAGS -I$MARC_SOURCE/common \
-fpp -ftz -diag-disable 5268 -warn declarations -warn general -warn usage -warn interfaces -warn ignore_loc -warn alignments -DMarc4DAMASK=2016 -DDAMASKVERSION=$DAMASKVERSION \
-fpp -ftz -diag-disable 5268 -warn declarations -warn general -warn usage -warn interfaces -warn ignore_loc -warn alignments -DMarc4DAMASK=2017 -DDAMASKVERSION=$DAMASKVERSION \
-qopenmp -qopenmp-threadprivate=compat\
-I$MARC_SOURCE/${BCS_DIR}/common -I$MARC_SOURCE/mumpssolver/include $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS -I$KDTREE2_MOD"


@ -1,8 +0,0 @@
#!/bin/sh
if [ "$1" = "" ]; then
echo "usage: $0 job_name"
exit 1
fi
echo STOP > $1.cnt


@ -1,8 +0,0 @@
#!/bin/sh
if [ "$1" = "" ]; then
echo "usage: $0 job_name"
exit 1
fi
echo STOP > $1.cnt


@ -1,187 +0,0 @@
#!/bin/sh
#
# The exit status of this script is read by Mentat.
# Normal exit status is 0.
#
DIR=%INSTALLDIR%/marc%VERSION%
if test $MARCDIR1
then
DIR=$MARCDIR1
fi
if test -z "$DIR"; then
REALCOM="`ls -l $0 |awk '{ print $NF; }'`"
DIRSCRIPT=`dirname $REALCOM`
case $DIRSCRIPT in
\/*)
;;
*)
DIRSCRIPT=`pwd`/$DIRSCRIPT
;;
esac
. $DIRSCRIPT/getarch
DIR="$MENTAT_MARCDIR"
fi
SRCEXT=.f
SRCEXTC=.F
RSTEXT=.t08
PSTEXT=.t19
PSTEXTB=.t16
VWFCEXT=.vfs
slv=$1
version=$2
ndom_fea_solver=$3
ndom_preprocessor=$4
hostfile=$5
compat=$6
job=$7
srcfile=$8
srcmeth=$9
shift 9 # cannot use $10, $11, ...
restart=$1
postfile=$2
viewfactorsfile=$3
autorst=$4
copy_datfile="-ci $5"
copy_postfile="-cr $6"
scr_dir=$7
dcoup=$8
assem_recov_nthread=$9
shift 9 # cannot use $10, $11, ...
nthread=$1
nsolver=$2
mode=$3
gpu=$4
if [ "$slv" != "" -a "$slv" != "marc" ]; then
slv="-iam sfm"
else
slv=""
fi
if [ "$ndom_fea_solver" != "" -a "$ndom_fea_solver" != "1" ]; then
nprocds="-nprocds $ndom_fea_solver"
else
nprocd=""
if [ "$ndom_preprocessor" != "" -a "$ndom_preprocessor" != "1" ]; then
nprocd="-nprocd $ndom_preprocessor"
else
nprocd=""
fi
fi
if [ "$srcfile" != "" -a "$srcfile" != "-" ]; then
srcfile=`echo $srcfile | sed "s/$SRCEXT$//" | sed "s/$SRCEXTC$//"`
case "$srcmeth" in
-)
srcfile="-u $srcfile"
;;
compsave)
srcfile="-u $srcfile -save y"
;;
runsaved)
srcfile=${srcfile%.*}".marc"
srcfile="-prog $srcfile"
;;
esac
else
srcfile=""
fi
if [ "$restart" != "" -a "$restart" != "-" ]; then
restart=`echo $restart | sed "s/$RSTEXT$//"`
restart="-r $restart"
else
restart=""
fi
if [ "$postfile" != "" -a "$postfile" != "-" ]; then
postfile=`echo $postfile | sed "s/$PSTEXT$//"`
postfile=`echo $postfile | sed "s/$PSTEXTB$//"`
postfile="-pid $postfile"
else
postfile=""
fi
if [ "$viewfactorsfile" != "" -a "$viewfactorsfile" != "-" ]; then
viewfactorsfile=`echo $viewfactorsfile | sed "s/$VWFCEXT$//"`
viewfactorsfile="-vf $viewfactorsfile"
else
viewfactorsfile=""
fi
if [ "$hostfile" != "" -a "$hostfile" != "-" ]; then
hostfile="-ho $hostfile"
else
hostfile=""
fi
if [ "$compat" != "" -a "$compat" != "-" ]; then
compat="-co $compat"
else
compat=""
fi
if [ "$scr_dir" != "" -a "$scr_dir" != "-" ]; then
scr_dir="-sd $scr_dir"
else
scr_dir=""
fi
if [ "$dcoup" != "" -a "$dcoup" != "0" ]; then
dcoup="-dcoup $dcoup"
else
dcoup=""
fi
if [ "$assem_recov_nthread" != "" -a "$assem_recov_nthread" != "1" ]; then
assem_recov_nthread="-nthread_elem $assem_recov_nthread"
else
assem_recov_nthread=""
fi
if [ "$nthread" != "" -a "$nthread" != "0" -a "$nthread" != "1" ]; then
nthread="-nthread $nthread"
else
nthread=""
fi
if [ "$nsolver" != "" -a "$nsolver" != "0" ]; then
nsolver="-nsolver $nsolver"
else
nsolver=""
fi
case "$mode" in
4) mode="-mo i4" ;;
8) mode="-mo i8" ;;
*) mode= ;;
esac
if [ "$gpu" != "" -a "$gpu" != "-" ]; then
gpu="-gpu $gpu"
else
gpu=""
fi
rm -f $job.cnt
rm -f $job.sts
rm -f $job.out
rm -f $job.log
# To prevent a mismatch with the python version used by the solver
# do *not* prepend $MENTAT_INSTALL_DIR/python/bin to environment variable PATH
# unset environment variables PYTHONHOME and PYTHONPATH
unset PYTHONHOME
unset PYTHONPATH
"${DIR}/tools/run_damask" $slv -j $job -v n -b y $nprocds $nprocd -autorst $autorst \
$srcfile $restart $postfile $viewfactorsfile $hostfile \
$compat $copy_datfile $copy_postfile $scr_dir $dcoup \
$assem_recov_nthread $nthread $nsolver $mode $gpu > /dev/null 2>&1
sleep 1
exit 0
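Both "shift 9" lines in the script above exist because $10 is not parsed as the tenth positional parameter. A minimal sketch of the behaviour in plain sh (an illustration, not part of the script):

    set -- a b c d e f g h i j   # ten positional parameters
    echo "$10"                   # prints "a0": parsed as ${1} followed by a literal 0
    echo "${10}"                 # prints "j" in POSIX shells; the old Bourne shell lacks this form
    shift 9
    echo "$1"                    # prints "j": the portable idiom used above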


@ -1,187 +0,0 @@
#!/bin/sh
#
# The exit status of this script is read by Mentat.
# Normal exit status is 0.
#
DIR=%INSTALLDIR%/marc%VERSION%
if test $MARCDIR1
then
DIR=$MARCDIR1
fi
if test -z "$DIR"; then
REALCOM="`ls -l $0 |awk '{ print $NF; }'`"
DIRSCRIPT=`dirname $REALCOM`
case $DIRSCRIPT in
\/*)
;;
*)
DIRSCRIPT=`pwd`/$DIRSCRIPT
;;
esac
. $DIRSCRIPT/getarch
DIR="$MENTAT_MARCDIR"
fi
SRCEXT=.f
SRCEXTC=.F
RSTEXT=.t08
PSTEXT=.t19
PSTEXTB=.t16
VWFCEXT=.vfs
slv=$1
version=$2
ndom_fea_solver=$3
ndom_preprocessor=$4
hostfile=$5
compat=$6
job=$7
srcfile=$8
srcmeth=$9
shift 9 # cannot use $10, $11, ...
restart=$1
postfile=$2
viewfactorsfile=$3
autorst=$4
copy_datfile="-ci $5"
copy_postfile="-cr $6"
scr_dir=$7
dcoup=$8
assem_recov_nthread=$9
shift 9 # cannot use $10, $11, ...
nthread=$1
nsolver=$2
mode=$3
gpu=$4
if [ "$slv" != "" -a "$slv" != "marc" ]; then
slv="-iam sfm"
else
slv=""
fi
if [ "$ndom_fea_solver" != "" -a "$ndom_fea_solver" != "1" ]; then
nprocds="-nprocds $ndom_fea_solver"
else
nprocd=""
if [ "$ndom_preprocessor" != "" -a "$ndom_preprocessor" != "1" ]; then
nprocd="-nprocd $ndom_preprocessor"
else
nprocd=""
fi
fi
if [ "$srcfile" != "" -a "$srcfile" != "-" ]; then
srcfile=`echo $srcfile | sed "s/$SRCEXT$//" | sed "s/$SRCEXTC$//"`
case "$srcmeth" in
-)
srcfile="-u $srcfile"
;;
compsave)
srcfile="-u $srcfile -save y"
;;
runsaved)
srcfile=${srcfile%.*}".marc"
srcfile="-prog $srcfile"
;;
esac
else
srcfile=""
fi
if [ "$restart" != "" -a "$restart" != "-" ]; then
restart=`echo $restart | sed "s/$RSTEXT$//"`
restart="-r $restart"
else
restart=""
fi
if [ "$postfile" != "" -a "$postfile" != "-" ]; then
postfile=`echo $postfile | sed "s/$PSTEXT$//"`
postfile=`echo $postfile | sed "s/$PSTEXTB$//"`
postfile="-pid $postfile"
else
postfile=""
fi
if [ "$viewfactorsfile" != "" -a "$viewfactorsfile" != "-" ]; then
viewfactorsfile=`echo $viewfactorsfile | sed "s/$VWFCEXT$//"`
viewfactorsfile="-vf $viewfactorsfile"
else
viewfactorsfile=""
fi
if [ "$hostfile" != "" -a "$hostfile" != "-" ]; then
hostfile="-ho $hostfile"
else
hostfile=""
fi
if [ "$compat" != "" -a "$compat" != "-" ]; then
compat="-co $compat"
else
compat=""
fi
if [ "$scr_dir" != "" -a "$scr_dir" != "-" ]; then
scr_dir="-sd $scr_dir"
else
scr_dir=""
fi
if [ "$dcoup" != "" -a "$dcoup" != "0" ]; then
dcoup="-dcoup $dcoup"
else
dcoup=""
fi
if [ "$assem_recov_nthread" != "" -a "$assem_recov_nthread" != "1" ]; then
assem_recov_nthread="-nthread_elem $assem_recov_nthread"
else
assem_recov_nthread=""
fi
if [ "$nthread" != "" -a "$nthread" != "0" -a "$nthread" != "1" ]; then
nthread="-nthread $nthread"
else
nthread=""
fi
if [ "$nsolver" != "" -a "$nsolver" != "0" ]; then
nsolver="-nsolver $nsolver"
else
nsolver=""
fi
case "$mode" in
4) mode="-mo i4" ;;
8) mode="-mo i8" ;;
*) mode= ;;
esac
if [ "$gpu" != "" -a "$gpu" != "-" ]; then
gpu="-gpu $gpu"
else
gpu=""
fi
rm -f $job.cnt
rm -f $job.sts
rm -f $job.out
rm -f $job.log
# To prevent a mismatch with the python version used by the solver
# do *not* prepend $MENTAT_INSTALL_DIR/python/bin to environment variable PATH
# unset environment variables PYTHONHOME and PYTHONPATH
unset PYTHONHOME
unset PYTHONPATH
"${DIR}/tools/run_damask_l" $slv -j $job -v n -b y $nprocds $nprocd -autorst $autorst \
$srcfile $restart $postfile $viewfactorsfile $hostfile \
$compat $copy_datfile $copy_postfile $scr_dir $dcoup \
$assem_recov_nthread $nthread $nsolver $mode $gpu > /dev/null 2>&1
sleep 1
exit 0


@ -20,7 +20,7 @@ DIRJOB=$2
cd $DIRJOB
echo "Compiling and linking user subroutine $user on host `hostname`"
echo "program: $program"
$DFORTRAN $user || \
$DFORTHIGHMP $user || \
{
echo "$0: compile failed for $user"
exit 1


@ -20,7 +20,7 @@ DIRJOB=$2
cd $DIRJOB
echo "Compiling and linking user subroutine $user on host `hostname`"
echo "program: $program"
$DFORTHIGH $user || \
$DFORTRANLOWMP $user || \
{
echo "$0: compile failed for $user"
exit 1


@ -20,7 +20,7 @@ DIRJOB=$2
cd $DIRJOB
echo "Compiling and linking user subroutine $user on host `hostname`"
echo "program: $program"
$DFORTRAN $user || \
$DFORTRANMP $user || \
{
echo "$0: compile failed for $user"
exit 1


@ -6,27 +6,18 @@
DIR=$1
user=$3
program=$4
usernoext=$user
usernoext=`dirname $usernoext`/`$BASENAME $usernoext .f`
usernoext=`dirname $usernoext`/`$BASENAME $usernoext .F`
usernoext=`dirname $usernoext`/`$BASENAME $usernoext .for`
usernoext=`dirname $usernoext`/`$BASENAME $usernoext .f90`
# add BLAS options for linking
BLAS="%BLAS%"
. $DIR/tools/include
DIRJOB=$2
cd $DIRJOB
echo "Compiling and linking user subroutine $user on host `hostname`"
echo "Compiling and linking user subroutine $user.f on host `hostname`"
echo "program: $program"
$DFORTLOW $user || \
$FORTRAN $user.f || \
{
echo "$0: compile failed for $user"
echo "$0: compile failed for $user.f"
exit 1
}
/bin/rm $program 2>/dev/null
userobj=$usernoext.o
userobj=$user.o
$LOAD ${program} $DIR/lib/main.o\
@ -42,11 +33,9 @@ echo "program: $program"
$TKLIBS \
$MRCLIBS \
$METISLIBS \
$BLAS \
$SYSLIBS || \
{
echo "$0: link failed for $usernoext.o on host `hostname`"
echo "$0: link failed for $user.o on host `hostname`"
exit 1
}
/bin/rm $userobj
/bin/rm $DIRJOB/*.mod


@ -0,0 +1,813 @@
#
# General definitions for the Marc 2018.0 version
#
# EM64T
#
# Linux RedHat 7.1 / SuSE 11 SP4
#
# 64 bit MPI version
#
# Intel(R) Fortran Intel(R) 64 Compiler XE for applications
# running on Intel(R) 64, Version 17.0.2.174 Build 20170213
#
# Intel(R) C Intel(R) 64 Compiler XE for applications
# running on Intel(R) 64, Version 17.0.2.174 Build 20170213
#
# To check the O/S level, type:
# uname -a
#
# Distributed parallel MPI libraries:
# 1) HP MPI 2.3
# To check the mpi version, type:
# mpirun -version
# 2) Intel MPI 2017.1
# To check the mpi version, type:
# mpiexec.hydra -version
#
# To check the Compiler level, type using the compiler
# installation path:
# ifort -V
# icc -V
#
# REMARKS : This file contains the definitions of variables used during
# compilation loading and use of the MARC programmes . The
# current machine type is identified by means of the variable
# MACHINE , defined below.
#
#
# MPI_ROOT: root directory in which mpi shared libraries, etc. are located
# DIRJOB : directory in which spawned jobs should look for Marc input
# MPI_ARCH: system architecture
# MPI_EPATH: path where executable resides
#
REVISION="VERSION, BUILD"
HOSTNAME=`hostname`
# find available memory in Mbyte on the machine
# can be set explicitly
MEMLIMIT=`free -m | awk '/Mem:/ {print $2}'`
# set _OEM_NASTRAN to 1 for MD Nastran build
# override _OEM_NASTRAN setting with MARC_MD_NASTRAN environment variable
_OEM_NASTRAN="${MARC_MD_NASTRAN:-0}"
# uncomment the following line for an autoforge build
#AUTOFORGE=1
AUTOFORGE=0
export AUTOFORGE
# integer size
if test "$MARC_INTEGER_SIZE" = "" ; then
INTEGER_PATH=
else
INTEGER_PATH=/$MARC_INTEGER_SIZE
fi
FCOMP=ifort
INTELPATH="/opt/intel/compilers_and_libraries_2017/linux"
# find the root directory of the compiler installation:
# - if ifort is found in $PATH, then the root directory is derived
# from the path to ifort
# - if ifort is not found in $PATH, the root directory is assumed
# to be $INTELPATH and the directory in which ifort is found is
# added to $PATH
FCOMPPATH=`which "$FCOMP" 2>/dev/null`
if test -n "$FCOMPPATH"; then
# derive the root directory from $FCOMPPATH
FCOMPROOT="${FCOMPPATH%/bin/intel64/$FCOMP}"
if test "$FCOMPROOT" = "$FCOMPPATH"; then
FCOMPROOT="${FCOMPPATH%/bin/$FCOMP}"
fi
if test "$FCOMPROOT" = "$FCOMPPATH"; then
FCOMPROOT=
fi
elif test -d "$INTELPATH"; then
# check for compiler in $INTELPATH
if test -d "$INTELPATH/bin/intel64" -a \
-x "$INTELPATH/bin/intel64/$FCOMP" ; then
FCOMPROOT="$INTELPATH"
PATH="$INTELPATH/bin/intel64:$PATH"
elif test -d "$INTELPATH/bin" -a \
-x "$INTELPATH/bin/$FCOMP"; then
FCOMPROOT="$INTELPATH"
PATH="$INTELPATH/bin:$PATH"
else
FCOMPROOT=
fi
else
FCOMPROOT=
fi
# AEM
if test "$MARCDLLOUTDIR" = ""; then
DLLOUTDIR="$MARC_LIB"
else
DLLOUTDIR="$MARCDLLOUTDIR"
fi
# settings for MKL
if test "$IMKLDIR" = ""; then
MARC_MKL="$FCOMPROOT/mkl/lib/intel64"
else
MARC_MKL=$IMKLDIR/lib/intel64
fi
#
# settings for Metis
#
METIS="-I$METIS_SOURCE/include"
METISLIBS="$METISLIB_DIR/libmarcddm.a $METISLIB_DIR/libmarcmetis.a "
#
# settings for MPI
#
# RCP and RSH are used for parallel network runs
# replace with similar commands like rsh if needed
RCP=/usr/bin/scp
RSH=/usr/bin/ssh
#
MPI_DEFAULT=intelmpi
MPI_OTHER=hpmpi
MPITYPE=$MPI_DEFAULT
if test $AUTOFORGE
then
if test $AUTOFORGE = 1
then
MPITYPE=none
fi
fi
# overrule MPITYPE setting with environmental variable MARC_MPITYPE
if test $MARC_MPITYPE
then
MPITYPE=$MARC_MPITYPE
fi
# always set MPITYPE to none for MD Nastran
if test "$_OEM_NASTRAN" -ne 0
then
MPITYPE=none
fi
# Edit following lines to build with GPGPU version of BCS Solver for
# NVIDIA platforms
#BCSGPUSOLVER=NONE
BCSGPUSOLVER=BCSGPU
# Edit following lines to set the openssl library
if test "$OPENSSL" != "NONE"
then
OPENSSL_LIB="$MARC_LIB/libcrypto.a"
fi
OPENSSL_INCLUDE=-I"$MARC_OPENSSL/include/"
# activate contact component build if flagged
AEM_DLL=0
if test "$AEM_BUILD" = "ON" ; then
AEM_DLL=1
LINK_MARC_DLL="-shared -fPIC"
EXT_DLL="so"
MPITYPE=none
MPI_OTHER=
BCSGPUSOLVER=NONE
MUMPSSOLVER=NONE
CASISOLVER=NONE
fi
SOLVERFLAGS=
if test "$BCSGPUSOLVER" = BCSGPU
then
SOLVERFLAGS="$SOLVERFLAGS -DBCSGPU -DCUDA"
BCS_DIR=bcsgpusolver
else
BCS_DIR=bcssolver
fi
#
# settings for MPI
#
DDM=
if test $MPITYPE != none
then
if test $MPITYPE = hpmpi
then
FCOMPMPI=mpif90
export MPI_ROOT=$MARC_HPMPI
export MPI_REMSH=$RSH
export MPI_F77=$FCOMP
ARCHITECTURE=linux_amd64
DDM="-I$MPI_ROOT/include/64 -DDDM -DHPMPI"
MPI_CLEAN=
export MPI_EPATH=$MARC_BIN
export LD_LIBRARY_PATH=$MPI_ROOT/lib/$ARCHITECTURE:$MARC_LIB:$MARC_LIB_SHARED:$LD_LIBRARY_PATH
export MPIHPSPECIAL="-e MPI_FLAGS=E,T,y1"
# Below line is moved in run_marc file
# export MPIHPSPECIAL="$MPIHPSPECIAL -e LD_LIBRARY_PATH=$LD_LIBRARY_PATH"
export MPIHPSPECIAL="$MPIHPSPECIAL -e BINDIR=$MARC_BIN"
if test -n "$MSC_LICENSE_FILE"
then
export MPIHPSPECIAL="$MPIHPSPECIAL -e MSC_LICENSE_FILE=$MSC_LICENSE_FILE"
fi
if test -n "$LM_LICENSE_FILE"
then
export MPIHPSPECIAL="$MPIHPSPECIAL -e LM_LICENSE_FILE=$LM_LICENSE_FILE"
fi
export MPIHPSPECIAL="$MPIHPSPECIAL -e MPI_LIC_CHECKER=$MPI_ROOT/bin/licensing/amd64_s8/lichk.x"
RUN_JOB2="$MPI_ROOT/bin/mpirun ${MPIRUNOPTIONS} -prot -f "
RUN_JOB1="$MPI_ROOT/bin/mpirun ${MPIRUNOPTIONS} -prot -w $MPIHPSPECIAL -np "
RUN_JOB0=
fi
if test $MPITYPE = intelmpi
then
INTELMPI_VERSION=HYDRA
FCOMPMPI=mpiifort
MPI_ROOT=$MARC_INTELMPI
DDM="-I${MPI_ROOT}/include -DDDM"
PATH=$MPI_ROOT/bin:$PATH
export PATH
LD_LIBRARY_PATH=$MPI_ROOT/lib:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH
if test $INTELMPI_VERSION = HYDRA
then
RUN_JOB1="${MPI_ROOT}/bin/mpiexec.hydra -genvall -n "
RUN_JOB2="${MPI_ROOT}/bin/mpiexec.hydra -genvall"
else
RUN_JOB1="${MPI_ROOT}/bin/mpiexec -n "
RUN_JOB2="${MPI_ROOT}/bin/mpiexec -configfile "
fi
RUN_JOB0=
MPI_CLEAN=
MPI_EPATH=$MARC_BIN
MPIR_HOME=$MPI_ROOT
MPICH_F77=$FCOMP
MPICH_F77LINKER=$FCOMP
export MPI_ROOT MPI_EPATH MPIR_HOME MPICH_F77 MPICH_F77LINKER
I_MPI_PIN_DOMAIN=node
export I_MPI_PIN_DOMAIN
fi
else
MPI_ROOT=$MARC_DUMMYMPI
export MPI_ROOT=$MARC_DUMMYMPI
DDM="-I$MPI_ROOT/include"
fi
#
# variables for the "maintain" script
#
MACHINENAME=LINUX
MACHINE64BIT=yes
MACHINE=Linux_EM64T
DEV=/dev/tape
GETLOG="whoami"
CLEAR="clear"
MY_UNAME=`uname -a`
# Edit following 2 lines to build with VKI Solver
#VKISOLVER=VKI
VKISOLVER=NONE
# Edit following 2 lines to build with CASI Solver
CASISOLVER=CASI
if test "$MARC_CASISOLVER" = "NONE" ; then
CASISOLVER=NONE
fi
#CASISOLVER=NONE
# Edit following 2 lines to build with MF2 Solver
MF2SOLVER=NONE
#MF2SOLVER=SERIAL
#MF2SOLVER=MF2PARALLEL
# Edit following lines to build with Intel(c) Multithreaded solver (PARDISO)
#INTELSOLVER=NONE
INTELSOLVER=PARDISO
# Edit following lines to build with MUMPS
if test "$MARC_INTEGER_SIZE" = "i4" ; then
#MUMPSSOLVER=NONE
MUMPSSOLVER=MUMPS
else
#MUMPSSOLVER=NONE
MUMPSSOLVER=MUMPS
fi
# Edit following 2 lines to build MARC dynamic shared library
MARC_DLL=MARC_DLL
MARC_DLL=NONE
# always set VKISOLVER, CASISOLVER, BCSGPUSOLVER, and MARC_DLL to NONE for MD Nastran
if test "$_OEM_NASTRAN" -ne 0
then
VKISOLVER=NONE
CASISOLVER=NONE
MF2SOLVER=NONE
INTELSOLVER=NONE
MUMPSSOLVER=NONE
BCSGPUSOLVER=NONE
MARC_DLL=NONE
fi
if test "$AEM_DLL" -eq 1
then
VKISOLVER=NONE
CASISOLVER=NONE
MF2SOLVER=NONE
INTELSOLVER=NONE
MUMPSSOLVER=NONE
BCSGPUSOLVER=NONE
fi
#
# define Fortran and C compile syntax
#
if test "$VKISOLVER" = VKI
then
SOLVERFLAGS="$SOLVERFLAGS -DVKI"
fi
if test "$CASISOLVER" = CASI
then
SOLVERFLAGS="$SOLVERFLAGS -DCASI"
fi
if test "$MF2SOLVER" = MF2PARALLEL
then
SOLVERFLAGS="$SOLVERFLAGS -DMF2PARALLEL"
fi
if test "$MF2SOLVER" = MF2SERIAL
then
SOLVERFLAGS="$SOLVERFLAGS -DMF2SERIAL"
fi
if test "$INTELSOLVER" = PARDISO
then
SOLVERFLAGS="$SOLVERFLAGS -DPARDISO"
fi
if test "$MUMPSSOLVER" = MUMPS
then
SOLVERFLAGS="$SOLVERFLAGS -DMUMPS"
fi
if test "$MARC_DLL" = MARC_DLL
then
SOLVERFLAGS="$SOLVERFLAGS -DMARC_DLL"
fi
LINK_OPT=
DEBUG_OPT=
C_DEBUG_OPT=
#Uncomment following line to build Marc in debuggable mode
MARCDEBUG=
#MARCDEBUG="ON"
if test "$MARCDEBUG" = "ON"
then
LINK_OPT="-debug -traceback"
DEBUG_OPT="-debug -traceback"
C_DEBUG_OPT="-debug -traceback"
fi
MARCCHECK=
#MARCCHECK="ON"
if test "$MARCCHECK" = "ON"
then
DEBUG_OPT="$DEBUG_OPT -fpe0 -fp-stack-check -check all -ftrapuv "
C_DEBUG_OPT="$C_DEBUG_OPT -fp-stack-check -check-uninit -Wformat -ftrapuv "
fi
MARCCODECOV=
#MARCCODECOV="ON"
MARCCODEPROF=
#MARCCODEPROF="ON"
if test "$MARC_INTEGER_SIZE" = "i4" ; then
I8FFLAGS="-real-size 64 -integer-size 32"
I8DEFINES="-DFLOAT=8 -DINT=4"
I8CDEFINES=
else
I8FFLAGS="-i8 -real-size 64 -integer-size 64"
I8DEFINES="-DI64 -DFLOAT=8 -DINT=8"
I8CDEFINES="-U_DOUBLE -D_SINGLE"
fi
MTHREAD=OPENMP
if test "$MARC_OPENMP" = "NONE" ; then
MTHREAD=NONE
fi
#MTHREAD=NONE
if test "$_OEM_NASTRAN" -ne 0
then
MTHREAD=NONE
fi
if test "$AEM_DLL" -eq 1
then
MTHREAD=NONE
CASISOLVER=NONE
VKISOLVER=NONE
MF2SOLVER=NONE
INTELSOLVER=NONE
BCSGPUSOLVER=NONE
OPENSSL_LIB=
MARC_DLL=NONE
METISLIBS=
fi
OMP_COMPAT=NO
OMP_COMPAT=YES
if test "$MTHREAD" = "NONE"
then
OMP_COMPAT=NO
fi
CDEFINES=
FDEFINES=
if test "$_OEM_NASTRAN" -ne 0
then
CDEFINES="$CDEFINES -D_OEM_NASTRAN"
FDEFINES="$FDEFINES -D_OEM_NASTRAN"
fi
FDEFINES="$FDEFINES -D_IMPLICITNONE"
if test "$_OEM_NASTRAN" -eq 0
then
FDEFINES="$FDEFINES -DMKL -DOPENMP"
fi
if test "$OMP_COMPAT" = "YES"
then
FDEFINES="$FDEFINES -DOMP_COMPAT"
fi
# -D_MSCMARC
FDEFINES="$FDEFINES -D_MSCMARC $DEBUG_OPT $MARC_SIMUFACT"
CDEFINES="$CDEFINES -D_MSCMARC $C_DEBUG_OPT $I8CDEFINES"
if test "$AEM_DLL" -eq 1
then
FDEFINES="$FDEFINES -D_AEMNL -DAAA"
CDEFINES="$CDEFINES -D_AEMNL -DAAA"
fi
CINCL="-I$MARC_SOURCE/mdsrc -I$MARC_SOURCE/csource $METIS"
if test "$_OEM_NASTRAN" -ne 0
then
CINCL="$CINCL -I../../include"
fi
CC_OPT=
if test "$MTHREAD" = "OPENMP"
then
CC_OPT=" $CC_OPT -qopenmp"
fi
CC="icc -c $CC_OPT -O1 $I8DEFINES -DLinux -DLINUX -DLinux_intel $CDEFINES $CINCL $SOLVERFLAGS $OPENSSL_INCLUDE "
CCLOW="icc -c $CC_OPT -O0 $I8DEFINES -DLinux -DLINUX -DLinux_intel $CDEFINES $CINCL $SOLVERFLAGS $OPENSSL_INCLUDE "
CCHIGH="icc -c $CC_OPT -O3 $I8DEFINES -DLinux -DLINUX -DLinux_intel $CDEFINES $CINCL $SOLVERFLAGS $OPENSSL_INCLUDE "
if test "$MARCDEBUG" = "ON"
then
CC="icc -c $CC_OPT -DLinux $I8DEFINES -DLINUX -DLinux_intel $CDEFINES $CINCL $SOLVERFLAGS $OPENSSL_INCLUDE "
CCLOW="icc $CC_OPT -c -DLinux $I8DEFINES -DLINUX -DLinux_intel $CDEFINES $CINCL $SOLVERFLAGS $OPENSSL_INCLUDE "
CCHIGH="icc $CC_OPT -c -DLinux $I8DEFINES -DLINUX -DLinux_intel $CDEFINES $CINCL $SOLVERFLAGS $OPENSSL_INCLUDE "
fi
LOAD_CC="icc $CC_OPT -O1 -DLinux -DLINUX -DLinux_intel"
CCT="$CC"
CCTLOW="$CCLOW"
CCTHIGH="$CCHIGH"
#PROFILE="-Mprof=func"
#PROFILE="-Mprof=lines"
#PROFILE="-Mprof=func,mpi"
PROFILE=
#PROFILE="-init=snan,arrays -CB -traceback -fpe0 -fp-stack-check -check all -check uninit -ftrapuv"
if test "$MARCCODECOV" = "ON"
then
PROFILE="-prof-gen=srcpos"
fi
if test "$MARCCODEPROF" = "ON"
then
PROFILE=" $PROFILE -pg"
fi
FORT_OPT="-c -implicitnone -stand f08 -standard-semantics -assume nostd_mod_proc_name -safe_cray_ptr -mp1 -WB -fp-model source"
if test "$MTHREAD" = "OPENMP"
then
FORT_OPT=" $FORT_OPT -qopenmp"
if test "$OMP_COMPAT" = "YES"
then
FORT_OPT=" $FORT_OPT -qopenmp-threadprivate=compat"
fi
else
# FORT_OPT=" $FORT_OPT -auto "
FORT_OPT=" $FORT_OPT -save -zero"
fi
FORTLOW="$FCOMP $FORT_OPT $PROFILE -O0 $I8FFLAGS -I$MARC_SOURCE/common \
$MUMPS_INCLUDE $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS -I$KDTREE2_MOD"
FORTRAN="$FCOMP $FORT_OPT $PROFILE -O1 $I8FFLAGS -I$MARC_SOURCE/common \
$MUMPS_INCLUDE $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS -I$KDTREE2_MOD"
FORTHIGH="$FCOMP $FORT_OPT $PROFILE -fno-alias -O3 $I8FFLAGS -I$MARC_SOURCE/common \
$MUMPS_INCLUDE $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS -I$KDTREE2_MOD"
FORTNA="$FCOMP $FORT_OPT -fno-alias -O3 $I8FFLAGS -I$MARC_SOURCE/common \
$MUMPS_INCLUDE $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM"
# for compiling free-form f90 files: high optimization, integer(4)
FORTF90="$FCOMP -c -O3"
# determine DAMASK version
if test -n "$DAMASK_USER"; then
DAMASKROOT=`dirname $DAMASK_USER`/..
read DAMASKVERSION < $DAMASKROOT/VERSION
DAMASKVERSION="'"$DAMASKVERSION"'"
else
DAMASKVERSION="'N/A'"
fi
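# e.g. (hypothetical path) DAMASK_USER=/home/user/DAMASK/src/DAMASK_marc.f90
# gives DAMASKROOT=/home/user/DAMASK/src/.. ; the first line of
# $DAMASKROOT/VERSION is read and wrapped in single quotes so that
# -DDAMASKVERSION=$DAMASKVERSION below expands to a Fortran string constant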
# DAMASK compiler calls: additional flags are on the second line, OpenMP flags on the third line of each definition
DFORTLOWMP="$FCOMP -c -implicitnone -stand f08 -standard-semantics -assume nostd_mod_proc_name -safe_cray_ptr $PROFILE -zero -mp1 -WB -O0 $I8FFLAGS -I$MARC_SOURCE/common \
-fpp -ftz -diag-disable 5268 -warn declarations -warn general -warn usage -warn interfaces -warn ignore_loc -warn alignments -DMarc4DAMASK=2018 -DDAMASKVERSION=$DAMASKVERSION \
-qopenmp -qopenmp-threadprivate=compat\
$MUMPS_INCLUDE $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS -I$KDTREE2_MOD"
DFORTRANMP="$FCOMP -c -implicitnone -stand f08 -standard-semantics -assume nostd_mod_proc_name -safe_cray_ptr $PROFILE -zero -mp1 -WB -O1 $I8FFLAGS -I$MARC_SOURCE/common \
-fpp -ftz -diag-disable 5268 -warn declarations -warn general -warn usage -warn interfaces -warn ignore_loc -warn alignments -DMarc4DAMASK=2018 -DDAMASKVERSION=$DAMASKVERSION \
-qopenmp -qopenmp-threadprivate=compat\
$MUMPS_INCLUDE $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS -I$KDTREE2_MOD"
DFORTHIGHMP="$FCOMP -c -implicitnone -stand f08 -standard-semantics -assume nostd_mod_proc_name -safe_cray_ptr $PROFILE -zero -mp1 -WB -fno-alias -O2 $I8FFLAGS -I$MARC_SOURCE/common \
-fpp -ftz -diag-disable 5268 -warn declarations -warn general -warn usage -warn interfaces -warn ignore_loc -warn alignments -DMarc4DAMASK=2018 -DDAMASKVERSION=$DAMASKVERSION \
-qopenmp -qopenmp-threadprivate=compat\
$MUMPS_INCLUDE $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS -I$KDTREE2_MOD"
if test "$MARCDEBUG" = "ON"
then
FORTLOW="$FCOMP $FORT_OPT $PROFILE $I8FFLAGS -I$MARC_SOURCE/common \
$MUMPS_INCLUDE $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS -I$KDTREE2_MOD"
FORTRAN="$FCOMP $FORT_OPT $PROFILE $I8FFLAGS -I$MARC_SOURCE/common \
$MUMPS_INCLUDE $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS -I$KDTREE2_MOD"
FORTHIGH="$FCOMP $FORT_OPT $PROFILE -fno-alias $I8FFLAGS -I$MARC_SOURCE/common \
$MUMPS_INCLUDE $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS -I$KDTREE2_MOD"
FORTNA="$FCOMP $FORT_OPT -fno-alias $I8FFLAGS -I$MARC_SOURCE/common \
$MUMPS_INCLUDE $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM"
fi
# DAMASK compiler calls: additional flags are on the second line, OpenMP flags on the third line of each definition
DFORTLOWMP="$FCOMP -c -implicitnone -stand f08 -standard-semantics -assume nostd_mod_proc_name -safe_cray_ptr $PROFILE -zero -mp1 -WB $I8FFLAGS -I$MARC_SOURCE/common \
-fpp -ftz -diag-disable 5268 -warn declarations -warn general -warn usage -warn interfaces -warn ignore_loc -warn alignments -DMarc4DAMASK=2018 -DDAMASKVERSION=$DAMASKVERSION \
-qopenmp -qopenmp-threadprivate=compat\
$MUMPS_INCLUDE $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS -I$KDTREE2_MOD"
DFORTRANMP="$FCOMP -c -implicitnone -stand f08 -standard-semantics -assume nostd_mod_proc_name -safe_cray_ptr $PROFILE -zero -mp1 -WB $I8FFLAGS -I$MARC_SOURCE/common \
-fpp -ftz -diag-disable 5268 -warn declarations -warn general -warn usage -warn interfaces -warn ignore_loc -warn alignments -DMarc4DAMASK=2018 -DDAMASKVERSION=$DAMASKVERSION \
-qopenmp -qopenmp-threadprivate=compat\
$MUMPS_INCLUDE $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS -I$KDTREE2_MOD"
DFORTHIGHMP="$FCOMP -c -implicitnone -stand f08 -standard-semantics -assume nostd_mod_proc_name -safe_cray_ptr $PROFILE -zero -mp1 -WB -fno-alias $I8FFLAGS -I$MARC_SOURCE/common \
-fpp -ftz -diag-disable 5268 -warn declarations -warn general -warn usage -warn interfaces -warn ignore_loc -warn alignments -DMarc4DAMASK=2018 -DDAMASKVERSION=$DAMASKVERSION \
-qopenmp -qopenmp-threadprivate=compat\
$MUMPS_INCLUDE $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS -I$KDTREE2_MOD"
FORTLOWT="$FORTLOW"
FORTRANT="$FORTRAN"
FORTHIGHT="$FORTHIGH"
FORTRANMNF="$FCOMP -c $FDEFINES "
CCMNF="icc -c -O1 -DLinux -DLINUX -DLinux_intel -Dport2egcs -I$MARC_SOURCE/marctoadams/mnf/include -D_LARGEFILE64_SOURCE"
if test $MPITYPE != none
then
if test $MPITYPE = hpmpi
then
LOAD="$MPI_ROOT/bin/$FCOMPMPI ${LOADOPTIONS} -L$MPI_ROOT/lib/$ARCHITECTURE $PROFILE $LINK_OPT -o "
LOADT="$MPI_ROOT/bin/$FCOMPMPI ${LOADOPTIONS} -L$MPI_ROOT/lib/$ARCHITECTURE $PROFILE $LINK_OPT -o "
fi
# Uncomment the following lines to turn on the tracer and comment out the next 5 lines
# if test $MPITYPE = intelmpi
# then
# INCLUDEMPI="-I$MPI_ROOT/include -I$VT_ROOT/include"
# LOAD="$MPI_ROOT/bin/$FCOMPMPI $PROFILE $INCLUDEMPI -g -t=log $LINK_OPT -o "
# LOADT="$MPI_ROOT/bin/$FCOMPMPI $PROFILE $INCLUDEMPI -g -t=log $LINK_OPT -o "
# fi
if test $MPITYPE = intelmpi
then
LOAD="ifort $PROFILE $LINK_OPT -o "
LOADT="ifort $PROFILE $LINK_OPT -o "
fi
else
LOAD="$FCOMP $LINK_OPT -o "
LOADT="$FCOMP $LINK_OPT -o "
fi
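# note: for intelmpi the executable is linked with plain ifort and the MPI
# libraries are pulled in later via SYSLIBS (-lmpi_mt -lmpifort, see below);
# for hpmpi the mpif90 wrapper from $MPI_ROOT/bin drives the link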
if test "$MARC_DLL" = MARC_DLL
then
FORTLOW="$FORTLOW -fpp -fPIC"
FORTRAN="$FORTRAN -fpp -fPIC"
FORTHIGH="$FORTHIGH -fpp -fPIC"
FORTRANMNF="$FORTRANMNF -fpp -fPIC"
CC="$CC -fPIC"
CCMNF="$CCMNF -fPIC"
LINK_EXE_MARC="-L$MARC_LIB -lmarc -L$MARC_LIB_SHARED -lguide -lpthread"
LINK_MARC_DLL="-shared -fPIC"
LOAD_DLL=$LOAD
LOADT_DLL=$LOADT
EXT_DLL="so"
fi
if test "$AEM_DLL" -eq 1
then
FORTLOW="$FORTLOW -fpp -fPIC"
FORTRAN="$FORTRAN -fpp -fPIC"
FORTHIGH="$FORTHIGH -fpp -fPIC"
FORTRANMNF="$FORTRANMNF -fpp -fPIC"
CC="$CC -fPIC"
CCMNF="$CCMNF -fPIC"
LINK_EXE_MARC="-L$MARC_LIB -lmarc -L$MARC_LIB_SHARED -lguide"
LINK_MARC_DLL="-shared -fPIC"
LOAD_DLL=$LOAD
LOADT_DLL=$LOADT
EXT_DLL="so"
fi
XLIBS="-L/usr/X11/lib -lX11 "
#
# define archive and ranlib syntax
#
ARC="ar rvl"
ARD="ar dvl"
ARX="ar xl"
RAN=""
#
# choose which libraries you want to use ( e.g. blas )
#
if test "$VKISOLVER" = VKI
then
VKISOLVERLIBS="$MARC_LIB/vkisolver.a"
else
VKISOLVERLIBS=
fi
if test "$CASISOLVER" = CASI
then
CASISOLVERLIBS="$CASILIB_DIR/libmarccasi.a $CASILIB_DIR/libcasi.a"
else
CASISOLVERLIBS=
fi
MF2SOLVERLIBS=
if test "$MF2SOLVER" = MF2PARALLEL
then
MF2SOLVERLIBS="$MARC_LIB/mf2parallel/libseq.a \
$MARC_LIB/mf2parallel/libsym.a \
$MARC_LIB/mf2parallel/libmet.a \
$MARC_LIB/mf2parallel/libmf2.a \
$MARC_LIB/mf2parallel/libgauss.a \
$MARC_LIB/mf2parallel/libmf2.a \
$MARC_LIB/mf2parallel/libgauss.a \
$MARC_LIB/mf2parallel/libnum.a \
$MARC_LIB/mf2parallel/libutl.a \
$MARC_LIB/mf2parallel/libr8.a \
$MARC_LIB/mf2parallel/libz.a "
fi
if test "$MUMPSSOLVER" = MUMPS
then
MUMPSSOLVERLIBS="$MUMPSLIB_DIR/libmumps.a"
if test $MPITYPE = none
then
MUMPSSOLVERLIBS2=
echo hello > /dev/null
fi
if test $MPITYPE = intelmpi
then
if test "$MARC_INTEGER_SIZE" = "i4" ; then
MUMPSSOLVERLIBS2=" $MARC_MKL/libmkl_blacs_intelmpi_lp64.a "
else
MUMPSSOLVERLIBS2=" $MARC_MKL/libmkl_blacs_intelmpi_ilp64.a "
fi
fi
if test $MPITYPE = hpmpi
then
if test "$MARC_INTEGER_SIZE" = "i4" ; then
MUMPSSOLVERLIBS2=" $MARC_MKL/libmkl_blacs_intelmpi_lp64.a"
else
MUMPSSOLVERLIBS2=" $MARC_MKL/libmkl_blacs_intelmpi_ilp64.a"
fi
fi
else
MUMPSSOLVERLIBS=
MUMPSSOLVERLIBS2=
fi
if test "$BCSGPUSOLVER" = BCSGPU
then
BCSSOLVERLIBS="${BCSLIB_DIR}/bcsgpulib.a "
MARCCUDALIBS1="-L${BCSLIB_DIR}/cuda_dummy -lmarccuda "
MARCCUDALIBS2="-L${BCSLIB_DIR}/cuda -lmarccuda "
MARCCUDALIBS=$MARCCUDALIBS1
else
BCSSOLVERLIBS="${MARC_LIB}/bcslib.a "
fi
if test "$AEM_DLL" -eq 1
then
BCSSOLVERLIBS=
fi
if test "$MARC_INTEGER_SIZE" = "i4" ; then
MKLLIB="$MARC_MKL/libmkl_scalapack_lp64.a -Wl,--start-group $MARC_MKL/libmkl_intel_lp64.a $MARC_MKL/libmkl_intel_thread.a $MARC_MKL/libmkl_core.a $MARC_MKL/libmkl_blacs_intelmpi_lp64.a $MUMPSSOLVERLIBS2 -Wl,--end-group"
else
MKLLIB="$MARC_MKL/libmkl_scalapack_ilp64.a -Wl,--start-group $MARC_MKL/libmkl_intel_ilp64.a $MARC_MKL/libmkl_intel_thread.a $MARC_MKL/libmkl_core.a $MARC_MKL/libmkl_blacs_intelmpi_ilp64.a $MUMPSSOLVERLIBS2 -Wl,--end-group"
fi
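# note: -Wl,--start-group/--end-group lets the linker resolve circular
# dependencies among the static MKL libraries; the lp64 vs ilp64 variants
# must match the i4/i8 integer size selected above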
SECLIBS="-L$MARC_LIB -llapi"
SOLVERLIBS="${BCSSOLVERLIBS} ${VKISOLVERLIBS} ${CASISOLVERLIBS} ${MF2SOLVERLIBS} \
$MKLLIB -L$MARC_MKL -liomp5 \
$MARC_LIB/blas_src.a ${ACSI_LIB}/ACSI_MarcLib.a $KDTREE2_LIB/kdtree2.a "
SOLVERLIBS_DLL=${SOLVERLIBS}
if test "$AEM_DLL" -eq 1
then
SOLVERLIBS_DLL="$MKLLIB -L$MARC_MKL -liomp5 $MARC_LIB/blas_src.a"
fi
MRCLIBS="$MARC_LIB/clib.a ${CASISOLVERLIBS}"
MRCLIBSPAR="$MARC_LIB/clib.a"
STUBS="$MARC_LIB/stubs.a "
MNFLIBS="$MARC_LIB/libmnf.a"
MDUSER="$MARC_LIB/md_user.a"
if test "X$MARC_SIMUFACT" != "X"
then
SFLIB="-L$SFMATDIR -lMBA_Grain $SFMATDIR/sfclib.a "
else
SFLIB=" "
fi
OPENMP="-qopenmp"
if test "$AEM_DLL" -eq 1
then
LOAD_DLL=$LOAD
OPENMP=
LIBMNF=
OPENSSL=NONE
fi
SYSLIBS=" $OPENMP -lpthread -cxxlib"
# Uncomment the following lines to turn on the trace and comment out the next 4 lines
# if test $MPITYPE = intelmpi
# then
# SYSLIBS="-L${VT_ROOT}/lib -lVT -ldwarf -lelf -lm -lpthread \
# -L${MPI_ROOT}/lib64 -lmpi -lmpiif -lmpigi -lrt"
# fi
if test $MPITYPE = intelmpi
then
SYSLIBS="-L${MPI_ROOT}/lib -lmpi_mt -lmpifort -lrt $OPENMP -threads -lpthread -cxxlib"
fi
SYSLIBSPAR=" "
MARC_DLL_CODES="runmarc.f"
BLAS_SRC="dzero.f icopy.f izero.f"
if test "$_OEM_NASTRAN" -ne 0
then
if test "$MARC_INTEGER_SIZE" = "i4" ; then
BLAS_SRC="$BLAS_SRC dsctr.f zsctr.f dzasum.f daxpyi.f zaxpyi.f dgthr.f zgthr.f"
else
BLAS_SRC="ALL"
fi
fi
LOW_OPT_CODES="are163.f contro.f ndext.f omarc.f omarca.f omarcb.f omarcc.f \
omars.f fixbc.f triang.f bet049.f norst3.f eldata.f \
elec*.f elct*.f fmeig.f oada00.f ogeig.f updtrbe2.f cycrota.f \
cordef.f ogpk.f ogtan.f eldam.f formrbe3.f \
inertie.f em_sso072.f cn_fol3d_qpatch6.f cosim_begin.f"
if test "$MARC_INTEGER_SIZE" = "i8" ; then
LOW_OPT_CODES="$LOW_OPT_CODES bbcseg.f"
fi
HIGH_OPT_CODES="dpsmsa1.f dpsmsa2.f dpsmsa3.f dpsmsa4.f dpsmsa5.f dpsmsa6.f \
dpsmsa7.f dpsmsa8.f dpsmsa9.f dpsmsa10.f dpsmsa11.f dpsmsa12.f \
dpsmsa13.f dpsmsa14.f dpsmsa15.f dpsmsa16.f dpsmsah.f tpsmsah.f cn_qsort4_11.f "
MAXNUM=1000000

View File

@ -0,0 +1,774 @@
#
# General definitions for the Marc 2018.0 version
#
# EM64T
#
# Linux RedHat 7.1 / SuSE 11 SP4
#
# 64 bit MPI version
#
# Intel(R) Fortran Intel(R) 64 Compiler XE for applications
# running on Intel(R) 64, Version 17.0.2.174 Build 20170213
#
# Intel(R) C Intel(R) 64 Compiler XE for applications
# running on Intel(R) 64, Version 17.0.2.174 Build 20170213
#
# To check the O/S level, type:
# uname -a
#
# Distributed parallel MPI libraries:
# 1) HP MPI 2.3
# To check the mpi version, type:
# mpirun -version
# 2) Intel MPI 2017.1
# To check the mpi version, type:
# mpiexec.hydra -version
#
# To check the compiler level, type (using the compiler
# installation path):
# ifort -V
# icc -V
#
# REMARKS : This file contains the definitions of variables used during
# compilation, loading, and use of the MARC programmes. The
# current machine type is identified by means of the variable
# MACHINE, defined below.
#
#
# MPI_ROOT: root directory in which mpi shared libraries, etc. are located
# DIRJOB : directory in which spawned jobs should look for Marc input
# MPI_ARCH: system architecture
# MPI_EPATH: path where executable resides
#
REVISION="VERSION, BUILD"
HOSTNAME=`hostname`
# find available memory in Mbyte on the machine
# can be set explicitly
MEMLIMIT=`free -m | awk '/Mem:/ {print $2}'`
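# e.g. (illustrative numbers) on a 64 GB node "free -m" prints a "Mem:" row
# whose second field is the total memory in MByte, so MEMLIMIT becomes
# roughly 64000; assign MEMLIMIT directly here to override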
# set _OEM_NASTRAN to 1 for MD Nastran build
# override _OEM_NASTRAN setting with MARC_MD_NASTRAN environment variable
_OEM_NASTRAN="${MARC_MD_NASTRAN:-0}"
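# e.g. MARC_MD_NASTRAN=1 in the environment yields _OEM_NASTRAN=1, while an
# unset or empty MARC_MD_NASTRAN falls back to 0 through the ${VAR:-default}
# expansion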
# uncomment the following line for an autoforge build
#AUTOFORGE=1
AUTOFORGE=0
export AUTOFORGE
# integer size
if test "$MARC_INTEGER_SIZE" = "" ; then
INTEGER_PATH=
else
INTEGER_PATH=/$MARC_INTEGER_SIZE
fi
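# e.g. MARC_INTEGER_SIZE=i8 gives INTEGER_PATH=/i8; an unset
# MARC_INTEGER_SIZE leaves it empty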
FCOMP=ifort
INTELPATH="/opt/intel/compilers_and_libraries_2017/linux"
# find the root directory of the compiler installation:
# - if ifort is found in $PATH, then the root directory is derived
# from the path to ifort
# - if ifort is not found in $PATH, the root directory is assumed
# to be $INTELPATH and the directory in which ifort is found is
# added to $PATH
FCOMPPATH=`which "$FCOMP" 2>/dev/null`
if test -n "$FCOMPPATH"; then
# derive the root directory from $FCOMPPATH
FCOMPROOT="${FCOMPPATH%/bin/intel64/$FCOMP}"
if test "$FCOMPROOT" = "$FCOMPPATH"; then
FCOMPROOT="${FCOMPPATH%/bin/$FCOMP}"
fi
if test "$FCOMPROOT" = "$FCOMPPATH"; then
FCOMPROOT=
fi
elif test -d "$INTELPATH"; then
# check for compiler in $INTELPATH
if test -d "$INTELPATH/bin/intel64" -a \
-x "$INTELPATH/bin/intel64/$FCOMP" ; then
FCOMPROOT="$INTELPATH"
PATH="$INTELPATH/bin/intel64:$PATH"
elif test -d "$INTELPATH/bin" -a \
-x "$INTELPATH/bin/$FCOMP"; then
FCOMPROOT="$INTELPATH"
PATH="$INTELPATH/bin:$PATH"
else
FCOMPROOT=
fi
else
FCOMPROOT=
fi
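# e.g. (hypothetical location) ifort found at
# /opt/intel/compilers_and_libraries_2017/linux/bin/intel64/ifort gives
# FCOMPROOT=/opt/intel/compilers_and_libraries_2017/linux; if neither
# */bin/intel64/ifort nor */bin/ifort matches, FCOMPROOT stays empty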
# AEM
if test "$MARCDLLOUTDIR" = ""; then
DLLOUTDIR="$MARC_LIB"
else
DLLOUTDIR="$MARCDLLOUTDIR"
fi
# settings for MKL
if test "$IMKLDIR" = ""; then
MARC_MKL="$FCOMPROOT/mkl/lib/intel64"
else
MARC_MKL=$IMKLDIR/lib/intel64
fi
#
# settings for Metis
#
METIS="-I$METIS_SOURCE/include"
METISLIBS="$METISLIB_DIR/libmarcddm.a $METISLIB_DIR/libmarcmetis.a "
#
# settings for MPI
#
# RCP and RSH are used for parallel network runs
# replace them with similar commands (e.g. rcp/rsh) if needed
RCP=/usr/bin/scp
RSH=/usr/bin/ssh
#
MPI_DEFAULT=intelmpi
MPI_OTHER=hpmpi
MPITYPE=$MPI_DEFAULT
if test $AUTOFORGE
then
if test $AUTOFORGE = 1
then
MPITYPE=none
fi
fi
# overrule the MPITYPE setting with the environment variable MARC_MPITYPE
if test $MARC_MPITYPE
then
MPITYPE=$MARC_MPITYPE
fi
# always set MPITYPE to none for MD Nastran
if test "$_OEM_NASTRAN" -ne 0
then
MPITYPE=none
fi
# Edit following lines to build with GPGPU version of BCS Solver for
# NVIDIA platforms
#BCSGPUSOLVER=NONE
BCSGPUSOLVER=BCSGPU
# Edit following lines to set the openssl library
if test "$OPENSSL" != "NONE"
then
OPENSSL_LIB="$MARC_LIB/libcrypto.a"
fi
OPENSSL_INCLUDE=-I"$MARC_OPENSSL/include/"
# activate contact component build if flagged
AEM_DLL=0
if test "$AEM_BUILD" = "ON" ; then
AEM_DLL=1
LINK_MARC_DLL="-shared -fPIC"
EXT_DLL="so"
MPITYPE=none
MPI_OTHER=
BCSGPUSOLVER=NONE
MUMPSSOLVER=NONE
CASISOLVER=NONE
fi
SOLVERFLAGS=
if test "$BCSGPUSOLVER" = BCSGPU
then
SOLVERFLAGS="$SOLVERFLAGS -DBCSGPU -DCUDA"
BCS_DIR=bcsgpusolver
else
BCS_DIR=bcssolver
fi
#
# settings for MPI
#
DDM=
if test $MPITYPE != none
then
if test $MPITYPE = hpmpi
then
FCOMPMPI=mpif90
export MPI_ROOT=$MARC_HPMPI
export MPI_REMSH=$RSH
export MPI_F77=$FCOMP
ARCHITECTURE=linux_amd64
DDM="-I$MPI_ROOT/include/64 -DDDM -DHPMPI"
MPI_CLEAN=
export MPI_EPATH=$MARC_BIN
export LD_LIBRARY_PATH=$MPI_ROOT/lib/$ARCHITECTURE:$MARC_LIB:$MARC_LIB_SHARED:$LD_LIBRARY_PATH
export MPIHPSPECIAL="-e MPI_FLAGS=E,T,y1"
# The line below has been moved to the run_marc file
# export MPIHPSPECIAL="$MPIHPSPECIAL -e LD_LIBRARY_PATH=$LD_LIBRARY_PATH"
export MPIHPSPECIAL="$MPIHPSPECIAL -e BINDIR=$MARC_BIN"
if test -n "$MSC_LICENSE_FILE"
then
export MPIHPSPECIAL="$MPIHPSPECIAL -e MSC_LICENSE_FILE=$MSC_LICENSE_FILE"
fi
if test -n "$LM_LICENSE_FILE"
then
export MPIHPSPECIAL="$MPIHPSPECIAL -e LM_LICENSE_FILE=$LM_LICENSE_FILE"
fi
export MPIHPSPECIAL="$MPIHPSPECIAL -e MPI_LIC_CHECKER=$MPI_ROOT/bin/licensing/amd64_s8/lichk.x"
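# note: each "-e NAME=value" collected in MPIHPSPECIAL asks the HP-MPI mpirun
# to export that variable to the remote ranks it spawns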
RUN_JOB2="$MPI_ROOT/bin/mpirun ${MPIRUNOPTIONS} -prot -f "
RUN_JOB1="$MPI_ROOT/bin/mpirun ${MPIRUNOPTIONS} -prot -w $MPIHPSPECIAL -np "
RUN_JOB0=
fi
if test $MPITYPE = intelmpi
then
INTELMPI_VERSION=HYDRA
FCOMPMPI=mpiifort
MPI_ROOT=$MARC_INTELMPI
DDM="-I${MPI_ROOT}/include -DDDM"
PATH=$MPI_ROOT/bin:$PATH
export PATH
LD_LIBRARY_PATH=$MPI_ROOT/lib:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH
if test $INTELMPI_VERSION = HYDRA
then
RUN_JOB1="${MPI_ROOT}/bin/mpiexec.hydra -genvall -n "
RUN_JOB2="${MPI_ROOT}/bin/mpiexec.hydra -genvall"
else
RUN_JOB1="${MPI_ROOT}/bin/mpiexec -n "
RUN_JOB2="${MPI_ROOT}/bin/mpiexec -configfile "
fi
RUN_JOB0=
MPI_CLEAN=
MPI_EPATH=$MARC_BIN
MPIR_HOME=$MPI_ROOT
MPICH_F77=$FCOMP
MPICH_F77LINKER=$FCOMP
export MPI_ROOT MPI_EPATH MPIR_HOME MPICH_F77 MPICH_F77LINKER
I_MPI_PIN_DOMAIN=node
export I_MPI_PIN_DOMAIN
fi
else
MPI_ROOT=$MARC_DUMMYMPI
export MPI_ROOT=$MARC_DUMMYMPI
DDM="-I$MPI_ROOT/include"
fi
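# note: with MPITYPE=none the MPI header files are taken from the dummy MPI
# tree in $MARC_DUMMYMPI, so no real MPI installation is required to compile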
#
# variables for the "maintain" script
#
MACHINENAME=LINUX
MACHINE64BIT=yes
MACHINE=Linux_EM64T
DEV=/dev/tape
GETLOG="whoami"
CLEAR="clear"
MY_UNAME=`uname -a`
# Edit following 2 lines to build with VKI Solver
#VKISOLVER=VKI
VKISOLVER=NONE
# Edit following 2 lines to build with CASI Solver
CASISOLVER=CASI
if test "$MARC_CASISOLVER" = "NONE" ; then
CASISOLVER=NONE
fi
#CASISOLVER=NONE
# Edit following 2 lines to build with MF2 Solver
MF2SOLVER=NONE
#MF2SOLVER=SERIAL
#MF2SOLVER=MF2PARALLEL
# Edit following lines to build with Intel(c) Multithreaded solver (PARDISO)
#INTELSOLVER=NONE
INTELSOLVER=PARDISO
# Edit following lines to build with MUMPS
if test "$MARC_INTEGER_SIZE" = "i4" ; then
#MUMPSSOLVER=NONE
MUMPSSOLVER=MUMPS
else
#MUMPSSOLVER=NONE
MUMPSSOLVER=MUMPS
fi
# Edit following 2 lines to build MARC dynamic shared library
MARC_DLL=MARC_DLL
MARC_DLL=NONE
# always set VKISOLVER, CASISOLVER, BCSGPUSOLVER, and MARC_DLL to NONE for MD Nastran
if test "$_OEM_NASTRAN" -ne 0
then
VKISOLVER=NONE
CASISOLVER=NONE
MF2SOLVER=NONE
INTELSOLVER=NONE
MUMPSSOLVER=NONE
BCSGPUSOLVER=NONE
MARC_DLL=NONE
fi
if test "$AEM_DLL" -eq 1
then
VKISOLVER=NONE
CASISOLVER=NONE
MF2SOLVER=NONE
INTELSOLVER=NONE
MUMPSSOLVER=NONE
BCSGPUSOLVER=NONE
fi
#
# define Fortran and C compile syntax
#
if test "$VKISOLVER" = VKI
then
SOLVERFLAGS="$SOLVERFLAGS -DVKI"
fi
if test "$CASISOLVER" = CASI
then
SOLVERFLAGS="$SOLVERFLAGS -DCASI"
fi
if test "$MF2SOLVER" = MF2PARALLEL
then
SOLVERFLAGS="$SOLVERFLAGS -DMF2PARALLEL"
fi
if test "$MF2SOLVER" = MF2SERIAL
then
SOLVERFLAGS="$SOLVERFLAGS -DMF2SERIAL"
fi
if test "$INTELSOLVER" = PARDISO
then
SOLVERFLAGS="$SOLVERFLAGS -DPARDISO"
fi
if test "$MUMPSSOLVER" = MUMPS
then
SOLVERFLAGS="$SOLVERFLAGS -DMUMPS"
fi
if test "$MARC_DLL" = MARC_DLL
then
SOLVERFLAGS="$SOLVERFLAGS -DMARC_DLL"
fi
LINK_OPT=
DEBUG_OPT=
C_DEBUG_OPT=
#Uncomment following line to build Marc in debuggable mode
MARCDEBUG=
#MARCDEBUG="ON"
if test "$MARCDEBUG" = "ON"
then
LINK_OPT="-debug -traceback"
DEBUG_OPT="-debug -traceback"
C_DEBUG_OPT="-debug -traceback"
fi
MARCCHECK=
#MARCCHECK="ON"
if test "$MARCCHECK" = "ON"
then
DEBUG_OPT="$DEBUG_OPT -fpe0 -fp-stack-check -check all -ftrapuv "
C_DEBUG_OPT="$C_DEBUG_OPT -fp-stack-check -check-uninit -Wformat -ftrapuv "
fi
MARCCODECOV=
#MARCCODECOV="ON"
MARCCODEPROF=
#MARCCODEPROF="ON"
if test "$MARC_INTEGER_SIZE" = "i4" ; then
I8FFLAGS=
I8DEFINES=
I8CDEFINES=
else
I8FFLAGS="-i8"
I8DEFINES="-DI64"
I8CDEFINES="-U_DOUBLE -D_SINGLE"
fi
MTHREAD=OPENMP
if test "$MARC_OPENMP" = "NONE" ; then
MTHREAD=NONE
fi
#MTHREAD=NONE
if test "$_OEM_NASTRAN" -ne 0
then
MTHREAD=NONE
fi
if test "$AEM_DLL" -eq 1
then
MTHREAD=NONE
CASISOLVER=NONE
VKISOLVER=NONE
MF2SOLVER=NONE
INTELSOLVER=NONE
BCSGPUSOLVER=NONE
OPENSSL_LIB=
MARC_DLL=NONE
METISLIBS=
fi
OMP_COMPAT=NO
OMP_COMPAT=YES
if test "$MTHREAD" = "NONE"
then
OMP_COMPAT=NO
fi
CDEFINES=
FDEFINES=
if test "$_OEM_NASTRAN" -ne 0
then
CDEFINES="$CDEFINES -D_OEM_NASTRAN"
FDEFINES="$FDEFINES -D_OEM_NASTRAN"
fi
FDEFINES="$FDEFINES -D_IMPLICITNONE"
if test "$_OEM_NASTRAN" -eq 0
then
FDEFINES="$FDEFINES -DMKL -DOPENMP"
fi
if test "$OMP_COMPAT" = "YES"
then
FDEFINES="$FDEFINES -DOMP_COMPAT"
fi
# -D_MSCMARC
FDEFINES="$FDEFINES -D_MSCMARC $DEBUG_OPT $MARC_SIMUFACT"
CDEFINES="$CDEFINES -D_MSCMARC $C_DEBUG_OPT $I8CDEFINES"
if test "$AEM_DLL" -eq 1
then
FDEFINES="$FDEFINES -D_AEMNL -DAAA"
CDEFINES="$CDEFINES -D_AEMNL -DAAA"
fi
CINCL="-I$MARC_SOURCE/mdsrc -I$MARC_SOURCE/csource $METIS"
if test "$_OEM_NASTRAN" -ne 0
then
CINCL="$CINCL -I../../include"
fi
CC_OPT=
if test "$MTHREAD" = "OPENMP"
then
CC_OPT=" $CC_OPT -qopenmp"
fi
CC="icc -c $CC_OPT -O1 $I8DEFINES -DLinux -DLINUX -DLinux_intel $CDEFINES $CINCL $SOLVERFLAGS $OPENSSL_INCLUDE "
CCLOW="icc -c $CC_OPT -O0 $I8DEFINES -DLinux -DLINUX -DLinux_intel $CDEFINES $CINCL $SOLVERFLAGS $OPENSSL_INCLUDE "
CCHIGH="icc -c $CC_OPT -O3 $I8DEFINES -DLinux -DLINUX -DLinux_intel $CDEFINES $CINCL $SOLVERFLAGS $OPENSSL_INCLUDE "
if test "$MARCDEBUG" = "ON"
then
CC="icc -c $CC_OPT -DLinux $I8DEFINES -DLINUX -DLinux_intel $CDEFINES $CINCL $SOLVERFLAGS $OPENSSL_INCLUDE "
CCLOW="icc $CC_OPT -c -DLinux $I8DEFINES -DLINUX -DLinux_intel $CDEFINES $CINCL $SOLVERFLAGS $OPENSSL_INCLUDE "
CCHIGH="icc $CC_OPT -c -DLinux $I8DEFINES -DLINUX -DLinux_intel $CDEFINES $CINCL $SOLVERFLAGS $OPENSSL_INCLUDE "
fi
LOAD_CC="icc $CC_OPT -O1 -DLinux -DLINUX -DLinux_intel"
CCT="$CC"
CCTLOW="$CCLOW"
CCTHIGH="$CCHIGH"
#PROFILE="-Mprof=func"
#PROFILE="-Mprof=lines"
#PROFILE="-Mprof=func,mpi"
PROFILE=
#PROFILE="-init=snan,arrays -CB -traceback -fpe0 -fp-stack-check -check all -check uninit -ftrapuv"
if test "$MARCCODECOV" = "ON"
then
PROFILE="-prof-gen=srcpos"
fi
if test "$MARCCODEPROF" = "ON"
then
PROFILE=" $PROFILE -pg"
fi
FORT_OPT="-c -assume byterecl -safe_cray_ptr -mp1 -WB -fp-model source"
if test "$MTHREAD" = "OPENMP"
then
FORT_OPT=" $FORT_OPT -qopenmp"
if test "$OMP_COMPAT" = "YES"
then
FORT_OPT=" $FORT_OPT -qopenmp-threadprivate=compat"
fi
else
# FORT_OPT=" $FORT_OPT -auto "
FORT_OPT=" $FORT_OPT -save -zero"
fi
FORTLOW="$FCOMP $FORT_OPT $PROFILE -O0 $I8FFLAGS -I$MARC_SOURCE/common \
$MUMPS_INCLUDE $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS -I$KDTREE2_MOD"
FORTRAN="$FCOMP $FORT_OPT $PROFILE -O1 $I8FFLAGS -I$MARC_SOURCE/common \
$MUMPS_INCLUDE $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS -I$KDTREE2_MOD"
FORTHIGH="$FCOMP $FORT_OPT $PROFILE -fno-alias -O3 $I8FFLAGS -I$MARC_SOURCE/common \
$MUMPS_INCLUDE $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS -I$KDTREE2_MOD"
FORTNA="$FCOMP $FORT_OPT -fno-alias -O3 $I8FFLAGS -I$MARC_SOURCE/common \
$MUMPS_INCLUDE $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM"
# for compiling free-form f90 files: high optimization, integer(4)
FORTF90="$FCOMP -c -O3"
if test "$MARCDEBUG" = "ON"
then
FORTLOW="$FCOMP $FORT_OPT $PROFILE $I8FFLAGS -I$MARC_SOURCE/common \
$MUMPS_INCLUDE $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS -I$KDTREE2_MOD"
FORTRAN="$FCOMP $FORT_OPT $PROFILE $I8FFLAGS -I$MARC_SOURCE/common \
$MUMPS_INCLUDE $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS -I$KDTREE2_MOD"
FORTHIGH="$FCOMP $FORT_OPT $PROFILE -fno-alias $I8FFLAGS -I$MARC_SOURCE/common \
$MUMPS_INCLUDE $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM $SOLVERFLAGS -I$KDTREE2_MOD"
FORTNA="$FCOMP $FORT_OPT -fno-alias $I8FFLAGS -I$MARC_SOURCE/common \
$MUMPS_INCLUDE $I8DEFINES -DLinux -DLINUX -DLinux_intel $FDEFINES $DDM"
fi
FORTLOWT="$FORTLOW"
FORTRANT="$FORTRAN"
FORTHIGHT="$FORTHIGH"
FORTRANMNF="$FCOMP -c $FDEFINES "
CCMNF="icc -c -O1 -DLinux -DLINUX -DLinux_intel -Dport2egcs -I$MARC_SOURCE/marctoadams/mnf/include -D_LARGEFILE64_SOURCE"
if test $MPITYPE != none
then
if test $MPITYPE = hpmpi
then
LOAD="$MPI_ROOT/bin/$FCOMPMPI ${LOADOPTIONS} -L$MPI_ROOT/lib/$ARCHITECTURE $PROFILE $LINK_OPT -o "
LOADT="$MPI_ROOT/bin/$FCOMPMPI ${LOADOPTIONS} -L$MPI_ROOT/lib/$ARCHITECTURE $PROFILE $LINK_OPT -o "
fi
# Uncomment the following lines to turn on the tracer and comment out the next 5 lines
# if test $MPITYPE = intelmpi
# then
# INCLUDEMPI="-I$MPI_ROOT/include -I$VT_ROOT/include"
# LOAD="$MPI_ROOT/bin/$FCOMPMPI $PROFILE $INCLUDEMPI -g -t=log $LINK_OPT -o "
# LOADT="$MPI_ROOT/bin/$FCOMPMPI $PROFILE $INCLUDEMPI -g -t=log $LINK_OPT -o "
# fi
if test $MPITYPE = intelmpi
then
LOAD="ifort $PROFILE $LINK_OPT -o "
LOADT="ifort $PROFILE $LINK_OPT -o "
fi
else
LOAD="$FCOMP $LINK_OPT -o "
LOADT="$FCOMP $LINK_OPT -o "
fi
if test "$MARC_DLL" = MARC_DLL
then
FORTLOW="$FORTLOW -fpp -fPIC"
FORTRAN="$FORTRAN -fpp -fPIC"
FORTHIGH="$FORTHIGH -fpp -fPIC"
FORTRANMNF="$FORTRANMNF -fpp -fPIC"
CC="$CC -fPIC"
CCMNF="$CCMNF -fPIC"
LINK_EXE_MARC="-L$MARC_LIB -lmarc -L$MARC_LIB_SHARED -lguide -lpthread"
LINK_MARC_DLL="-shared -fPIC"
LOAD_DLL=$LOAD
LOADT_DLL=$LOADT
EXT_DLL="so"
fi
if test "$AEM_DLL" -eq 1
then
FORTLOW="$FORTLOW -fpp -fPIC"
FORTRAN="$FORTRAN -fpp -fPIC"
FORTHIGH="$FORTHIGH -fpp -fPIC"
FORTRANMNF="$FORTRANMNF -fpp -fPIC"
CC="$CC -fPIC"
CCMNF="$CCMNF -fPIC"
LINK_EXE_MARC="-L$MARC_LIB -lmarc -L$MARC_LIB_SHARED -lguide"
LINK_MARC_DLL="-shared -fPIC"
LOAD_DLL=$LOAD
LOADT_DLL=$LOADT
EXT_DLL="so"
fi
XLIBS="-L/usr/X11/lib -lX11 "
#
# define archive and ranlib syntax
#
ARC="ar rvl"
ARD="ar dvl"
ARX="ar xl"
RAN=""
#
# choose which libraries you want to use ( e.g. blas )
#
if test "$VKISOLVER" = VKI
then
VKISOLVERLIBS="$MARC_LIB/vkisolver.a"
else
VKISOLVERLIBS=
fi
if test "$CASISOLVER" = CASI
then
CASISOLVERLIBS="$CASILIB_DIR/libmarccasi.a $CASILIB_DIR/libcasi.a"
else
CASISOLVERLIBS=
fi
MF2SOLVERLIBS=
if test "$MF2SOLVER" = MF2PARALLEL
then
MF2SOLVERLIBS="$MARC_LIB/mf2parallel/libseq.a \
$MARC_LIB/mf2parallel/libsym.a \
$MARC_LIB/mf2parallel/libmet.a \
$MARC_LIB/mf2parallel/libmf2.a \
$MARC_LIB/mf2parallel/libgauss.a \
$MARC_LIB/mf2parallel/libmf2.a \
$MARC_LIB/mf2parallel/libgauss.a \
$MARC_LIB/mf2parallel/libnum.a \
$MARC_LIB/mf2parallel/libutl.a \
$MARC_LIB/mf2parallel/libr8.a \
$MARC_LIB/mf2parallel/libz.a "
fi
if test "$MUMPSSOLVER" = MUMPS
then
MUMPSSOLVERLIBS="$MUMPSLIB_DIR/libmumps.a"
if test $MPITYPE = none
then
MUMPSSOLVERLIBS2=
echo hello > /dev/null
fi
if test $MPITYPE = intelmpi
then
if test "$MARC_INTEGER_SIZE" = "i4" ; then
MUMPSSOLVERLIBS2=" $MARC_MKL/libmkl_blacs_intelmpi_lp64.a "
else
MUMPSSOLVERLIBS2=" $MARC_MKL/libmkl_blacs_intelmpi_ilp64.a "
fi
fi
if test $MPITYPE = hpmpi
then
if test "$MARC_INTEGER_SIZE" = "i4" ; then
MUMPSSOLVERLIBS2=" $MARC_MKL/libmkl_blacs_intelmpi_lp64.a"
else
MUMPSSOLVERLIBS2=" $MARC_MKL/libmkl_blacs_intelmpi_ilp64.a"
fi
fi
else
MUMPSSOLVERLIBS=
MUMPSSOLVERLIBS2=
fi
if test "$BCSGPUSOLVER" = BCSGPU
then
BCSSOLVERLIBS="${BCSLIB_DIR}/bcsgpulib.a "
MARCCUDALIBS1="-L${BCSLIB_DIR}/cuda_dummy -lmarccuda "
MARCCUDALIBS2="-L${BCSLIB_DIR}/cuda -lmarccuda "
MARCCUDALIBS=$MARCCUDALIBS1
else
BCSSOLVERLIBS="${MARC_LIB}/bcslib.a "
fi
if test "$AEM_DLL" -eq 1
then
BCSSOLVERLIBS=
fi
if test "$MARC_INTEGER_SIZE" = "i4" ; then
MKLLIB="$MARC_MKL/libmkl_scalapack_lp64.a -Wl,--start-group $MARC_MKL/libmkl_intel_lp64.a $MARC_MKL/libmkl_intel_thread.a $MARC_MKL/libmkl_core.a $MUMPSSOLVERLIBS2 -Wl,--end-group"
else
MKLLIB="$MARC_MKL/libmkl_scalapack_ilp64.a -Wl,--start-group $MARC_MKL/libmkl_intel_ilp64.a $MARC_MKL/libmkl_intel_thread.a $MARC_MKL/libmkl_core.a $MUMPSSOLVERLIBS2 -Wl,--end-group"
fi
SECLIBS="-L$MARC_LIB -llapi"
SOLVERLIBS="${BCSSOLVERLIBS} ${VKISOLVERLIBS} ${CASISOLVERLIBS} ${MF2SOLVERLIBS} \
$MKLLIB -L$MARC_MKL -liomp5 \
$MARC_LIB/blas_src.a ${ACSI_LIB}/ACSI_MarcLib.a $KDTREE2_LIB/kdtree2.a "
SOLVERLIBS_DLL=${SOLVERLIBS}
if test "$AEM_DLL" -eq 1
then
SOLVERLIBS_DLL="$MKLLIB -L$MARC_MKL -liomp5 $MARC_LIB/blas_src.a"
fi
MRCLIBS="$MARC_LIB/clib.a ${CASISOLVERLIBS}"
MRCLIBSPAR="$MARC_LIB/clib.a"
STUBS="$MARC_LIB/stubs.a "
MNFLIBS="$MARC_LIB/libmnf.a"
MDUSER="$MARC_LIB/md_user.a"
if test "X$MARC_SIMUFACT" != "X"
then
SFLIB="-L$SFMATDIR -lMBA_Grain $SFMATDIR/sfclib.a "
else
SFLIB=" "
fi
OPENMP="-qopenmp"
if test "$AEM_DLL" -eq 1
then
LOAD_DLL=$LOAD
OPENMP=
LIBMNF=
OPENSSL=NONE
fi
SYSLIBS=" $OPENMP -lpthread -shared-intel -cxxlib"
# Uncomment the following lines to turn on the trace and comment out the next 4 lines
# if test $MPITYPE = intelmpi
# then
# SYSLIBS="-L${VT_ROOT}/lib -lVT -ldwarf -lelf -lm -lpthread \
# -L${MPI_ROOT}/lib64 -lmpi -lmpiif -lmpigi -lrt"
# fi
if test $MPITYPE = intelmpi
then
SYSLIBS="-L${MPI_ROOT}/lib -lmpi_mt -lmpifort -lrt $OPENMP -threads -lpthread -shared-intel -cxxlib"
fi
SYSLIBSPAR=" "
MARC_DLL_CODES="runmarc.f"
BLAS_SRC="dzero.f icopy.f izero.f"
if test "$_OEM_NASTRAN" -ne 0
then
if test "$MARC_INTEGER_SIZE" = "i4" ; then
BLAS_SRC="$BLAS_SRC dsctr.f zsctr.f dzasum.f daxpyi.f zaxpyi.f dgthr.f zgthr.f"
else
BLAS_SRC="ALL"
fi
fi
LOW_OPT_CODES="are163.f contro.f ndext.f omarc.f omarca.f omarcb.f omarcc.f \
omars.f fixbc.f triang.f bet049.f norst3.f eldata.f \
elec*.f elct*.f fmeig.f oada00.f ogeig.f updtrbe2.f cycrota.f \
cordef.f ogpk.f ogtan.f eldam.f formrbe3.f \
inertie.f em_sso072.f cn_fol3d_qpatch6.f cosim_begin.f"
if test "$MARC_INTEGER_SIZE" = "i8" ; then
LOW_OPT_CODES="$LOW_OPT_CODES bbcseg.f"
fi
HIGH_OPT_CODES="dpsmsa1.f dpsmsa2.f dpsmsa3.f dpsmsa4.f dpsmsa5.f dpsmsa6.f \
dpsmsa7.f dpsmsa8.f dpsmsa9.f dpsmsa10.f dpsmsa11.f dpsmsa12.f \
dpsmsa13.f dpsmsa14.f dpsmsa15.f dpsmsa16.f dpsmsah.f tpsmsah.f cn_qsort4_11.f "
MAXNUM=1000000

View File

@ -374,6 +374,12 @@ fi
# the one for IBM is defined further down
LD_LIBRARY_PATH=$MARC_LIB_SHARED:$LD_LIBRARY_PATH
if test -f "/etc/redhat-release"; then
ver=`cat /etc/redhat-release | sed 's/.* release \([0-9]\).\([0-9]\) .*/\1\2/'`
case "$ver" in
6*) LD_LIBRARY_PATH="${MARC_LIB_SHARED}rh67:$LD_LIBRARY_PATH" ;;
esac
fi
LD_LIBRARY_PATH=$MARC_LIB:$LD_LIBRARY_PATH
LD_LIBRARY_PATH=$MESHERDIR:$LD_LIBRARY_PATH
LD_LIBRARY_PATH=$SFMATDIR:$LD_LIBRARY_PATH
@ -533,7 +539,7 @@ if test -n "$value"; then
. $MARC_INCLUDE
MDSRCLIB=$MARC_LIB/mdsrc.a_$value
if test "$MUMPSSOLVER" = MUMPS; then
MUMPSSOLVERLIBS="$MARC_LIB/libmumps.a_$value"
MUMPSSOLVERLIBS="$MUMPSLIB_DIR/libmumps.a_$value"
fi
fi
fi
@ -807,14 +813,14 @@ do
. $MARC_INCLUDE
MDSRCLIB=$MARC_LIB/mdsrc.a_$value
if test "$MUMPSSOLVER" = MUMPS; then
MUMPSSOLVERLIBS="$MARC_LIB/libmumps.a_$value"
MUMPSSOLVERLIBS="$MUMPSLIB_DIR/libmumps.a_$value"
fi
else
exefile=marc
. $MARC_INCLUDE
MDSRCLIB=$MARC_LIB/mdsrc.a
if test "$MUMPSSOLVER" = MUMPS; then
MUMPSSOLVERLIBS="$MARC_LIB/libmumps.a"
MUMPSSOLVERLIBS="$MUMPSLIB_DIR/libmumps.a"
fi
fi
;;
@ -3213,14 +3219,14 @@ then
then
if test $MACHINENAME = "CRAY"
then
$DFORTRAN $user || \
$DFORTHIGHMP $user || \
{
echo "$0: compile failed for $user"
exit 1
}
/bin/rm $program 2>/dev/null
else
$DFORTRAN $user -o $userobj || \
$DFORTHIGHMP $user -o $userobj || \
{
echo "$0: compile failed for $user"
exit 1
@ -3486,7 +3492,7 @@ then
remoteuser=$DIR1/`$BASENAME $user`
$RSH $i /bin/rm $remoteprog 2> /dev/null
echo
$RSH $i $DIR2/tools/comp_damask $DIR2 $DIR1 $remoteuser $remoteprog
$RSH $i $DIR2/tools/comp_damask_hmp $DIR2 $DIR1 $remoteuser $remoteprog
# check if successful, the new executable should be there
line=`$RSH $i /bin/ls $remoteprog 2> /dev/null`
if test "$line"
@ -3515,7 +3521,7 @@ then
userobj=$usernoext.o
if test $MACHINENAME = "CRAY"
then
$DFORTRAN $user || \
$DFORTHIGHMP $user || \
{
echo "$0: compile failed for $user"
echo " $PRODUCT Exit number 3"
@ -3523,7 +3529,7 @@ then
}
/bin/rm $program 2>/dev/null
else
$DFORTRAN $user -o $userobj || \
$DFORTHIGHMP $user -o $userobj || \
{
echo "$0: compile failed for $user"
echo " $PRODUCT Exit number 3"
@ -3824,7 +3830,7 @@ then
remoteuser=$DIR1/`$BASENAME $user`
$RSH $i /bin/rm $remoteprog 2> /dev/null
echo
$RSH $i $DIR2/tools/comp_damask $DIR2 $DIR1 $remoteuser $remoteprog
$RSH $i $DIR2/tools/comp_damask_hmp $DIR2 $DIR1 $remoteuser $remoteprog
# check if successful, the new executable should be there
line=`$RSH $i /bin/ls $remoteprog 2> /dev/null`
if test "$line"
@ -3852,14 +3858,14 @@ then
userobj=$usernoext.o
if test $MACHINENAME = "CRAY"
then
$DFORTRAN $user || \
$DFORTHIGHMP $user || \
{
echo "$0: compile failed for $user"
exit 1
}
/bin/rm $program 2>/dev/null
else
$DFORTRAN $user -o $userobj || \
$DFORTHIGHMP $user -o $userobj || \
{
echo "$0: compile failed for $user"
exit 1

View File

@ -374,6 +374,12 @@ fi
# the one for IBM is defined further down
LD_LIBRARY_PATH=$MARC_LIB_SHARED:$LD_LIBRARY_PATH
if test -f "/etc/redhat-release"; then
ver=`cat /etc/redhat-release | sed 's/.* release \([0-9]\).\([0-9]\) .*/\1\2/'`
case "$ver" in
6*) LD_LIBRARY_PATH="${MARC_LIB_SHARED}rh67:$LD_LIBRARY_PATH" ;;
esac
fi
LD_LIBRARY_PATH=$MARC_LIB:$LD_LIBRARY_PATH
LD_LIBRARY_PATH=$MESHERDIR:$LD_LIBRARY_PATH
LD_LIBRARY_PATH=$SFMATDIR:$LD_LIBRARY_PATH
@ -533,7 +539,7 @@ if test -n "$value"; then
. $MARC_INCLUDE
MDSRCLIB=$MARC_LIB/mdsrc.a_$value
if test "$MUMPSSOLVER" = MUMPS; then
MUMPSSOLVERLIBS="$MARC_LIB/libmumps.a_$value"
MUMPSSOLVERLIBS="$MUMPSLIB_DIR/libmumps.a_$value"
fi
fi
fi
@ -807,14 +813,14 @@ do
. $MARC_INCLUDE
MDSRCLIB=$MARC_LIB/mdsrc.a_$value
if test "$MUMPSSOLVER" = MUMPS; then
MUMPSSOLVERLIBS="$MARC_LIB/libmumps.a_$value"
MUMPSSOLVERLIBS="$MUMPSLIB_DIR/libmumps.a_$value"
fi
else
exefile=marc
. $MARC_INCLUDE
MDSRCLIB=$MARC_LIB/mdsrc.a
if test "$MUMPSSOLVER" = MUMPS; then
MUMPSSOLVERLIBS="$MARC_LIB/libmumps.a"
MUMPSSOLVERLIBS="$MUMPSLIB_DIR/libmumps.a"
fi
fi
;;
@ -3213,14 +3219,14 @@ then
then
if test $MACHINENAME = "CRAY"
then
$DFORTLOW $user || \
$DFORTLOWMP $user || \
{
echo "$0: compile failed for $user"
exit 1
}
/bin/rm $program 2>/dev/null
else
$DFORTLOW $user -o $userobj || \
$DFORTLOWMP $user -o $userobj || \
{
echo "$0: compile failed for $user"
exit 1
@ -3486,7 +3492,7 @@ then
remoteuser=$DIR1/`$BASENAME $user`
$RSH $i /bin/rm $remoteprog 2> /dev/null
echo
$RSH $i $DIR2/tools/comp_damask_l $DIR2 $DIR1 $remoteuser $remoteprog
$RSH $i $DIR2/tools/comp_damask_lmp $DIR2 $DIR1 $remoteuser $remoteprog
# check if successful, the new executable should be there
line=`$RSH $i /bin/ls $remoteprog 2> /dev/null`
if test "$line"
@ -3515,7 +3521,7 @@ then
userobj=$usernoext.o
if test $MACHINENAME = "CRAY"
then
$DFORTLOW $user || \
$DFORTLOWMP $user || \
{
echo "$0: compile failed for $user"
echo " $PRODUCT Exit number 3"
@ -3523,7 +3529,7 @@ then
}
/bin/rm $program 2>/dev/null
else
$DFORTLOW $user -o $userobj || \
$DFORTLOWMP $user -o $userobj || \
{
echo "$0: compile failed for $user"
echo " $PRODUCT Exit number 3"
@ -3824,7 +3830,7 @@ then
remoteuser=$DIR1/`$BASENAME $user`
$RSH $i /bin/rm $remoteprog 2> /dev/null
echo
$RSH $i $DIR2/tools/comp_damask_l $DIR2 $DIR1 $remoteuser $remoteprog
$RSH $i $DIR2/tools/comp_damask_lmp $DIR2 $DIR1 $remoteuser $remoteprog
# check if successful, the new executable should be there
line=`$RSH $i /bin/ls $remoteprog 2> /dev/null`
if test "$line"
@ -3852,14 +3858,14 @@ then
userobj=$usernoext.o
if test $MACHINENAME = "CRAY"
then
$DFORTLOW $user || \
$DFORTLOWMP $user || \
{
echo "$0: compile failed for $user"
exit 1
}
/bin/rm $program 2>/dev/null
else
$DFORTLOW $user -o $userobj || \
$DFORTLOWMP $user -o $userobj || \
{
echo "$0: compile failed for $user"
exit 1

View File

@ -374,6 +374,12 @@ fi
# the one for IBM is defined further down
LD_LIBRARY_PATH=$MARC_LIB_SHARED:$LD_LIBRARY_PATH
if test -f "/etc/redhat-release"; then
ver=`cat /etc/redhat-release | sed 's/.* release \([0-9]\).\([0-9]\) .*/\1\2/'`
case "$ver" in
6*) LD_LIBRARY_PATH="${MARC_LIB_SHARED}rh67:$LD_LIBRARY_PATH" ;;
esac
fi
LD_LIBRARY_PATH=$MARC_LIB:$LD_LIBRARY_PATH
LD_LIBRARY_PATH=$MESHERDIR:$LD_LIBRARY_PATH
LD_LIBRARY_PATH=$SFMATDIR:$LD_LIBRARY_PATH
@ -533,7 +539,7 @@ if test -n "$value"; then
. $MARC_INCLUDE
MDSRCLIB=$MARC_LIB/mdsrc.a_$value
if test "$MUMPSSOLVER" = MUMPS; then
MUMPSSOLVERLIBS="$MARC_LIB/libmumps.a_$value"
MUMPSSOLVERLIBS="$MUMPSLIB_DIR/libmumps.a_$value"
fi
fi
fi
@ -807,14 +813,14 @@ do
. $MARC_INCLUDE
MDSRCLIB=$MARC_LIB/mdsrc.a_$value
if test "$MUMPSSOLVER" = MUMPS; then
MUMPSSOLVERLIBS="$MARC_LIB/libmumps.a_$value"
MUMPSSOLVERLIBS="$MUMPSLIB_DIR/libmumps.a_$value"
fi
else
exefile=marc
. $MARC_INCLUDE
MDSRCLIB=$MARC_LIB/mdsrc.a
if test "$MUMPSSOLVER" = MUMPS; then
MUMPSSOLVERLIBS="$MARC_LIB/libmumps.a"
MUMPSSOLVERLIBS="$MUMPSLIB_DIR/libmumps.a"
fi
fi
;;
@ -3213,14 +3219,14 @@ then
then
if test $MACHINENAME = "CRAY"
then
$DFORTHIGH $user || \
$DFORTRANMP $user || \
{
echo "$0: compile failed for $user"
exit 1
}
/bin/rm $program 2>/dev/null
else
$DFORTHIGH $user -o $userobj || \
$DFORTRANMP $user -o $userobj || \
{
echo "$0: compile failed for $user"
exit 1
@ -3486,7 +3492,7 @@ then
remoteuser=$DIR1/`$BASENAME $user`
$RSH $i /bin/rm $remoteprog 2> /dev/null
echo
$RSH $i $DIR2/tools/comp_damask_h $DIR2 $DIR1 $remoteuser $remoteprog
$RSH $i $DIR2/tools/comp_damask_mp $DIR2 $DIR1 $remoteuser $remoteprog
# check if successful, the new executable should be there
line=`$RSH $i /bin/ls $remoteprog 2> /dev/null`
if test "$line"
@ -3515,7 +3521,7 @@ then
userobj=$usernoext.o
if test $MACHINENAME = "CRAY"
then
$DFORTHIGH $user || \
$DFORTRANMP $user || \
{
echo "$0: compile failed for $user"
echo " $PRODUCT Exit number 3"
@ -3523,7 +3529,7 @@ then
}
/bin/rm $program 2>/dev/null
else
$DFORTHIGH $user -o $userobj || \
$DFORTRANMP $user -o $userobj || \
{
echo "$0: compile failed for $user"
echo " $PRODUCT Exit number 3"
@ -3824,7 +3830,7 @@ then
remoteuser=$DIR1/`$BASENAME $user`
$RSH $i /bin/rm $remoteprog 2> /dev/null
echo
$RSH $i $DIR2/tools/comp_damask_h $DIR2 $DIR1 $remoteuser $remoteprog
$RSH $i $DIR2/tools/comp_damask_mp $DIR2 $DIR1 $remoteuser $remoteprog
# check if successful, the new executable should be there
line=`$RSH $i /bin/ls $remoteprog 2> /dev/null`
if test "$line"
@ -3852,14 +3858,14 @@ then
userobj=$usernoext.o
if test $MACHINENAME = "CRAY"
then
$DFORTHIGH $user || \
$DFORTRANMP $user || \
{
echo "$0: compile failed for $user"
exit 1
}
/bin/rm $program 2>/dev/null
else
$DFORTHIGH $user -o $userobj || \
$DFORTRANMP $user -o $userobj || \
{
echo "$0: compile failed for $user"
exit 1

View File

@ -299,23 +299,7 @@ fi
. "$DIR/getarch"
# getting user subroutine file name
found=0
for i in "$@"; do
if test $found = 1; then
DAMASK_USER=$i
found=0
fi
case $i in
-u* | -U*)
found=1
;;
esac
done
# sourcing include_linux64 (needs DAMASK_USER to be set)
. $MARC_INCLUDE
#
#
@ -349,7 +333,7 @@ export BINDIR
AFMATDAT=$MARC_RUNTIME/AF_flowmat/
export AFMATDAT
export MESHERDIR
MSC_LICENSE_FINPROC=1
MSC_LICENSE_FINPROC=0
export MSC_LICENSE_FINPROC
#
# define directory path to global unified material database
@ -374,6 +358,12 @@ fi
# the one for IBM is defined further down
LD_LIBRARY_PATH=$MARC_LIB_SHARED:$LD_LIBRARY_PATH
if test -f "/etc/redhat-release"; then
ver=`cat /etc/redhat-release | sed 's/.* release \([0-9]\).\([0-9]\) .*/\1\2/'`
case "$ver" in
6*) LD_LIBRARY_PATH="${MARC_LIB_SHARED}rh67:$LD_LIBRARY_PATH" ;;
esac
fi
LD_LIBRARY_PATH=$MARC_LIB:$LD_LIBRARY_PATH
LD_LIBRARY_PATH=$MESHERDIR:$LD_LIBRARY_PATH
LD_LIBRARY_PATH=$SFMATDIR:$LD_LIBRARY_PATH
@ -410,7 +400,7 @@ sid=
did=
vid=
user=
usernoext=
usersubname=
objs=
qid=background
cpu=
@ -533,7 +523,7 @@ if test -n "$value"; then
. $MARC_INCLUDE
MDSRCLIB=$MARC_LIB/mdsrc.a_$value
if test "$MUMPSSOLVER" = MUMPS; then
MUMPSSOLVERLIBS="$MARC_LIB/libmumps.a_$value"
MUMPSSOLVERLIBS="$MUMPSLIB_DIR/libmumps.a_$value"
fi
fi
fi
@ -577,7 +567,7 @@ do
justlist=yes
;;
-fe* | -FE*)
feature=$value
feature=$value
;;
-pr* | -PR*)
@ -673,19 +663,50 @@ do
esac
;;
-u* | -U*)
user=$value
user=`dirname $value`/`$BASENAME $value .f`
usersubname=$user
basefile=`$BASENAME $value`
if test ${basefile##*.} = f
then
user=`dirname $value`/`$BASENAME $value .f`
usersubname=$user.f
elif test ${basefile##*.} = F
then
user=`dirname $value`/`$BASENAME $value .F`
usersubname=$user.F
elif test ${basefile##*.} = f90
then
user=`dirname $value`/`$BASENAME $value .f90`
usersubname=$user.f90
elif test ${basefile##*.} = F90
then
user=`dirname $value`/`$BASENAME $value .F90`
usersubname=$user.F90
fi
case $user in
\/*)
;;
*)
user=`pwd`/$user
usersubname=`pwd`/$usersubname
;;
esac
usernoext=$user
usernoext=`dirname $usernoext`/`$BASENAME $usernoext .f`
usernoext=`dirname $usernoext`/`$BASENAME $usernoext .F`
usernoext=`dirname $usernoext`/`$BASENAME $usernoext .for`
usernoext=`dirname $usernoext`/`$BASENAME $usernoext .f90`
if test ! -f $usersubname
then
if test -f $usersubname.f
then
usersubname=$usersubname.f
elif test -f $usersubname.F
then
usersubname=$usersubname.F
elif test -f $usersubname.f90
then
usersubname=$usersubname.f90
elif test -f $usersubname.F90
then
usersubname=$usersubname.F90
fi
fi
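# e.g. (illustration) "-u subs/mysub.f90" resolves to user=$PWD/subs/mysub
# and usersubname=$PWD/subs/mysub.f90; if the extension is omitted, the
# existing .f/.F/.f90/.F90 file is searched for here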
;;
-obj | -OBJ)
objs="$value"
@ -807,14 +828,14 @@ do
. $MARC_INCLUDE
MDSRCLIB=$MARC_LIB/mdsrc.a_$value
if test "$MUMPSSOLVER" = MUMPS; then
MUMPSSOLVERLIBS="$MARC_LIB/libmumps.a_$value"
MUMPSSOLVERLIBS="$MUMPSLIB_DIR/libmumps.a_$value"
fi
else
exefile=marc
. $MARC_INCLUDE
MDSRCLIB=$MARC_LIB/mdsrc.a
if test "$MUMPSSOLVER" = MUMPS; then
MUMPSSOLVERLIBS="$MARC_LIB/libmumps.a"
MUMPSSOLVERLIBS="$MUMPSLIB_DIR/libmumps.a"
fi
fi
;;
@ -1005,6 +1026,7 @@ then
nprocdddm=
fi
nsolverprint=$nsolver
if test $nsolver -eq 0
then
nsolverprint=
@ -1144,12 +1166,12 @@ post file $DIRPID/$pid.t16 or $DIRPID/$pid.t19 not accessible"
fi
fi
fi
if test "$user"
if test "$usersubname"
then
if test ! -f $user
if test ! -f $usersubname
then
error="$error
user subroutine file $user not accessible"
user subroutine file $usersubname not accessible"
fi
fi
if test "$objs"
@ -1331,7 +1353,7 @@ else
else
error="$error
job id required"
fi
fi
fi
if test $nprocd -gt 1
@ -1480,7 +1502,7 @@ Program name : $prog
Marc shared lib : $progdll
Version type : $mode
Job ID : $DIRJID/$jid
User subroutine name : $user
User subroutine name : $usersubname
User objects/libs : $objs
Restart file job ID : $rid
Substructure file ID : $sid
@ -1513,7 +1535,7 @@ Program name : $prog
Marc shared lib : $progdll
Version type : $mode
Job ID : $DIRJID/$jid
User subroutine name : $user
User subroutine name : $usersubname
User objects/libs : $objs
Restart file job ID : $rid
Substructure file ID : $sid
@ -1637,7 +1659,7 @@ Program name ($prog)? $ECHOTXT"
;;
esac
fi
$ECHO "User subroutine name ($user)? $ECHOTXT"
$ECHO "User subroutine name ($usersubname)? $ECHOTXT"
read value
if test "$value"
then
@ -1646,19 +1668,50 @@ Program name ($prog)? $ECHOTXT"
user=
;;
*)
user=$value
user=`dirname $value`/`$BASENAME $value .f`
usersubname=$user
basefile=`$BASENAME $value`
if test ${basefile##*.} = f
then
user=`dirname $value`/`$BASENAME $value .f`
usersubname=$user.f
elif test ${basefile##*.} = F
then
user=`dirname $value`/`$BASENAME $value .F`
usersubname=$user.F
elif test ${basefile##*.} = f90
then
user=`dirname $value`/`$BASENAME $value .f90`
usersubname=$user.f90
elif test ${basefile##*.} = F90
then
user=`dirname $value`/`$BASENAME $value .F90`
usersubname=$user.F90
fi
case $user in
\/*)
;;
*)
\/*)
;;
*)
user=`pwd`/$user
;;
esac
usernoext=$user
usernoext=`dirname $usernoext`/`$BASENAME $usernoext .f`
usernoext=`dirname $usernoext`/`$BASENAME $usernoext .F`
usernoext=`dirname $usernoext`/`$BASENAME $usernoext .for`
usernoext=`dirname $usernoext`/`$BASENAME $usernoext .f90`
usersubname=`pwd`/$usersubname
;;
esac
if test ! -f $usersubname
then
if test -f $usersubname.f
then
usersubname=$usersubname.f
elif test -f $usersubname.F
then
usersubname=$usersubname.F
elif test -f $usersubname.f90
then
usersubname=$usersubname.f90
elif test -f $usersubname.F90
then
usersubname=$usersubname.F90
fi
fi
;;
esac
fi
@ -1890,6 +1943,12 @@ Program name ($prog)? $ECHOTXT"
set ntsprint=0
fi
nts=$ntsprint
# Update print variable for -nsolver option
nsolverprint=$nsolver
if test $nsolver -eq 0
then
nsolverprint=
fi
$ECHO "GPGPU id option ($gpuids)? $ECHOTXT"
read value
if test "$value"
@ -2193,12 +2252,11 @@ fi
#
# user subroutine used
#
# add DAMASK options for linking
DAMASK="-lstdc++"
if test "$user"
then
program=$usernoext.marc
# program=$user.marc
program=$DIRJOB/`$BASENAME $user .f`.marc
case $program in
\/* | \.\/*)
bd=
@ -2311,7 +2369,7 @@ fi
fi
if test "$user"
then
execpath=$usernoext.marc
execpath=$DIRJOB/`$BASENAME $user .f`.marc
usersub=1
fi
export execpath
@ -3199,27 +3257,44 @@ then
echo
if test "$user"
then
userobj=$usermoext.o
userobj=$DIRJOB/`$BASENAME $user .f`.o
basefile=`$BASENAME $usersubname`
if test ${basefile##*.} = f
then
usersub=$DIRJOB/`$BASENAME $user .f`.F
ln -sf "$user.f" "$usersub"
else
usersub=$usersubname
fi
fi
cat > $jid.runmarcscript << END4
if test "$user"
then
if test ${basefile##*.} = f
then
ln -sf "$user.f" "$usersub"
fi
if test $MACHINENAME = "CRAY"
then
$DFORTLOW $user || \
$FORTRAN $usersub || \
{
echo "$0: compile failed for $user"
echo "$0: compile failed for $user.f"
exit 1
}
/bin/rm $program 2>/dev/null
else
$DFORTLOW $user -o $userobj || \
$FORTRAN $usersub -o $userobj || \
{
echo "$0: compile failed for $user"
echo "$0: compile failed for $user.f"
exit 1
}
/bin/rm $program 2>/dev/null
fi
if test ${basefile##*.} = f
then
/bin/rm -f "$usersub"
fi
fi
@ -3239,7 +3314,7 @@ then
$TKLIBS \
$MRCLIBS \
$METISLIBS \
$DAMASK \
$OPENSSL_LIB \
$SYSLIBS \
$SFLIB \
$SECLIBS || \
@ -3252,7 +3327,6 @@ else
prgsav=yes
fi
/bin/rm $userobj 2>/dev/null
/bin/rm $DIRJOB/*.mod 2>/dev/null
#
# run marc
@ -3299,7 +3373,7 @@ if test $dllrun -eq 0; then
fi
else
if test $cpdll = yes; then
filename=$usernoext
filename=`basename $usersubname .f`
/bin/cp $DIRJOB/$marcdll $DIRJOB/${filename}_$marcdll 2>/dev/null
fi
if test $rmdll = yes
@ -3465,7 +3539,7 @@ then
# first copy over the user sub if local directories
if test ${dirstatus[$counter]} = "local"
then
$RCP $user $i:$DIR1/
$RCP $user.f $i:$DIR1/
fi
# do the compilation on the other machine
if test ${dirstatus[$counter]} = "shared"
@ -3478,21 +3552,21 @@ then
remoteuser=$DIR1/`$BASENAME $user`
$RSH $i /bin/rm $remoteprog 2> /dev/null
echo
$RSH $i $DIR2/tools/comp_damask_l $DIR2 $DIR1 $remoteuser $remoteprog
$RSH $i $DIR2/tools/comp_user $DIR2 $DIR1 $remoteuser $remoteprog
# check if successful, the new executable should be there
line=`$RSH $i /bin/ls $remoteprog 2> /dev/null`
if test "$line"
then
echo compilation and linking successful on host $i
else
echo "$0: compile failed for $user on host $i"
echo "$0: compile failed for $user.f on host $i"
echo " $PRODUCT Exit number 3"
exit 1
fi
# remove the user subroutine on remote machine
if test ${dirstatus[$counter]} = "local"
then
$RSH $i /bin/rm $remoteuser 2> /dev/null
$RSH $i /bin/rm $remoteuser.f 2> /dev/null
fi
fi
fi
@ -3502,27 +3576,39 @@ then
if test "$userhost"
then
echo
echo "Compiling and linking user subroutine $user on host `hostname`"
echo "Compiling and linking user subroutine $user.f on host `hostname`"
fi
userobj=$DIRJOB/`$BASENAME $user .f`.o
basefile=`$BASENAME $usersubname`
if test ${basefile##*.} = f
then
usersub=$DIRJOB/`$BASENAME $user .f`.F
ln -sf "$user.f" "$usersub"
else
usersub=$usersubname
fi
userobj=$usernoext.o
if test $MACHINENAME = "CRAY"
then
$DFORTLOW $user || \
$FORTRAN $usersub || \
{
echo "$0: compile failed for $user"
echo "$0: compile failed for $user.f"
echo " $PRODUCT Exit number 3"
exit 1
}
/bin/rm $program 2>/dev/null
else
$DFORTLOW $user -o $userobj || \
$FORTRAN $usersub -o $userobj || \
{
echo "$0: compile failed for $user"
echo "$0: compile failed for $user.f"
echo " $PRODUCT Exit number 3"
exit 1
}
/bin/rm $program 2>/dev/null
fi
if test ${basefile##*.} = f
then
/bin/rm -f "$usersub"
fi
fi # if test $user
@ -3542,7 +3628,7 @@ then
$TKLIBS \
$MRCLIBS \
$METISLIBS \
$DAMASK \
$OPENSSL_LIB \
$SYSLIBS \
$SFLIB \
$SECLIBS || \
@ -3583,7 +3669,6 @@ else # if test $link
prgsav=yes
fi # if test $link
/bin/rm $userobj 2>/dev/null
/bin/rm $DIRJOB/*.mod 2>/dev/null
#
# run marc
@ -3642,42 +3727,42 @@ then
counter=0
if test -f "$host_filt"
then
for i in `$AWK -v n=$numfield '{print $n}' $host_filt`
do
ibase=${i%%.*}
if test $ibase != $thishost
then
counter=$((counter+1))
DIR1=$DIRJOB
line=`grep -v '^#' $userhost | grep "^$ibase "`
workdir=`echo $line | $AWK '{print $3}'`
if test -n "$workdir"
for i in `$AWK -v n=$numfield '{print $n}' $host_filt`
do
ibase=${i%%.*}
if test $ibase != $thishost
then
DIR1=$workdir
counter=$((counter+1))
DIR1=$DIRJOB
line=`grep -v '^#' $userhost | grep "^$ibase "`
workdir=`echo $line | $AWK '{print $3}'`
if test -n "$workdir"
then
DIR1=$workdir
fi
# if an incompatible host uses shared directory,
# then the root machine deletes the executable
if test ${dirstatus[$counter]} = "shared" -a ${compstatus[$counter]} = "no"
then
hname=_$ibase
/bin/rm ${execname}$hname
fi
# if local directory used, the remote machine
# deletes the executable
if test ${dirstatus[$counter]} = "local"
then
$RSH $i /bin/rm $DIR1/${execname} 2>/dev/null
fi
fi
# if an incompatible host uses shared directory,
# then the root machine deletes the executable
if test ${dirstatus[$counter]} = "shared" -a ${compstatus[$counter]} = "no"
then
hname=_$ibase
/bin/rm ${execname}$hname
fi
# if local directory used, the remote machine
# deletes the executable
if test ${dirstatus[$counter]} = "local"
then
$RSH $i /bin/rm $DIR1/${execname} 2>/dev/null
fi
fi
done
done
fi
fi
fi
fi
fi
else
#dllrun >0
if test $cpdll = yes; then
filename=$usernoext
filename=`basename $usersubname .f`
/bin/cp $DIRJOB/$marcdll $DIRJOB/${filename}_$marcdll 2>/dev/null
fi
if test $rmdll = yes;then
@ -3802,7 +3887,7 @@ then
# first copy over the user sub if local directories
if test ${dirstatus[$counter]} = "local"
then
$RCP $user $i:$DIR1/
$RCP $user.f $i:$DIR1/
fi
# do the compilation on the other machine
if test ${dirstatus[$counter]} = "shared"
@ -3815,20 +3900,20 @@ then
remoteuser=$DIR1/`$BASENAME $user`
$RSH $i /bin/rm $remoteprog 2> /dev/null
echo
$RSH $i $DIR2/tools/comp_damask_l $DIR2 $DIR1 $remoteuser $remoteprog
$RSH $i $DIR2/tools/comp_user $DIR2 $DIR1 $remoteuser $remoteprog
# check if successful, the new executable should be there
line=`$RSH $i /bin/ls $remoteprog 2> /dev/null`
if test "$line"
then
echo compilation and linking successful on host $i
else
echo "$0: compile failed for $user on host $i"
echo "$0: compile failed for $user.f on host $i"
exit 1
fi
# remove the user subroutine on remote machine
if test ${dirstatus[$counter]} = "local"
then
$RSH $i /bin/rm $remoteuser 2> /dev/null
$RSH $i /bin/rm $remoteuser.f 2> /dev/null
fi
fi
fi
@ -3838,25 +3923,37 @@ then
if test "$userhost"
then
echo
echo "Compiling and linking user subroutine $user on host `hostname`"
echo "Compiling and linking user subroutine $user.f on host `hostname`"
fi
userobj=$DIRJOB/`$BASENAME $user .f`.o
basefile=`$BASENAME $usersubname`
if test ${basefile##*.} = f
then
usersub=$DIRJOB/`$BASENAME $user .f`.F
ln -sf "$user.f" "$usersub"
else
usersub=$usersubname
fi
userobj=$usernoext.o
if test $MACHINENAME = "CRAY"
then
$DFORTLOW $user || \
$FORTRAN $usersub || \
{
echo "$0: compile failed for $user"
echo "$0: compile failed for $user.f"
exit 1
}
/bin/rm $program 2>/dev/null
else
$DFORTLOW $user -o $userobj || \
$FORTRAN $usersub -o $userobj || \
{
echo "$0: compile failed for $user"
echo "$0: compile failed for $user.f"
exit 1
}
/bin/rm $program 2>/dev/null
fi
if test ${basefile##*.} = f
then
/bin/rm -f "$usersub"
fi
fi # if test $user
@ -3876,7 +3973,7 @@ then
$TKLIBS \
$MRCLIBS \
$METISLIBS \
$DAMASK \
$OPENSSL_LIB \
$SYSLIBS \
$SFLIB \
$SECLIBS || \
@ -3916,7 +4013,7 @@ else # if test $link
prgsav=yes
fi # if test $link
/bin/rm $userobj 2>/dev/null
/bin/rm $DIRJOB/*.mod 2>/dev/null
# done if no job id given
if test -z "$jid"
then
@ -3956,7 +4053,7 @@ if test $ddm_arc -gt 0; then
RUN_JOB="$MESHERDIR/sf_exeddm $RUN_JOB -ddm $ddm_arc "
fi
$RUN_JOB
$RUN_JOB
if test $nprocd -gt 1
then
@ -4000,42 +4097,42 @@ then
counter=0
if test -f "$host_filt"
then
for i in `$AWK -v n=$numfield '{print $n}' $host_filt`
do
ibase=${i%%.*}
if test $ibase != $thishost
then
counter=$((counter+1))
DIR1=$DIRJOB
line=`grep -v '^#' $userhost | grep "^$ibase "`
workdir=`echo $line | $AWK '{print $3}'`
if test -n "$workdir"
for i in `$AWK -v n=$numfield '{print $n}' $host_filt`
do
ibase=${i%%.*}
if test $ibase != $thishost
then
DIR1=$workdir
counter=$((counter+1))
DIR1=$DIRJOB
line=`grep -v '^#' $userhost | grep "^$ibase "`
workdir=`echo $line | $AWK '{print $3}'`
if test -n "$workdir"
then
DIR1=$workdir
fi
# if an incompatible host uses shared directory,
# then the root machine deletes the executable
if test ${dirstatus[$counter]} = "shared" -a ${compstatus[$counter]} = "no"
then
hname=_$ibase
/bin/rm ${execname}$hname
fi
# if local directory used, the remote machine
# deletes the executable
if test ${dirstatus[$counter]} = "local"
then
$RSH $i /bin/rm $DIR1/${execname} 2>/dev/null
fi
fi
# if an incompatible host uses shared directory,
# then the root machine deletes the executable
if test ${dirstatus[$counter]} = "shared" -a ${compstatus[$counter]} = "no"
then
hname=_$ibase
/bin/rm ${execname}$hname
fi
# if local directory used, the remote machine
# deletes the executable
if test ${dirstatus[$counter]} = "local"
then
$RSH $i /bin/rm $DIR1/${execname} 2>/dev/null
fi
fi
done
done
fi
fi
fi
fi
fi
else
#dllrun >0
if test $cpdll = yes; then
filename=$usernoext
filename=`basename $usersubname .f`
/bin/cp $DIRJOB/$marcdll $DIRJOB/${filename}_$marcdll 2>/dev/null
fi
if test $rmdll = yes;then

View File

@ -0,0 +1,5 @@
#!/bin/sh
# This script opens a window running an editor.
# The command to invoke the editor is specified during DAMASK installation
%EDITOR% $*

View File

@ -0,0 +1,18 @@
#!/bin/sh
# This script opens a window running an editor. The default window is an
# xterm, and the default editor is vi. These may be customized.
dir=
for d in /usr/bin /usr/bin/X11; do
if test -x "$d/xterm"; then
dir="$d"
break
fi
done
if test -z "$dir"; then
echo "$0: Could not find xterm"
exit 1
fi
"$dir/xterm" -T "vi $*" -n "vi $*" -e vi $*

View File

@ -4,7 +4,7 @@
# Normal exit status is 0.
#
DIR=%INSTALLDIR%/marc%VERSION%
DIR=/nethome/storage/raid3/f.roters/Software/MSC/Marc2018-RH7.1/marc2018
if test $MARCDIR1
then
DIR=$MARCDIR1
@ -84,7 +84,6 @@ if [ "$srcfile" != "" -a "$srcfile" != "-" ]; then
srcfile="-u $srcfile -save y"
;;
runsaved)
srcfile=${srcfile%.*}".marc"
srcfile="-prog $srcfile"
;;
esac
@ -179,7 +178,7 @@ rm -f $job.log
unset PYTHONHOME
unset PYTHONPATH
"${DIR}/tools/run_damask_h" $slv -j $job -v n -b y $nprocds $nprocd -autorst $autorst \
"${DIR}/tools/run_marc" $slv -j $job -v n -b y $nprocds $nprocd -autorst $autorst \
$srcfile $restart $postfile $viewfactorsfile $hostfile \
$compat $copy_datfile $copy_postfile $scr_dir $dcoup \
$assem_recov_nthread $nthread $nsolver $mode $gpu > /dev/null 2>&1

View File

@ -179,7 +179,7 @@ rm -f $job.log
unset PYTHONHOME
unset PYTHONPATH
"${DIR}/tools/run_damask" $slv -j $job -v n -b y $nprocds $nprocd -autorst $autorst \
"${DIR}/tools/run_damask_hmp" $slv -j $job -v n -b y $nprocds $nprocd -autorst $autorst \
$srcfile $restart $postfile $viewfactorsfile $hostfile \
$compat $copy_datfile $copy_postfile $scr_dir $dcoup \
$assem_recov_nthread $nthread $nsolver $mode $gpu > /dev/null 2>&1

View File

@ -179,7 +179,7 @@ rm -f $job.log
unset PYTHONHOME
unset PYTHONPATH
"${DIR}/tools/run_damask_h" $slv -j $job -v n -b y $nprocds $nprocd -autorst $autorst \
"${DIR}/tools/run_damask_mp" $slv -j $job -v n -b y $nprocds $nprocd -autorst $autorst \
$srcfile $restart $postfile $viewfactorsfile $hostfile \
$compat $copy_datfile $copy_postfile $scr_dir $dcoup \
$assem_recov_nthread $nthread $nsolver $mode $gpu > /dev/null 2>&1

View File

@ -179,7 +179,7 @@ rm -f $job.log
unset PYTHONHOME
unset PYTHONPATH
"${DIR}/tools/run_damask_l" $slv -j $job -v n -b y $nprocds $nprocd -autorst $autorst \
"${DIR}/tools/run_damask_lmp" $slv -j $job -v n -b y $nprocds $nprocd -autorst $autorst \
$srcfile $restart $postfile $viewfactorsfile $hostfile \
$compat $copy_datfile $copy_postfile $scr_dir $dcoup \
$assem_recov_nthread $nthread $nsolver $mode $gpu > /dev/null 2>&1

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -58,15 +58,9 @@ echo "Editor: $EDITOR"
echo ''
echo 'adapting Marc tools...'
theDIR=$INSTALLDIR/marc$VERSION/tools
for filename in 'comp_damask' \
'comp_damask_l' \
'comp_damask_h' \
'comp_damask_mp' \
for filename in 'comp_damask_mp' \
'comp_damask_lmp' \
'comp_damask_hmp' \
'run_damask' \
'run_damask_l' \
'run_damask_h' \
'run_damask_mp' \
'run_damask_lmp' \
'run_damask_hmp' \
@ -85,15 +79,9 @@ for filename in 'edit_window' \
'submit4' \
'submit5' \
'submit6' \
'submit7' \
'submit8' \
'submit9' \
'kill4' \
'kill5' \
'kill6' \
'kill7' \
'kill8' \
'kill9'; do
'kill6'; do
cp $SCRIPTLOCATION/$VERSION/Mentat_bin/$filename $theDIR
echo $theDIR/$filename | xargs perl -pi -e "s:%INSTALLDIR%:${INSTALLDIR}:g"
echo $theDIR/$filename | xargs perl -pi -e "s:%VERSION%:${VERSION}:g"
@ -122,8 +110,8 @@ echo ''
echo 'setting file access rights...'
for filename in marc$VERSION/tools/run_damask* \
marc$VERSION/tools/comp_damask* \
mentat$VERSION/bin/submit{4..9} \
mentat$VERSION/bin/kill{4..9} ; do
mentat$VERSION/bin/submit{4..6} \
mentat$VERSION/bin/kill{4..6} ; do
chmod 755 $INSTALLDIR/${filename}
done
@ -142,10 +130,7 @@ if [ -d "$BIN_DIR" ]; then
echo 'creating symlinks ...'
echo''
theDIR=$INSTALLDIR/marc$VERSION/tools
for filename in 'run_damask' \
'run_damask_l' \
'run_damask_h' \
'run_damask_mp' \
for filename in 'run_damask_mp' \
'run_damask_lmp' \
'run_damask_hmp'; do
echo ${filename:4}$VERSION

View File

@ -21,16 +21,10 @@ The structure of this directory should be (VERSION = 20XX or 20XX.Y)
./installation.txt this text
./apply_MPIE_modifications script file to apply modifications to the installation
./VERSION/Marc_tools/comp_user.original original file from installation
./VERSION/Marc_tools/comp_damask modified version using -O1 optimization
./VERSION/Marc_tools/comp_damask_l modified version using -O0 optimization
./VERSION/Marc_tools/comp_damask_h modified version using -O2 optimization
./VERSION/Marc_tools/comp_damask_mp modified version using -O1 optimization and OpenMP
./VERSION/Marc_tools/comp_damask_lmp modified version using -O0 optimization and OpenMP
./VERSION/Marc_tools/comp_damask_hmp modified version using -O2 optimization and OpenMP
./VERSION/Marc_tools/run_marc.original original file from installation
./VERSION/Marc_tools/run_damask modified version using -O1 optimization
./VERSION/Marc_tools/run_damask_l modified version using -O0 optimization
./VERSION/Marc_tools/run_damask_h modified version using -O2 optimization
./VERSION/Marc_tools/run_damask_mp modified version using -O1 optimization and OpenMP
./VERSION/Marc_tools/run_damask_lmp modified version using -O0 optimization and OpenMP
./VERSION/Marc_tools/run_damask_hmp modified version using -O2 optimization and OpenMP
@ -42,14 +36,8 @@ The structure of this directory should be (VERSION = 20XX or 20XX.Y)
./VERSION/Mentat_bin/submit4 modified version of original calling run_h_marc
./VERSION/Mentat_bin/submit5 modified version of original calling run_marc
./VERSION/Mentat_bin/submit6 modified version of original calling run_l_marc
./VERSION/Mentat_bin/submit7 modified version of original calling run_hmp_marc
./VERSION/Mentat_bin/submit8 modified version of original calling run_mp_marc
./VERSION/Mentat_bin/submit9 modified version of original calling run_lmp_marc
./VERSION/Mentat_bin/kill4 kill file for submit4, identical to original kill1
./VERSION/Mentat_bin/kill5 kill file for submit5, identical to original kill1
./VERSION/Mentat_bin/kill6 kill file for submit6, identical to original kill1
./VERSION/Mentat_bin/kill7 kill file for submit7, identical to original kill1
./VERSION/Mentat_bin/kill8 kill file for submit8, identical to original kill1
./VERSION/Mentat_bin/kill9 kill file for submit9, identical to original kill1
./VERSION/Mentat_menus/job_run.ms.original original file from installation
./VERSION/Mentat_menus/job_run.ms modified version adding DAMASK menu to run menu
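
For orientation, the suffix convention behind the remaining tool variants listed above, summarized as a small lookup (illustrative only, not part of DAMASK):

# Suffix convention of the remaining compile/run wrappers (illustrative summary).
variants = {
    'comp_damask_mp' : ('-O1', 'OpenMP'),
    'comp_damask_lmp': ('-O0', 'OpenMP'),
    'comp_damask_hmp': ('-O2', 'OpenMP'),
    'run_damask_mp'  : ('-O1', 'OpenMP'),
    'run_damask_lmp' : ('-O0', 'OpenMP'),
    'run_damask_hmp' : ('-O2', 'OpenMP'),
}
for name, (optimization, threading) in sorted(variants.items()):
    print('{:16} {} + {}'.format(name, optimization, threading))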

View File

@ -1,8 +1,8 @@
#! /usr/bin/env bash
if [ $1x != 3to2x ]; then
echo 'python2.7 to python'
find . -name '*.py' | xargs sed -i 's/usr\/bin\/env python2.7/usr\/bin\/env python/g'
find . -name '*.py' -type f | xargs sed -i 's/usr\/bin\/env python2.7/usr\/bin\/env python/g'
else
echo 'python to python2.7'
find . -name '*.py' | xargs sed -i 's/usr\/bin\/env python/usr\/bin\/env python2.7/g'
find . -name '*.py' -type f | xargs sed -i 's/usr\/bin\/env python/usr\/bin\/env python2.7/g'
fi
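
A rough Python equivalent of the find | xargs sed pipeline above, offered as a sketch only (the shell script above remains the authoritative tool); it restricts itself to exact shebang lines, which avoids turning python2.7 into python2.72.7 when switching back and forth:

#!/usr/bin/env python
# Sketch of a Python equivalent of the shell one-liners above. Unlike the sed call,
# this version only rewrites an exact '#!/usr/bin/env ...' first line.
# The '3to2' argument mirrors the script's switch and is otherwise arbitrary.
import os
import sys

def switch_shebang(path, old, new):
    """Rewrite the shebang of a single file if it matches 'old' exactly."""
    with open(path) as f:
        lines = f.readlines()
    if lines and lines[0].rstrip() == '#!/usr/bin/env ' + old:
        lines[0] = '#!/usr/bin/env ' + new + '\n'
        with open(path, 'w') as f:
            f.writelines(lines)

if __name__ == '__main__':
    to_python27 = len(sys.argv) > 1 and sys.argv[1] == '3to2'
    old, new = ('python', 'python2.7') if to_python27 else ('python2.7', 'python')
    for dirpath, _, filenames in os.walk('.'):
        for name in filenames:
            if name.endswith('.py'):
                switch_shebang(os.path.join(dirpath, name), old, new)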

View File

@ -1,14 +1,14 @@
# -*- coding: UTF-8 no BOM -*-
import os,sys
import os, sys
import numpy as np
# ------------------------------------------------------------------
# Python 3 has no unicode object; this ensures that the code works on Python 2 and 3
try:
test=isinstance('test', unicode)
except(NameError):
unicode=str
test = isinstance('test', unicode)
except NameError:
unicode = str
# ------------------------------------------------------------------
class ASCIItable():
@ -63,16 +63,17 @@ class ASCIItable():
x):
try:
return float(x)
except:
except ValueError:
return 0.0
# ------------------------------------------------------------------
def _removeCRLF(self,
string):
string):
"""Delete any carriage return and line feed from string"""
try:
return string.replace('\n','').replace('\r','')
except:
return string
except AttributeError:
return str(string)
# ------------------------------------------------------------------
@ -93,22 +94,22 @@ class ASCIItable():
# ------------------------------------------------------------------
def input_close(self):
try:
# try:
if self.__IO__['in'] != sys.stdin: self.__IO__['in'].close()
except:
pass
# except:
# pass
# ------------------------------------------------------------------
def output_write(self,
what):
"""Aggregate a single row (string) or list of (possibly containing further lists of) rows into output"""
if not isinstance(what, (str, unicode)):
if isinstance(what, (str, unicode)):
self.__IO__['output'] += [what]
else:
try:
for item in what: self.output_write(item)
except:
except TypeError:
self.__IO__['output'] += [str(what)]
else:
self.__IO__['output'] += [what]
return self.__IO__['buffered'] or self.output_flush()
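
The corrected condition makes output_write (and the analogous labels_append, info_append, and data_append changes below) append strings directly, recurse into iterables, and stringify everything else. A standalone sketch of that flattening pattern, using hypothetical names rather than the class method itself:

# Sketch of the flattening pattern used by output_write and friends after the fix.
def flatten(what, out):
    """Append 'what' to 'out': strings directly, iterables element by element,
    anything else via str()."""
    if isinstance(what, str):            # the class additionally accepts unicode on Python 2
        out.append(what)
    else:
        try:
            for item in what:
                flatten(item, out)
        except TypeError:                # not iterable
            out.append(str(what))
    return out

print(flatten(['a', ['b', 3], 4.5], []))   # -> ['a', 'b', '3', '4.5']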
@ -129,10 +130,10 @@ class ASCIItable():
# ------------------------------------------------------------------
def output_close(self,
dismiss = False):
try:
if self.__IO__['out'] != sys.stdout: self.__IO__['out'].close()
except:
pass
# try:
if self.__IO__['out'] != sys.stdout: self.__IO__['out'].close()
# except:
# pass
if dismiss and os.path.isfile(self.__IO__['out'].name):
os.remove(self.__IO__['out'].name)
elif self.__IO__['inPlace']:
@ -150,7 +151,7 @@ class ASCIItable():
try:
self.__IO__['in'].seek(0)
except:
except IOError:
pass
firstline = self.__IO__['in'].readline().strip()
@ -170,7 +171,7 @@ class ASCIItable():
else: # other table format
try:
self.__IO__['in'].seek(0) # try to rewind
except:
except IOError:
self.__IO__['readBuffer'] = [firstline] # or at least save data in buffer
while self.data_read(advance = False, respectLabels = False):
@ -197,7 +198,9 @@ class ASCIItable():
"""Write current header information (info + labels)"""
head = ['{}\theader'.format(len(self.info)+self.__IO__['labeled'])] if header else []
head.append(self.info)
if self.__IO__['labeled']: head.append('\t'.join(map(self._quote,self.tags)))
if self.__IO__['labeled']:
head.append('\t'.join(map(self._quote,self.tags)))
if len(self.tags) == 0: raise ValueError('no labels present.')
return self.output_write(head)
@ -257,13 +260,13 @@ class ASCIItable():
what,
reset = False):
"""Add item or list to existing set of labels (and switch on labeling)"""
if not isinstance(what, (str, unicode)):
if isinstance(what, (str, unicode)):
self.tags += [self._removeCRLF(what)]
else:
try:
for item in what: self.labels_append(item)
except:
except TypeError:
self.tags += [self._removeCRLF(str(what))]
else:
self.tags += [self._removeCRLF(what)]
self.__IO__['labeled'] = True # switch on processing (in particular writing) of tags
if reset: self.__IO__['tags'] = list(self.tags) # subsequent data_read uses current tags as data size
@ -410,13 +413,13 @@ class ASCIItable():
def info_append(self,
what):
"""Add item or list to existing set of infos"""
if not isinstance(what, (str, unicode)):
if isinstance(what, (str, unicode)):
self.info += [self._removeCRLF(what)]
else:
try:
for item in what: self.info_append(item)
except:
except TypeError:
self.info += [self._removeCRLF(str(what))]
else:
self.info += [self._removeCRLF(what)]
# ------------------------------------------------------------------
def info_clear(self):
@ -468,10 +471,8 @@ class ASCIItable():
"""Read whole data of all (given) labels as numpy array"""
from collections import Iterable
try:
self.data_rewind() # try to wind back to start of data
except:
pass # assume/hope we are at data start already...
try: self.data_rewind() # try to wind back to start of data
except: pass # assume/hope we are at data start already...
if labels is None or labels == []:
use = None # use all columns (and keep labels intact)
@ -530,13 +531,13 @@ class ASCIItable():
# ------------------------------------------------------------------
def data_append(self,
what):
if not isinstance(what, (str, unicode)):
if isinstance(what, (str, unicode)):
self.data += [what]
else:
try:
for item in what: self.data_append(item)
except:
except TypeError:
self.data += [str(what)]
else:
self.data += [what]
# ------------------------------------------------------------------
def data_set(self,
@ -581,7 +582,7 @@ class ASCIItable():
if len(items) > 2:
if items[1].lower() == 'of':
items = np.ones(datatype(items[0]))*datatype(items[2])
elif items[1].lower() == 'to':
elif items[1].lower() == 'to':
items = np.linspace(datatype(items[0]),datatype(items[2]),
abs(datatype(items[2])-datatype(items[0]))+1,dtype=int)
else: items = list(map(datatype,items))
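
The 'of'/'to' shorthand handled here expands compact specifications in ASCII table data: 'N of X' repeats a value N times, 'A to B' produces an inclusive range. A minimal standalone illustration (not the class code itself; datatype is fixed to int for brevity):

# Sketch of the 'of'/'to' shorthand expansion, with datatype fixed to int.
import numpy as np

def expand(items, datatype=int):
    if len(items) > 2 and items[1].lower() == 'of':
        return np.ones(datatype(items[0]), dtype=int) * datatype(items[2])   # 'N of X'
    elif len(items) > 2 and items[1].lower() == 'to':
        a, b = datatype(items[0]), datatype(items[2])
        return np.linspace(a, b, abs(b - a) + 1, dtype=int)                  # 'A to B', inclusive
    return list(map(datatype, items))

print(expand('3 of 7'.split()))    # -> [7 7 7]
print(expand('2 to 5'.split()))    # -> [2 3 4 5]
print(expand('1 2 3'.split()))     # -> [1, 2, 3]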

View File

@ -65,7 +65,7 @@ if np.any(np.array(options.size) <= 0.0):
if filename == []: filename = [None]
table = damask.ASCIItable(outname = filename[0],
buffered = False)
buffered = False, labeled=False)
damask.util.report(scriptName,filename[0])

View File

@ -17,16 +17,24 @@ list(APPEND OBJECTFILES $<TARGET_OBJECTS:SYSTEM_ROUTINES>)
add_library(PREC OBJECT "prec.f90")
list(APPEND OBJECTFILES $<TARGET_OBJECTS:PREC>)
add_library(QUIT OBJECT "quit.f90")
add_dependencies(QUIT PREC)
list(APPEND OBJECTFILES $<TARGET_OBJECTS:QUIT>)
add_library(DAMASK_INTERFACE OBJECT "DAMASK_interface.f90")
add_dependencies(DAMASK_INTERFACE PREC SYSTEM_ROUTINES)
add_dependencies(DAMASK_INTERFACE QUIT SYSTEM_ROUTINES)
list(APPEND OBJECTFILES $<TARGET_OBJECTS:DAMASK_INTERFACE>)
add_library(IO OBJECT "IO.f90")
add_dependencies(IO DAMASK_INTERFACE)
list(APPEND OBJECTFILES $<TARGET_OBJECTS:IO>)
add_library(HDF5_UTILITIES OBJECT "HDF5_utilities.f90")
add_dependencies(HDF5_UTILITIES IO)
list(APPEND OBJECTFILES $<TARGET_OBJECTS:HDF5_UTILITIES>)
add_library(NUMERICS OBJECT "numerics.f90")
add_dependencies(NUMERICS IO)
add_dependencies(NUMERICS HDF5_UTILITIES)
list(APPEND OBJECTFILES $<TARGET_OBJECTS:NUMERICS>)
add_library(DEBUG OBJECT "debug.f90")

View File

@ -98,7 +98,7 @@ subroutine CPFEM_initAll(el,ip)
call config_init
call math_init
call FE_init
call mesh_init(ip, el) ! pass on coordinates to alter calcMode of first ip
call mesh_init(ip, el)
call lattice_init
call material_init
call constitutive_init
@ -314,7 +314,7 @@ subroutine CPFEM_general(mode, parallelExecution, ffn, ffn1, temperature_inp, dt
thermal_type, &
THERMAL_conduction_ID, &
phase_Nsources, &
material_homog
material_homogenizationAt
use config, only: &
material_Nhomogenization
use crystallite, only: &
@ -503,7 +503,7 @@ subroutine CPFEM_general(mode, parallelExecution, ffn, ffn1, temperature_inp, dt
if (.not. parallelExecution) then
chosenThermal1: select case (thermal_type(mesh_element(3,elCP)))
case (THERMAL_conduction_ID) chosenThermal1
temperature(material_homog(ip,elCP))%p(thermalMapping(material_homog(ip,elCP))%p(ip,elCP)) = &
temperature(material_homogenizationAt(elCP))%p(thermalMapping(material_homogenizationAt(elCP))%p(ip,elCP)) = &
temperature_inp
end select chosenThermal1
materialpoint_F0(1:3,1:3,ip,elCP) = ffn
@ -516,7 +516,7 @@ subroutine CPFEM_general(mode, parallelExecution, ffn, ffn1, temperature_inp, dt
CPFEM_dcsde(1:6,1:6,ip,elCP) = CPFEM_odd_jacobian * math_identity2nd(6)
chosenThermal2: select case (thermal_type(mesh_element(3,elCP)))
case (THERMAL_conduction_ID) chosenThermal2
temperature(material_homog(ip,elCP))%p(thermalMapping(material_homog(ip,elCP))%p(ip,elCP)) = &
temperature(material_homogenizationAt(elCP))%p(thermalMapping(material_homogenizationAt(elCP))%p(ip,elCP)) = &
temperature_inp
end select chosenThermal2
materialpoint_F0(1:3,1:3,ip,elCP) = ffn

View File

@ -18,7 +18,7 @@ contains
!--------------------------------------------------------------------------------------------------
!> @brief call (thread safe) all module initializations
!--------------------------------------------------------------------------------------------------
subroutine CPFEM_initAll(el,ip)
subroutine CPFEM_initAll()
use prec, only: &
pInt
use prec, only: &
@ -55,10 +55,8 @@ subroutine CPFEM_initAll(el,ip)
#endif
implicit none
integer(pInt), intent(in) :: el, & !< FE el number
ip !< FE integration point number
call DAMASK_interface_init ! Spectral and FEM interface to commandline
call DAMASK_interface_init ! Spectral and FEM interface to commandline
call prec_init
call IO_init
#ifdef FEM
@ -69,7 +67,7 @@ subroutine CPFEM_initAll(el,ip)
call config_init
call math_init
call FE_init
call mesh_init(ip, el) ! pass on coordinates to alter calcMode of first ip
call mesh_init
call lattice_init
call material_init
call constitutive_init

View File

@ -7,8 +7,13 @@
!> results
!--------------------------------------------------------------------------------------------------
program DAMASK_FEM
use, intrinsic :: &
iso_fortran_env ! to get compiler_version and compiler_options (at least for gfortran >4.6 at the moment)
#if defined(__GFORTRAN__) || __INTEL_COMPILER >= 1800
use, intrinsic :: iso_fortran_env, only: &
compiler_version, &
compiler_options
#endif
#include <petsc/finclude/petscsys.h>
use PetscDM
use prec, only: &
pInt, &
pReal, &
@ -18,9 +23,7 @@ program DAMASK_FEM
loadCaseFile, &
getSolverJobName
use IO, only: &
IO_read, &
IO_isBlank, &
IO_open_file, &
IO_stringPos, &
IO_stringValue, &
IO_floatValue, &
@ -29,12 +32,7 @@ program DAMASK_FEM
IO_lc, &
IO_intOut, &
IO_warning, &
IO_timeStamp, &
IO_EOF
use debug, only: &
debug_level, &
debug_spectral, &
debug_levelBasic
IO_timeStamp
use math ! need to include the whole module for FFTW
use CPFEM2, only: &
CPFEM_initAll
@ -42,9 +40,9 @@ program DAMASK_FEM
restartWrite, &
restartInc
use numerics, only: &
worldrank, &
maxCutBack, &
stagItMax, &
worldrank
stagItMax
use mesh, only: &
mesh_Nboundaries, &
mesh_boundaries, &
@ -57,37 +55,17 @@ program DAMASK_FEM
maxFields, &
nActiveFields, &
FIELD_MECH_ID, &
FIELD_THERMAL_ID, &
FIELD_DAMAGE_ID, &
FIELD_SOLUTE_ID, &
FIELD_MGTWIN_ID, &
COMPONENT_MECH_X_ID, &
COMPONENT_MECH_Y_ID, &
COMPONENT_MECH_Z_ID, &
COMPONENT_THERMAL_T_ID, &
COMPONENT_DAMAGE_PHI_ID, &
COMPONENT_SOLUTE_CV_ID, &
COMPONENT_SOLUTE_CVPOT_ID, &
COMPONENT_SOLUTE_CH_ID, &
COMPONENT_SOLUTE_CHPOT_ID, &
COMPONENT_SOLUTE_CVaH_ID, &
COMPONENT_SOLUTE_CVaHPOT_ID, &
COMPONENT_MGTWIN_PHI_ID, &
FIELD_MECH_label, &
FIELD_THERMAL_label, &
FIELD_DAMAGE_label, &
FIELD_SOLUTE_label, &
FIELD_MGTWIN_label
FIELD_MECH_label
use FEM_mech
implicit none
#include <petsc/finclude/petsc.h>
!--------------------------------------------------------------------------------------------------
! variables related to information from load case and geom file
integer(pInt), parameter :: FILEUNIT = 234_pInt !< file unit, DAMASK IO does not support newunit feature
integer(pInt), allocatable, dimension(:) :: chunkPos ! this is longer than needed for geometry parsing
integer(pInt) :: &
N_def = 0_pInt !< # of rate of deformation specifiers found in load case file
character(len=65536) :: &
@ -95,7 +73,6 @@ program DAMASK_FEM
!--------------------------------------------------------------------------------------------------
! loop variables, convergence etc.
integer(pInt), parameter :: &
subStepFactor = 2_pInt !< for each substep, divide the last time increment by 2.0
real(pReal) :: &
@ -105,7 +82,8 @@ program DAMASK_FEM
timeIncOld = 0.0_pReal, & !< previous time interval
remainingLoadCaseTime = 0.0_pReal !< remaining time of current load case
logical :: &
guess !< guess along former trajectory
guess, & !< guess along former trajectory
stagIterate
integer(pInt) :: &
i, &
errorID, &
@ -115,18 +93,18 @@ program DAMASK_FEM
currentLoadcase = 0_pInt, & !< current load case
currentFace = 0_pInt, &
inc, & !< current increment in current load case
totalIncsCounter = 0_pInt, & !< total No. of increments
convergedCounter = 0_pInt, & !< No. of converged increments
notConvergedCounter = 0_pInt, & !< No. of non-converged increments
totalIncsCounter = 0_pInt, & !< total # of increments
convergedCounter = 0_pInt, & !< # of converged increments
notConvergedCounter = 0_pInt, & !< # of non-converged increments
fileUnit = 0_pInt, & !< file unit for reading load case and writing results
myStat, &
statUnit = 0_pInt, & !< file unit for statistics output
lastRestartWritten = 0_pInt !< total increment No. at which last restart information was written
integer(pInt) :: &
lastRestartWritten = 0_pInt, & !< total increment No. at which last restart information was written
stagIter, &
component
logical :: &
stagIterate
component
character(len=6) :: loadcase_string
character(len=1024) :: incInfo !< string parsed to solution with information about current load case
character(len=1024) :: &
incInfo
type(tLoadCase), allocatable, dimension(:) :: loadCases !< array of all load cases
type(tSolutionState), allocatable, dimension(:) :: solres
PetscInt :: faceSet, currentFaceSet
@ -134,17 +112,13 @@ program DAMASK_FEM
PetscErrorCode :: ierr
external :: &
MPI_abort, &
DMGetDimension, &
DMGetLabelSize, &
DMGetLabelIdIS, &
ISDestroy, &
quit
!--------------------------------------------------------------------------------------------------
! init DAMASK (all modules)
call CPFEM_initAll(el = 1_pInt, ip = 1_pInt)
write(6,'(/,a)') ' <<<+- DAMASK_FEM init -+>>>'
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
call CPFEM_initAll
write(6,'(/,a)') ' <<<+- DAMASK_FEM init -+>>>'
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
#include "compilation_info.f90"
! reading basic information from load case file and allocate data structure containing load cases
@ -154,12 +128,13 @@ program DAMASK_FEM
!--------------------------------------------------------------------------------------------------
! reading basic information from load case file and allocate data structure containing load cases
call IO_open_file(FILEUNIT,trim(loadCaseFile))
rewind(FILEUNIT)
open(newunit=fileunit,iostat=myStat,file=trim(loadCaseFile),action='read')
if (myStat /= 0_pInt) call IO_error(100_pInt,el=myStat,ext_msg=trim(loadCaseFile))
do
line = IO_read(FILEUNIT)
if (trim(line) == IO_EOF) exit
read(fileUnit, '(A)', iostat=myStat) line
if ( myStat /= 0_pInt) exit
if (IO_isBlank(line)) cycle ! skip empty lines
chunkPos = IO_stringPos(line)
do i = 1_pInt, chunkPos(1) ! reading compulsory parameters for loadcase
select case (IO_lc(IO_stringValue(line,chunkPos,i)))
@ -183,6 +158,16 @@ program DAMASK_FEM
case(FIELD_MECH_ID)
loadCases(i)%fieldBC(field)%nComponents = dimPlex !< X, Y (, Z) displacements
allocate(loadCases(i)%fieldBC(field)%componentBC(loadCases(i)%fieldBC(field)%nComponents))
do component = 1, loadCases(i)%fieldBC(field)%nComponents
select case (component)
case (1)
loadCases(i)%fieldBC(field)%componentBC(component)%ID = COMPONENT_MECH_X_ID
case (2)
loadCases(i)%fieldBC(field)%componentBC(component)%ID = COMPONENT_MECH_Y_ID
case (3)
loadCases(i)%fieldBC(field)%componentBC(component)%ID = COMPONENT_MECH_Z_ID
end select
enddo
end select
do component = 1, loadCases(i)%fieldBC(field)%nComponents
allocate(loadCases(i)%fieldBC(field)%componentBC(component)%Value(mesh_Nboundaries), source = 0.0_pReal)
@ -193,11 +178,12 @@ program DAMASK_FEM
!--------------------------------------------------------------------------------------------------
! reading the load case and assign values to the allocated data structure
rewind(FILEUNIT)
rewind(fileUnit)
do
line = IO_read(FILEUNIT)
if (trim(line) == IO_EOF) exit
read(fileUnit, '(A)', iostat=myStat) line
if ( myStat /= 0_pInt) exit
if (IO_isBlank(line)) cycle ! skip empty lines
chunkPos = IO_stringPos(line)
do i = 1_pInt, chunkPos(1)
select case (IO_lc(IO_stringValue(line,chunkPos,i)))
@ -268,134 +254,15 @@ program DAMASK_FEM
enddo
endif
enddo
case('temp','temperature') ! thermal field
do field = 1, nActiveFields
if (loadCases(currentLoadCase)%fieldBC(field)%ID == FIELD_THERMAL_ID) then
do component = 1, loadcases(currentLoadCase)%fieldBC(field)%nComponents
if (loadCases(currentLoadCase)%fieldBC(field)%componentBC(component)%ID == COMPONENT_THERMAL_T_ID) then
loadCases(currentLoadCase)%fieldBC(field)%componentBC(component)%Mask (currentFaceSet) = &
.true.
loadCases(currentLoadCase)%fieldBC(field)%componentBC(component)%Value(currentFaceSet) = &
IO_floatValue(line,chunkPos,i+1_pInt)
endif
enddo
endif
enddo
case('mgtwin') ! mgtwin field
do field = 1, nActiveFields
if (loadCases(currentLoadCase)%fieldBC(field)%ID == FIELD_MGTWIN_ID) then
do component = 1, loadcases(currentLoadCase)%fieldBC(field)%nComponents
if (loadCases(currentLoadCase)%fieldBC(field)%componentBC(component)%ID == COMPONENT_MGTWIN_PHI_ID) then
loadCases(currentLoadCase)%fieldBC(field)%componentBC(component)%Mask (currentFaceSet) = &
.true.
loadCases(currentLoadCase)%fieldBC(field)%componentBC(component)%Value(currentFaceSet) = &
IO_floatValue(line,chunkPos,i+1_pInt)
endif
enddo
endif
enddo
case('damage')
do field = 1, nActiveFields
if (loadCases(currentLoadCase)%fieldBC(field)%ID == FIELD_DAMAGE_ID) then
do component = 1, loadcases(currentLoadCase)%fieldBC(field)%nComponents
if (loadCases(currentLoadCase)%fieldBC(field)%componentBC(component)%ID == COMPONENT_DAMAGE_PHI_ID) then
loadCases(currentLoadCase)%fieldBC(field)%componentBC(component)%Mask (currentFaceSet) = &
.true.
loadCases(currentLoadCase)%fieldBC(field)%componentBC(component)%Value(currentFaceSet) = &
IO_floatValue(line,chunkPos,i+1_pInt)
endif
enddo
endif
enddo
case('cv')
do field = 1, nActiveFields
if (loadCases(currentLoadCase)%fieldBC(field)%ID == FIELD_SOLUTE_ID) then
do component = 1, loadcases(currentLoadCase)%fieldBC(field)%nComponents
if (loadCases(currentLoadCase)%fieldBC(field)%componentBC(component)%ID == COMPONENT_SOLUTE_CV_ID) then
loadCases(currentLoadCase)%fieldBC(field)%componentBC(component)%Mask (currentFaceSet) = &
.true.
loadCases(currentLoadCase)%fieldBC(field)%componentBC(component)%Value(currentFaceSet) = &
IO_floatValue(line,chunkPos,i+1_pInt)
endif
enddo
endif
enddo
case('cvpot')
do field = 1, nActiveFields
if (loadCases(currentLoadCase)%fieldBC(field)%ID == FIELD_SOLUTE_ID) then
do component = 1, loadcases(currentLoadCase)%fieldBC(field)%nComponents
if (loadCases(currentLoadCase)%fieldBC(field)%componentBC(component)%ID == COMPONENT_SOLUTE_CVPOT_ID) then
loadCases(currentLoadCase)%fieldBC(field)%componentBC(component)%Mask (currentFaceSet) = &
.true.
loadCases(currentLoadCase)%fieldBC(field)%componentBC(component)%Value(currentFaceSet) = &
IO_floatValue(line,chunkPos,i+1_pInt)
endif
enddo
endif
enddo
case('ch')
do field = 1, nActiveFields
if (loadCases(currentLoadCase)%fieldBC(field)%ID == FIELD_SOLUTE_ID) then
do component = 1, loadcases(currentLoadCase)%fieldBC(field)%nComponents
if (loadCases(currentLoadCase)%fieldBC(field)%componentBC(component)%ID == COMPONENT_SOLUTE_CH_ID) then
loadCases(currentLoadCase)%fieldBC(field)%componentBC(component)%Mask (currentFaceSet) = &
.true.
loadCases(currentLoadCase)%fieldBC(field)%componentBC(component)%Value(currentFaceSet) = &
IO_floatValue(line,chunkPos,i+1_pInt)
endif
enddo
endif
enddo
case('chpot')
do field = 1, nActiveFields
if (loadCases(currentLoadCase)%fieldBC(field)%ID == FIELD_SOLUTE_ID) then
do component = 1, loadcases(currentLoadCase)%fieldBC(field)%nComponents
if (loadCases(currentLoadCase)%fieldBC(field)%componentBC(component)%ID == COMPONENT_SOLUTE_CHPOT_ID) then
loadCases(currentLoadCase)%fieldBC(field)%componentBC(component)%Mask (currentFaceSet) = &
.true.
loadCases(currentLoadCase)%fieldBC(field)%componentBC(component)%Value(currentFaceSet) = &
IO_floatValue(line,chunkPos,i+1_pInt)
endif
enddo
endif
enddo
case('cvah')
do field = 1, nActiveFields
if (loadCases(currentLoadCase)%fieldBC(field)%ID == FIELD_SOLUTE_ID) then
do component = 1, loadcases(currentLoadCase)%fieldBC(field)%nComponents
if (loadCases(currentLoadCase)%fieldBC(field)%componentBC(component)%ID == COMPONENT_SOLUTE_CVaH_ID) then
loadCases(currentLoadCase)%fieldBC(field)%componentBC(component)%Mask (currentFaceSet) = &
.true.
loadCases(currentLoadCase)%fieldBC(field)%componentBC(component)%Value(currentFaceSet) = &
IO_floatValue(line,chunkPos,i+1_pInt)
endif
enddo
endif
enddo
case('cvahpot')
do field = 1, nActiveFields
if (loadCases(currentLoadCase)%fieldBC(field)%ID == FIELD_SOLUTE_ID) then
do component = 1, loadcases(currentLoadCase)%fieldBC(field)%nComponents
if (loadCases(currentLoadCase)%fieldBC(field)%componentBC(component)%ID == COMPONENT_SOLUTE_CVaHPOT_ID) then
loadCases(currentLoadCase)%fieldBC(field)%componentBC(component)%Mask (currentFaceSet) = &
.true.
loadCases(currentLoadCase)%fieldBC(field)%componentBC(component)%Value(currentFaceSet) = &
IO_floatValue(line,chunkPos,i+1_pInt)
endif
enddo
endif
enddo
end select
enddo; enddo
close(FILEUNIT)
close(fileUnit)
!--------------------------------------------------------------------------------------------------
! consistency checks and output of load case
loadCases(1)%followFormerTrajectory = .false. ! cannot guess along trajectory for first inc of first currentLoadCase
errorID = 0_pInt
if (worldrank == 0) then
checkLoadcases: do currentLoadCase = 1_pInt, size(loadCases)
checkLoadcases: do currentLoadCase = 1_pInt, size(loadCases)
write (loadcase_string, '(i6)' ) currentLoadCase
write(6,'(1x,a,i6)') 'load case: ', currentLoadCase
if (.not. loadCases(currentLoadCase)%followFormerTrajectory) &
@ -405,18 +272,6 @@ program DAMASK_FEM
case(FIELD_MECH_ID)
write(6,'(2x,a)') 'Field '//trim(FIELD_MECH_label)
case(FIELD_THERMAL_ID)
write(6,'(2x,a)') 'Field '//trim(FIELD_THERMAL_label)
case(FIELD_DAMAGE_ID)
write(6,'(2x,a)') 'Field '//trim(FIELD_DAMAGE_label)
case(FIELD_MGTWIN_ID)
write(6,'(2x,a)') 'Field '//trim(FIELD_MGTWIN_label)
case(FIELD_SOLUTE_ID)
write(6,'(2x,a)') 'Field '//trim(FIELD_SOLUTE_label)
end select
do faceSet = 1_pInt, mesh_Nboundaries
do component = 1_pInt, loadCases(currentLoadCase)%fieldBC(field)%nComponents
@ -437,11 +292,10 @@ program DAMASK_FEM
write(6,'(2x,a,i5,/)') 'restart frequency: ', &
loadCases(currentLoadCase)%restartfrequency
if (errorID > 0_pInt) call IO_error(error_ID = errorID, ext_msg = loadcase_string) ! exit with error message
enddo checkLoadcases
endif
enddo checkLoadcases
!--------------------------------------------------------------------------------------------------
! doing initialization depending on selected solver
! doing initialization depending on active solvers
call Utilities_init()
do field = 1, nActiveFields
select case (loadCases(1)%fieldBC(field)%ID)
@ -450,56 +304,49 @@ program DAMASK_FEM
end select
enddo
!--------------------------------------------------------------------------------------------------
! looping over load cases
loadCaseLooping: do currentLoadCase = 1_pInt, size(loadCases)
time0 = time ! currentLoadCase start time
if (loadCases(currentLoadCase)%followFormerTrajectory) then
guess = .true.
else
guess = .false. ! change of load case, homogeneous guess for the first inc
endif
!--------------------------------------------------------------------------------------------------
! loop over incs defined in input file for current load case
loadCaseLooping: do currentLoadCase = 1_pInt, size(loadCases)
time0 = time ! load case start time
guess = loadCases(currentLoadCase)%followFormerTrajectory ! change of load case? homogeneous guess for the first inc
incLooping: do inc = 1_pInt, loadCases(currentLoadCase)%incs
totalIncsCounter = totalIncsCounter + 1_pInt
!--------------------------------------------------------------------------------------------------
! forwarding time
timeIncOld = timeinc
timeIncOld = timeinc ! last timeinc that brought former inc to an end
if (loadCases(currentLoadCase)%logscale == 0_pInt) then ! linear scale
timeinc = loadCases(currentLoadCase)%time/loadCases(currentLoadCase)%incs ! only valid for given linear time scale. will be overwritten later in case loglinear scale is used
timeinc = loadCases(currentLoadCase)%time/real(loadCases(currentLoadCase)%incs,pReal)
else
if (currentLoadCase == 1_pInt) then ! 1st currentLoadCase of logarithmic scale
if (inc == 1_pInt) then ! 1st inc of 1st currentLoadCase of logarithmic scale
if (currentLoadCase == 1_pInt) then ! 1st load case of logarithmic scale
if (inc == 1_pInt) then ! 1st inc of 1st load case of logarithmic scale
timeinc = loadCases(1)%time*(2.0_pReal**real( 1_pInt-loadCases(1)%incs ,pReal)) ! assume 1st inc is equal to 2nd
else ! not-1st inc of 1st currentLoadCase of logarithmic scale
else ! not-1st inc of 1st load case of logarithmic scale
timeinc = loadCases(1)%time*(2.0_pReal**real(inc-1_pInt-loadCases(1)%incs ,pReal))
endif
else ! not-1st currentLoadCase of logarithmic scale
else ! not-1st load case of logarithmic scale
timeinc = time0 * &
( (1.0_pReal + loadCases(currentLoadCase)%time/time0 )**(real( inc,pReal)/&
real(loadCases(currentLoadCase)%incs ,pReal))&
-(1.0_pReal + loadCases(currentLoadCase)%time/time0 )**(real( (inc-1_pInt),pReal)/&
-(1.0_pReal + loadCases(currentLoadCase)%time/time0 )**(real( inc-1_pInt ,pReal)/&
real(loadCases(currentLoadCase)%incs ,pReal)))
endif
endif
timeinc = timeinc / 2.0_pReal**real(cutBackLevel,pReal) ! depending on cut back level, decrease time step
timeinc = timeinc * real(subStepFactor,pReal)**real(-cutBackLevel,pReal) ! depending on cut back level, decrease time step
forwarding: if(totalIncsCounter >= restartInc) then
stepFraction = 0_pInt
skipping: if (totalIncsCounter <= restartInc) then ! not yet at restart inc?
time = time + timeinc ! just advance time, skip already performed calculation
guess = .true. ! QUESTION: why forced guessing instead of inheriting load case preference?
else skipping
stepFraction = 0_pInt ! fraction scaled by stepFactor**cutLevel
!--------------------------------------------------------------------------------------------------
! loop over sub incs
subIncLooping: do while (stepFraction/subStepFactor**cutBackLevel <1_pInt)
time = time + timeinc ! forward time
stepFraction = stepFraction + 1_pInt
remainingLoadCaseTime = time0 - time + loadCases(currentLoadCase)%time + timeInc
subStepLooping: do while (stepFraction < subStepFactor**cutBackLevel)
remainingLoadCaseTime = loadCases(currentLoadCase)%time+time0 - time
time = time + timeinc ! forward target time
stepFraction = stepFraction + 1_pInt ! count step
!--------------------------------------------------------------------------------------------------
! report begin of new increment
if (worldrank == 0) then
! report begin of new step
write(6,'(/,a)') ' ###########################################################################'
write(6,'(1x,a,es12.5'//&
',a,'//IO_intOut(inc)//',a,'//IO_intOut(loadCases(currentLoadCase)%incs)//&
@ -509,12 +356,14 @@ program DAMASK_FEM
's: Increment ', inc, '/', loadCases(currentLoadCase)%incs,&
'-', stepFraction, '/', subStepFactor**cutBackLevel,&
' of load case ', currentLoadCase,'/',size(loadCases)
flush(6)
write(incInfo,'(a,'//IO_intOut(totalIncsCounter)//',a,'//IO_intOut(sum(loadCases%incs))//&
',a,'//IO_intOut(stepFraction)//',a,'//IO_intOut(subStepFactor**cutBackLevel)//')') &
write(incInfo,&
'(a,'//IO_intOut(totalIncsCounter)//&
',a,'//IO_intOut(sum(loadCases%incs))//&
',a,'//IO_intOut(stepFraction)//&
',a,'//IO_intOut(subStepFactor**cutBackLevel)//')') &
'Increment ',totalIncsCounter,'/',sum(loadCases%incs),&
'-',stepFraction, '/', subStepFactor**cutBackLevel
endif
flush(6)
!--------------------------------------------------------------------------------------------------
! forward fields
@ -539,12 +388,14 @@ program DAMASK_FEM
incInfo,timeinc,timeIncOld,loadCases(currentLoadCase)%fieldBC(field))
end select
if(.not. solres(field)%converged) exit ! no solution found
enddo
stagIter = stagIter + 1_pInt
stagIterate = stagIter < stagItMax .and. &
all(solres(:)%converged) .and. &
.not. all(solres(:)%stagConverged)
stagIterate = stagIter < stagItMax &
.and. all(solres(:)%converged) &
.and. .not. all(solres(:)%stagConverged) ! stationary with respect to staggered iteration
enddo
! check solution
@ -570,85 +421,49 @@ program DAMASK_FEM
if (worldrank == 0) write(statUnit,*) totalIncsCounter, time, cutBackLevel, &
solres%converged, solres%iterationsNeeded ! write statistics about accepted solution
endif
enddo subIncLooping
enddo subStepLooping
cutBackLevel = max(0_pInt, cutBackLevel - 1_pInt) ! try half number of subincs next inc
if(all(solres(:)%converged)) then ! report converged inc
if (all(solres(:)%converged)) then
convergedCounter = convergedCounter + 1_pInt
if (worldrank == 0) then
write(6,'(/,a,'//IO_intOut(totalIncsCounter)//',a)') &
' increment ', totalIncsCounter, ' converged'
endif
write(6,'(/,a,'//IO_intOut(totalIncsCounter)//',a)') & ! report converged inc
' increment ', totalIncsCounter, ' converged'
else
if (worldrank == 0) then
write(6,'(/,a,'//IO_intOut(totalIncsCounter)//',a)') & ! report non-converged inc
' increment ', totalIncsCounter, ' NOT converged'
endif
notConvergedCounter = notConvergedCounter + 1_pInt
write(6,'(/,a,'//IO_intOut(totalIncsCounter)//',a)') & ! report non-converged inc
' increment ', totalIncsCounter, ' NOT converged'
endif; flush(6)
if (mod(inc,loadCases(currentLoadCase)%outputFrequency) == 0_pInt) then ! at output frequency
if (worldrank == 0) then
write(6,'(1/,a)') ' ... writing results to file ......................................'
endif
write(6,'(1/,a)') ' ToDo: ... writing results to file ......................................'
endif
if( loadCases(currentLoadCase)%restartFrequency > 0_pInt .and. & ! at frequency of writing restart information set restart parameter for FEsolving
mod(inc,loadCases(currentLoadCase)%restartFrequency) == 0_pInt) then ! ToDo first call to CPFEM_general will write?
restartWrite = .true.
lastRestartWritten = inc
endif
else forwarding
time = time + timeinc
guess = .true.
endif forwarding
if ( loadCases(currentLoadCase)%restartFrequency > 0_pInt & ! writing of restart info requested ...
.and. mod(inc,loadCases(currentLoadCase)%restartFrequency) == 0_pInt) then ! ... and at frequency of writing restart information
restartWrite = .true. ! set restart parameter for FEsolving
lastRestartWritten = inc ! QUESTION: first call to CPFEM_general will write?
endif
endif skipping
enddo incLooping
enddo loadCaseLooping
!--------------------------------------------------------------------------------------------------
! report summary of whole calculation
if (worldrank == 0) then
write(6,'(/,a)') ' ###########################################################################'
write(6,'(1x,i6.6,a,i6.6,a,f5.1,a)') convergedCounter, ' out of ', &
notConvergedCounter + convergedCounter, ' (', &
real(convergedCounter, pReal)/&
real(notConvergedCounter + convergedCounter,pReal)*100.0_pReal, &
' %) increments converged!'
endif
if (notConvergedCounter > 0_pInt) call quit(3_pInt) ! error if some are not converged
call quit(0_pInt) ! no complaints ;)
write(6,'(1x,'//IO_intOut(convergedCounter)//',a,'//IO_intOut(notConvergedCounter + convergedCounter)//',a,f5.1,a)') &
convergedCounter, ' out of ', &
notConvergedCounter + convergedCounter, ' (', &
real(convergedCounter, pReal)/&
real(notConvergedCounter + convergedCounter,pReal)*100.0_pReal, ' %) increments converged!'
flush(6)
call MPI_file_close(fileUnit,ierr)
close(statUnit)
end program DAMASK_FEM
if (notConvergedCounter > 0_pInt) call quit(2_pInt) ! error if some are not converged
call quit(0_pInt) ! no complaints ;)
!--------------------------------------------------------------------------------------------------
!> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH
!> @brief quit subroutine to mimic behavior of FEM solvers
!> @details exits the Spectral solver and reports time and duration. Exit code 0 signals
!> everything went fine. Exit code 1 signals an error, message according to IO_error. Exit code
!> 2 signals request for regridding, increment of last saved restart information is written to
!> stderr. Exit code 3 signals no severe problems, but some increments did not converge
!--------------------------------------------------------------------------------------------------
subroutine quit(stop_id)
use prec, only: &
pInt
implicit none
integer(pInt), intent(in) :: stop_id
integer, dimension(8) :: dateAndTime ! type default integer
call date_and_time(values = dateAndTime)
write(6,'(/,a)') 'DAMASK terminated on:'
write(6,'(a,2(i2.2,a),i4.4)') 'Date: ',dateAndTime(3),'/',&
dateAndTime(2),'/',&
dateAndTime(1)
write(6,'(a,2(i2.2,a),i2.2)') 'Time: ',dateAndTime(5),':',&
dateAndTime(6),':',&
dateAndTime(7)
if (stop_id == 0_pInt) stop 0 ! normal termination
if (stop_id < 0_pInt) then ! trigger regridding
write(0,'(a,i6)') 'restart information available at ', stop_id*(-1_pInt)
stop 2
endif
if (stop_id == 3_pInt) stop 3 ! not all incs converged
stop 1 ! error (message from IO_error)
end subroutine quit
end program DAMASK_FEM
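
The logarithmic time stepping in the incLooping block above makes each increment grow geometrically with (1 + time/time0) for all but the first load case. A short numerical check, with arbitrarily chosen example values, that these increments telescope to the prescribed load case duration:

# Numerical check of the logarithmic time stepping used above (arbitrary example values).
t0   = 10.0    # accumulated time when this load case starts (not the first load case)
t    = 90.0    # duration of the current load case
incs = 5       # number of increments

timeincs = [t0 * ((1.0 + t/t0)**(inc/float(incs)) - (1.0 + t/t0)**((inc - 1)/float(incs)))
            for inc in range(1, incs + 1)]

print(['{:.4f}'.format(dt) for dt in timeincs])
print('sum = {:.4f} (should equal {})'.format(sum(timeincs), t))
# For the very first load case the code instead uses time*2**(inc-1-incs), i.e. the
# first increment is set equal to the second and each later increment doubles.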

View File

@ -43,24 +43,23 @@ subroutine DAMASK_interface_init()
use, intrinsic :: &
iso_fortran_env
#include <petsc/finclude/petscsys.h>
#if PETSC_VERSION_MAJOR!=3 || PETSC_VERSION_MINOR!=9
#if PETSC_VERSION_MAJOR!=3 || PETSC_VERSION_MINOR!=10
===================================================================================================
3.9.x 3.9.x 3.9.x 3.9.x 3.9.x 3.9.x 3.9.x 3.9.x 3.9.x 3.9.x 3.9.x 3.9.x 3.9.x 3.9.x 3.9.x 3.9.x
3.10.x 3.10.x 3.10.x 3.10.x 3.10.x 3.10.x 3.10.x 3.10.x 3.10.x 3.10.x 3.10.x 3.10.x 3.10.x 3.10.x
===================================================================================================
======= THIS VERSION OF DAMASK REQUIRES PETSc 3.9.x ===========================================
========== THIS VERSION OF DAMASK REQUIRES PETSc 3.9.x ========================================
============= THIS VERSION OF DAMASK REQUIRES PETSc 3.9.x =====================================
================ THIS VERSION OF DAMASK REQUIRES PETSc 3.9.x ==================================
=================== THIS VERSION OF DAMASK REQUIRES PETSc 3.9.x ===============================
====================== THIS VERSION OF DAMASK REQUIRES PETSc 3.9.x ============================
========================= THIS VERSION OF DAMASK REQUIRES PETSc 3.9.x =========================
============================ THIS VERSION OF DAMASK REQUIRES PETSc 3.9.x ======================
=============================== THIS VERSION OF DAMASK REQUIRES PETSc 3.9.x ===================
================================== THIS VERSION OF DAMASK REQUIRES PETSc 3.9.x ================
===================================== THIS VERSION OF DAMASK REQUIRES PETSc 3.9.x =============
======================================== THIS VERSION OF DAMASK REQUIRES PETSc 3.9.x ==========
======= THIS VERSION OF DAMASK REQUIRES PETSc 3.10.x ==========================================
========== THIS VERSION OF DAMASK REQUIRES PETSc 3.10.x =======================================
============= THIS VERSION OF DAMASK REQUIRES PETSc 3.10.x ====================================
================ THIS VERSION OF DAMASK REQUIRES PETSc 3.10.x =================================
=================== THIS VERSION OF DAMASK REQUIRES PETSc 3.10.x ==============================
====================== THIS VERSION OF DAMASK REQUIRES PETSc 3.10.x ===========================
========================= THIS VERSION OF DAMASK REQUIRES PETSc 3.10.x ========================
============================ THIS VERSION OF DAMASK REQUIRES PETSc 3.10.x =====================
=============================== THIS VERSION OF DAMASK REQUIRES PETSc 3.10.x ==================
================================== THIS VERSION OF DAMASK REQUIRES PETSc 3.10.x ===============
===================================== THIS VERSION OF DAMASK REQUIRES PETSc 3.10.x ============
===================================================================================================
3.9.x 3.9.x 3.9.x 3.9.x 3.9.x 3.9.x 3.9.x 3.9.x 3.9.x 3.9.x 3.9.x 3.9.x 3.9.x 3.9.x 3.9.x 3.9.x
3.10.x 3.10.x 3.10.x 3.10.x 3.10.x 3.10.x 3.10.x 3.10.x 3.10.x 3.10.x 3.10.x 3.10.x 3.10.x 3.10.x
===================================================================================================
#endif
use PETScSys
@ -88,9 +87,7 @@ subroutine DAMASK_interface_init()
dateAndTime ! type default integer
PetscErrorCode :: ierr
external :: &
quit,&
PETScErrorF, & ! is called in the CHKERRQ macro
PETScInitialize
quit
open(6, encoding='UTF-8') ! for special characters in output
@ -230,8 +227,8 @@ subroutine setWorkingDirectory(workingDirectoryArg)
implicit none
character(len=*), intent(in) :: workingDirectoryArg !< working directory argument
character(len=1024) :: workingDirectory !< working directory argument
external :: quit
logical :: error
external :: quit
absolutePath: if (workingDirectoryArg(1:1) == '/') then
workingDirectory = workingDirectoryArg
@ -284,11 +281,18 @@ character(len=1024) function getGeometryFile(geometryParameter)
implicit none
character(len=1024), intent(in) :: geometryParameter
logical :: file_exists
external :: quit
getGeometryFile = trim(geometryParameter)
if (scan(getGeometryFile,'/') /= 1) getGeometryFile = trim(getCWD())//'/'//trim(getGeometryFile)
getGeometryFile = makeRelativePath(trim(getCWD()), getGeometryFile)
inquire(file=trim(getGeometryFile), exist=file_exists)
if (.not. file_exists) then
write(6,'(a)') ' Geometry file does not exist ('//trim(getGeometryFile)//')'
call quit(1_pInt)
endif
end function getGeometryFile
@ -302,11 +306,19 @@ character(len=1024) function getLoadCaseFile(loadCaseParameter)
implicit none
character(len=1024), intent(in) :: loadCaseParameter
logical :: file_exists
external :: quit
getLoadCaseFile = trim(loadCaseParameter)
if (scan(getLoadCaseFile,'/') /= 1) getLoadCaseFile = trim(getCWD())//'/'//trim(getLoadCaseFile)
getLoadCaseFile = makeRelativePath(trim(getCWD()), getLoadCaseFile)
inquire(file=trim(getLoadCaseFile), exist=file_exists)
if (.not. file_exists) then
write(6,'(a)') ' Load case file does not exist ('//trim(getLoadCaseFile)//')'
call quit(1_pInt)
endif
end function getLoadCaseFile
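
Both helpers follow the same pattern: prepend the current working directory when the argument is not an absolute path, normalize, and abort if the file is missing. A compact Python sketch of that pattern (function name and message text are illustrative, not DAMASK API):

# Sketch of the path-resolution-and-existence-check pattern used by
# getGeometryFile / getLoadCaseFile (illustrative only).
import os
import sys

def resolve_input_file(argument, kind='geometry'):
    path = argument if argument.startswith('/') else os.path.join(os.getcwd(), argument)
    path = os.path.relpath(path, os.getcwd())          # roughly what makeRelativePath does
    if not os.path.isfile(path):
        sys.exit(' {} file does not exist ({})'.format(kind.capitalize(), path))
    return path

if __name__ == '__main__':
    print(resolve_input_file('20grains16x16x16.geom'))  # hypothetical file name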

View File

@ -27,9 +27,7 @@ program DAMASK_spectral
getSolverJobName, &
interface_restartInc
use IO, only: &
IO_read, &
IO_isBlank, &
IO_open_file, &
IO_stringPos, &
IO_stringValue, &
IO_floatValue, &
@ -38,8 +36,7 @@ program DAMASK_spectral
IO_lc, &
IO_intOut, &
IO_warning, &
IO_timeStamp, &
IO_EOF
IO_timeStamp
use debug, only: &
debug_level, &
debug_spectral, &
@ -90,9 +87,7 @@ program DAMASK_spectral
! variables related to information from load case and geom file
real(pReal), dimension(9) :: temp_valueVector = 0.0_pReal !< temporarily from loadcase file when reading in tensors (initialize to 0.0)
logical, dimension(9) :: temp_maskVector = .false. !< temporarily from loadcase file when reading in tensors
integer(pInt), parameter :: FILEUNIT = 234_pInt !< file unit, DAMASK IO does not support newunit feature
integer(pInt), allocatable, dimension(:) :: chunkPos
integer(pInt) :: &
N_t = 0_pInt, & !< # of time indicators found in load case file
N_n = 0_pInt, & !< # of increment specifiers found in load case file
@ -118,7 +113,7 @@ program DAMASK_spectral
stagIterate
integer(pInt) :: &
i, j, k, l, field, &
errorID, &
errorID = 0_pInt, &
cutBackLevel = 0_pInt, & !< cut back level \f$ t = \frac{t_{inc}}{2^l} \f$
stepFraction = 0_pInt !< fraction of current time interval
integer(pInt) :: &
@ -127,30 +122,36 @@ program DAMASK_spectral
totalIncsCounter = 0_pInt, & !< total # of increments
convergedCounter = 0_pInt, & !< # of converged increments
notConvergedCounter = 0_pInt, & !< # of non-converged increments
resUnit = 0_pInt, & !< file unit for results writing
fileUnit = 0_pInt, & !< file unit for reading load case and writing results
myStat, &
statUnit = 0_pInt, & !< file unit for statistics output
lastRestartWritten = 0_pInt, & !< total increment # at which last restart information was written
stagIter
character(len=6) :: loadcase_string
character(len=1024) :: &
incInfo, & !< string parsed to solution with information about current load case
workingDir
incInfo
type(tLoadCase), allocatable, dimension(:) :: loadCases !< array of all load cases
type(tLoadCase) :: newLoadCase
type(tSolutionState), allocatable, dimension(:) :: solres
integer(MPI_OFFSET_KIND) :: fileOffset
integer(MPI_OFFSET_KIND), dimension(:), allocatable :: outputSize
integer(pInt), parameter :: maxByteOut = 2147483647-4096 !< limit of one file output write https://trac.mpich.org/projects/mpich/ticket/1742
integer(pInt), parameter :: maxRealOut = maxByteOut/pReal
integer(pLongInt), dimension(2) :: outputIndex
integer :: ierr
PetscErrorCode :: ierr
procedure(basic_init), pointer :: &
mech_init
procedure(basic_forward), pointer :: &
mech_forward
procedure(basic_solution), pointer :: &
mech_solution
external :: &
quit
!--------------------------------------------------------------------------------------------------
! init DAMASK (all modules)
call CPFEM_initAll(el = 1_pInt, ip = 1_pInt)
call CPFEM_initAll
write(6,'(/,a)') ' <<<+- DAMASK_spectral init -+>>>'
write(6,'(/,a,/)') ' Roters et al., Computational Materials Science, 2018'
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
@ -162,15 +163,40 @@ program DAMASK_spectral
if (any(thermal_type == THERMAL_conduction_ID )) nActiveFields = nActiveFields + 1
if (any(damage_type == DAMAGE_nonlocal_ID )) nActiveFields = nActiveFields + 1
allocate(solres(nActiveFields))
allocate(newLoadCase%ID(nActiveFields))
!--------------------------------------------------------------------------------------------------
! reading basic information from load case file and allocate data structure containing load cases
call IO_open_file(FILEUNIT,trim(loadCaseFile))
rewind(FILEUNIT)
! assign mechanics solver depending on selected type
select case (spectral_solver)
case (DAMASK_spectral_SolverBasic_label)
mech_init => basic_init
mech_forward => basic_forward
mech_solution => basic_solution
case (DAMASK_spectral_SolverPolarisation_label)
if(iand(debug_level(debug_spectral),debug_levelBasic)/= 0) &
call IO_warning(42_pInt, ext_msg='debug Divergence')
mech_init => polarisation_init
mech_forward => polarisation_forward
mech_solution => polarisation_solution
case default
call IO_error(error_ID = 891_pInt, ext_msg = trim(spectral_solver))
end select
!--------------------------------------------------------------------------------------------------
! reading information from load case file and doing sanity checks
allocate (loadCases(0)) ! array of load cases
open(newunit=fileunit,iostat=myStat,file=trim(loadCaseFile),action='read')
if (myStat /= 0_pInt) call IO_error(100_pInt,el=myStat,ext_msg=trim(loadCaseFile))
do
line = IO_read(FILEUNIT)
if (trim(line) == IO_EOF) exit
read(fileUnit, '(A)', iostat=myStat) line
if ( myStat /= 0_pInt) exit
if (IO_isBlank(line)) cycle ! skip empty lines
currentLoadCase = currentLoadCase + 1_pInt
chunkPos = IO_stringPos(line)
do i = 1_pInt, chunkPos(1) ! reading compulsory parameters for loadcase
select case (IO_lc(IO_stringValue(line,chunkPos,i)))
@ -181,83 +207,65 @@ program DAMASK_spectral
case('n','incs','increments','steps','logincs','logincrements','logsteps')
N_n = N_n + 1_pInt
end select
enddo ! count all identifiers to allocate memory and do sanity check
enddo
enddo
if ((N_def /= N_n) .or. (N_n /= N_t) .or. N_n < 1_pInt) & ! sanity check
call IO_error(error_ID=837_pInt,el=currentLoadCase,ext_msg = trim(loadCaseFile)) ! error message for incomplete loadcase
if ((N_def /= N_n) .or. (N_n /= N_t) .or. N_n < 1_pInt) & ! sanity check
call IO_error(error_ID=837_pInt,ext_msg = trim(loadCaseFile)) ! error message for incomplete loadcase
allocate (loadCases(N_n)) ! array of load cases
loadCases%stress%myType='stress'
do i = 1, size(loadCases)
allocate(loadCases(i)%ID(nActiveFields))
newLoadCase%stress%myType='stress'
field = 1
loadCases(i)%ID(field) = FIELD_MECH_ID ! mechanical active by default
newLoadCase%ID(field) = FIELD_MECH_ID ! mechanical active by default
thermalActive: if (any(thermal_type == THERMAL_conduction_ID)) then
field = field + 1
loadCases(i)%ID(field) = FIELD_THERMAL_ID
newLoadCase%ID(field) = FIELD_THERMAL_ID
endif thermalActive
damageActive: if (any(damage_type == DAMAGE_nonlocal_ID)) then
field = field + 1
loadCases(i)%ID(field) = FIELD_DAMAGE_ID
newLoadCase%ID(field) = FIELD_DAMAGE_ID
endif damageActive
enddo
!--------------------------------------------------------------------------------------------------
! reading the load case and assign values to the allocated data structure
rewind(FILEUNIT)
do
line = IO_read(FILEUNIT)
if (trim(line) == IO_EOF) exit
if (IO_isBlank(line)) cycle ! skip empty lines
currentLoadCase = currentLoadCase + 1_pInt
chunkPos = IO_stringPos(line)
do i = 1_pInt, chunkPos(1)
readIn: do i = 1_pInt, chunkPos(1)
select case (IO_lc(IO_stringValue(line,chunkPos,i)))
case('fdot','dotf','l','velocitygrad','velgrad','velocitygradient','f') ! assign values for the deformation BC matrix
temp_valueVector = 0.0_pReal
if (IO_lc(IO_stringValue(line,chunkPos,i)) == 'fdot'.or. & ! in case of Fdot, set type to fdot
IO_lc(IO_stringValue(line,chunkPos,i)) == 'dotf') then
loadCases(currentLoadCase)%deformation%myType = 'fdot'
newLoadCase%deformation%myType = 'fdot'
else if (IO_lc(IO_stringValue(line,chunkPos,i)) == 'f') then
loadCases(currentLoadCase)%deformation%myType = 'f'
newLoadCase%deformation%myType = 'f'
else
loadCases(currentLoadCase)%deformation%myType = 'l'
newLoadCase%deformation%myType = 'l'
endif
do j = 1_pInt, 9_pInt
temp_maskVector(j) = IO_stringValue(line,chunkPos,i+j) /= '*' ! true if not a *
if (temp_maskVector(j)) temp_valueVector(j) = IO_floatValue(line,chunkPos,i+j) ! read value where applicable
enddo
loadCases(currentLoadCase)%deformation%maskLogical = & ! logical mask in 3x3 notation
transpose(reshape(temp_maskVector,[ 3,3]))
loadCases(currentLoadCase)%deformation%maskFloat = & ! float (1.0/0.0) mask in 3x3 notation
merge(ones,zeros,loadCases(currentLoadCase)%deformation%maskLogical)
loadCases(currentLoadCase)%deformation%values = math_plain9to33(temp_valueVector) ! values in 3x3 notation
newLoadCase%deformation%maskLogical = transpose(reshape(temp_maskVector,[ 3,3])) ! logical mask in 3x3 notation
newLoadCase%deformation%maskFloat = merge(ones,zeros,newLoadCase%deformation%maskLogical)! float (1.0/0.0) mask in 3x3 notation
newLoadCase%deformation%values = math_plain9to33(temp_valueVector) ! values in 3x3 notation
case('p','pk1','piolakirchhoff','stress', 's')
temp_valueVector = 0.0_pReal
do j = 1_pInt, 9_pInt
temp_maskVector(j) = IO_stringValue(line,chunkPos,i+j) /= '*' ! true if not an asterisk
if (temp_maskVector(j)) temp_valueVector(j) = IO_floatValue(line,chunkPos,i+j) ! read value where applicable
enddo
loadCases(currentLoadCase)%stress%maskLogical = transpose(reshape(temp_maskVector,[ 3,3]))
loadCases(currentLoadCase)%stress%maskFloat = merge(ones,zeros,&
loadCases(currentLoadCase)%stress%maskLogical)
loadCases(currentLoadCase)%stress%values = math_plain9to33(temp_valueVector)
newLoadCase%stress%maskLogical = transpose(reshape(temp_maskVector,[ 3,3]))
newLoadCase%stress%maskFloat = merge(ones,zeros,newLoadCase%stress%maskLogical)
newLoadCase%stress%values = math_plain9to33(temp_valueVector)
case('t','time','delta') ! increment time
loadCases(currentLoadCase)%time = IO_floatValue(line,chunkPos,i+1_pInt)
newLoadCase%time = IO_floatValue(line,chunkPos,i+1_pInt)
case('n','incs','increments','steps') ! number of increments
loadCases(currentLoadCase)%incs = IO_intValue(line,chunkPos,i+1_pInt)
newLoadCase%incs = IO_intValue(line,chunkPos,i+1_pInt)
case('logincs','logincrements','logsteps') ! number of increments (switch to log time scaling)
loadCases(currentLoadCase)%incs = IO_intValue(line,chunkPos,i+1_pInt)
loadCases(currentLoadCase)%logscale = 1_pInt
newLoadCase%incs = IO_intValue(line,chunkPos,i+1_pInt)
newLoadCase%logscale = 1_pInt
case('freq','frequency','outputfreq') ! frequency of result writings
loadCases(currentLoadCase)%outputfrequency = IO_intValue(line,chunkPos,i+1_pInt)
newLoadCase%outputfrequency = IO_intValue(line,chunkPos,i+1_pInt)
case('r','restart','restartwrite') ! frequency of writing restart information
loadCases(currentLoadCase)%restartfrequency = &
newLoadCase%restartfrequency = &
max(0_pInt,IO_intValue(line,chunkPos,i+1_pInt))
case('guessreset','dropguessing')
loadCases(currentLoadCase)%followFormerTrajectory = .false. ! do not continue to predict deformation along former trajectory
case('euler') ! rotation of currentLoadCase given in euler angles
newLoadCase%followFormerTrajectory = .false. ! do not continue to predict deformation along former trajectory
case('euler') ! rotation of load case given in euler angles
temp_valueVector = 0.0_pReal
l = 1_pInt ! assuming values given in degrees
k = 1_pInt ! assuming keyword indicating degree/radians present
@ -272,108 +280,90 @@ program DAMASK_spectral
temp_valueVector(j) = IO_floatValue(line,chunkPos,i+k+j)
enddo
if (l == 1_pInt) temp_valueVector(1:3) = temp_valueVector(1:3) * inRad ! convert to rad
loadCases(currentLoadCase)%rotation = math_EulerToR(temp_valueVector(1:3)) ! convert rad Eulers to rotation matrix
case('rotation','rot') ! assign values for the rotation of currentLoadCase matrix
newLoadCase%rotation = math_EulerToR(temp_valueVector(1:3)) ! convert rad Eulers to rotation matrix
case('rotation','rot') ! assign values for the rotation matrix
temp_valueVector = 0.0_pReal
do j = 1_pInt, 9_pInt
temp_valueVector(j) = IO_floatValue(line,chunkPos,i+j)
enddo
loadCases(currentLoadCase)%rotation = math_plain9to33(temp_valueVector)
newLoadCase%rotation = math_plain9to33(temp_valueVector)
end select
enddo; enddo
close(FILEUNIT)
!--------------------------------------------------------------------------------------------------
! consistency checks and output of load case
loadCases(1)%followFormerTrajectory = .false. ! cannot guess along trajectory for first inc of first currentLoadCase
errorID = 0_pInt
if (worldrank == 0) then
checkLoadcases: do currentLoadCase = 1_pInt, size(loadCases)
enddo readIn
newLoadCase%followFormerTrajectory = merge(.true.,.false.,currentLoadCase > 1_pInt) ! by default, guess from previous load case
reportAndCheck: if (worldrank == 0) then
write (loadcase_string, '(i6)' ) currentLoadCase
write(6,'(1x,a,i6)') 'load case: ', currentLoadCase
if (.not. loadCases(currentLoadCase)%followFormerTrajectory) &
write(6,'(2x,a)') 'drop guessing along trajectory'
if (loadCases(currentLoadCase)%deformation%myType == 'l') then
if (.not. newLoadCase%followFormerTrajectory) write(6,'(2x,a)') 'drop guessing along trajectory'
if (newLoadCase%deformation%myType == 'l') then
do j = 1_pInt, 3_pInt
if (any(loadCases(currentLoadCase)%deformation%maskLogical(j,1:3) .eqv. .true.) .and. &
any(loadCases(currentLoadCase)%deformation%maskLogical(j,1:3) .eqv. .false.)) &
errorID = 832_pInt ! each row should be either fully or not at all defined
if (any(newLoadCase%deformation%maskLogical(j,1:3) .eqv. .true.) .and. &
any(newLoadCase%deformation%maskLogical(j,1:3) .eqv. .false.)) errorID = 832_pInt ! each row should be either fully or not at all defined
enddo
write(6,'(2x,a)') 'velocity gradient:'
else if (loadCases(currentLoadCase)%deformation%myType == 'f') then
else if (newLoadCase%deformation%myType == 'f') then
write(6,'(2x,a)') 'deformation gradient at end of load case:'
else
write(6,'(2x,a)') 'deformation gradient rate:'
endif
do i = 1_pInt, 3_pInt; do j = 1_pInt, 3_pInt
if(loadCases(currentLoadCase)%deformation%maskLogical(i,j)) then
write(6,'(2x,f12.7)',advance='no') loadCases(currentLoadCase)%deformation%values(i,j)
if(newLoadCase%deformation%maskLogical(i,j)) then
write(6,'(2x,f12.7)',advance='no') newLoadCase%deformation%values(i,j)
else
write(6,'(2x,12a)',advance='no') ' * '
endif
enddo; write(6,'(/)',advance='no')
enddo
if (any(loadCases(currentLoadCase)%stress%maskLogical .eqv. &
loadCases(currentLoadCase)%deformation%maskLogical)) errorID = 831_pInt ! exclusive or masking only
if (any(loadCases(currentLoadCase)%stress%maskLogical .and. &
transpose(loadCases(currentLoadCase)%stress%maskLogical) .and. &
if (any(newLoadCase%stress%maskLogical .eqv. &
newLoadCase%deformation%maskLogical)) errorID = 831_pInt ! exclusive or masking only
if (any(newLoadCase%stress%maskLogical .and. &
transpose(newLoadCase%stress%maskLogical) .and. &
reshape([ .false.,.true.,.true.,.true.,.false.,.true.,.true.,.true.,.false.],[ 3,3]))) &
errorID = 838_pInt ! no rotation is allowed by stress BC
write(6,'(2x,a)') 'stress / GPa:'
do i = 1_pInt, 3_pInt; do j = 1_pInt, 3_pInt
if(loadCases(currentLoadCase)%stress%maskLogical(i,j)) then
write(6,'(2x,f12.7)',advance='no') loadCases(currentLoadCase)%stress%values(i,j)*1e-9_pReal
if(newLoadCase%stress%maskLogical(i,j)) then
write(6,'(2x,f12.7)',advance='no') newLoadCase%stress%values(i,j)*1e-9_pReal
else
write(6,'(2x,12a)',advance='no') ' * '
endif
enddo; write(6,'(/)',advance='no')
enddo
if (any(abs(math_mul33x33(loadCases(currentLoadCase)%rotation, &
math_transpose33(loadCases(currentLoadCase)%rotation))-math_I3) > &
if (any(abs(math_mul33x33(newLoadCase%rotation, &
transpose(newLoadCase%rotation))-math_I3) > &
reshape(spread(tol_math_check,1,9),[ 3,3]))&
.or. abs(math_det33(loadCases(currentLoadCase)%rotation)) > &
.or. abs(math_det33(newLoadCase%rotation)) > &
1.0_pReal + tol_math_check) errorID = 846_pInt ! given rotation matrix contains strain
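! note: a pure rotation R satisfies R*transpose(R) = I and |det(R)| = 1; deviations
! beyond tol_math_check mean the prescribed matrix also contains stretch, hence
! error 846 'given rotation matrix contains strain'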
if (any(dNeq(loadCases(currentLoadCase)%rotation, math_I3))) &
if (any(dNeq(newLoadCase%rotation, math_I3))) &
write(6,'(2x,a,/,3(3(3x,f12.7,1x)/))',advance='no') 'rotation of loadframe:',&
math_transpose33(loadCases(currentLoadCase)%rotation)
if (loadCases(currentLoadCase)%time < 0.0_pReal) errorID = 834_pInt ! negative time increment
write(6,'(2x,a,f12.6)') 'time: ', loadCases(currentLoadCase)%time
if (loadCases(currentLoadCase)%incs < 1_pInt) errorID = 835_pInt ! non-positive incs count
write(6,'(2x,a,i5)') 'increments: ', loadCases(currentLoadCase)%incs
if (loadCases(currentLoadCase)%outputfrequency < 1_pInt) errorID = 836_pInt ! non-positive result frequency
write(6,'(2x,a,i5)') 'output frequency: ', &
loadCases(currentLoadCase)%outputfrequency
write(6,'(2x,a,i5,/)') 'restart frequency: ', &
loadCases(currentLoadCase)%restartfrequency
transpose(newLoadCase%rotation)
if (newLoadCase%time < 0.0_pReal) errorID = 834_pInt ! negative time increment
write(6,'(2x,a,f12.6)') 'time: ', newLoadCase%time
if (newLoadCase%incs < 1_pInt) errorID = 835_pInt ! non-positive incs count
write(6,'(2x,a,i5)') 'increments: ', newLoadCase%incs
if (newLoadCase%outputfrequency < 1_pInt) errorID = 836_pInt ! non-positive result frequency
write(6,'(2x,a,i5)') 'output frequency: ', newLoadCase%outputfrequency
write(6,'(2x,a,i5,/)') 'restart frequency: ', newLoadCase%restartfrequency
if (errorID > 0_pInt) call IO_error(error_ID = errorID, ext_msg = loadcase_string) ! exit with error message
enddo checkLoadcases
endif
endif reportAndCheck
loadCases = [loadCases,newLoadCase] ! load case is ok, append it
enddo
close(fileUnit)
!--------------------------------------------------------------------------------------------------
! doing initialization depending on selected solver
! doing initialization depending on active solvers
call Utilities_init()
do field = 1, nActiveFields
select case (loadCases(1)%ID(field))
case(FIELD_MECH_ID)
select case (spectral_solver)
case (DAMASK_spectral_SolverBasic_label)
call basic_init
case (DAMASK_spectral_SolverPolarisation_label)
if(iand(debug_level(debug_spectral),debug_levelBasic)/= 0) &
call IO_warning(42_pInt, ext_msg='debug Divergence')
call Polarisation_init
case default
call IO_error(error_ID = 891_pInt, ext_msg = trim(spectral_solver))
end select
call mech_init
case(FIELD_THERMAL_ID)
case(FIELD_THERMAL_ID)
call spectral_thermal_init
case(FIELD_DAMAGE_ID)
call spectral_damage_init()
call spectral_damage_init
end select
enddo
@@ -382,22 +372,22 @@ program DAMASK_spectral
! write header of output file
if (worldrank == 0) then
writeHeader: if (interface_restartInc < 1_pInt) then
open(newunit=resUnit,file=trim(getSolverJobName())//&
open(newunit=fileUnit,file=trim(getSolverJobName())//&
'.spectralOut',form='UNFORMATTED',status='REPLACE')
write(resUnit) 'load:', trim(loadCaseFile) ! ... and write header
write(resUnit) 'workingdir:', trim(workingDir)
write(resUnit) 'geometry:', trim(geometryFile)
write(resUnit) 'grid:', grid
write(resUnit) 'size:', geomSize
write(resUnit) 'materialpoint_sizeResults:', materialpoint_sizeResults
write(resUnit) 'loadcases:', size(loadCases)
write(resUnit) 'frequencies:', loadCases%outputfrequency ! one entry per LoadCase
write(resUnit) 'times:', loadCases%time ! one entry per LoadCase
write(resUnit) 'logscales:', loadCases%logscale
write(resUnit) 'increments:', loadCases%incs ! one entry per LoadCase
write(resUnit) 'startingIncrement:', restartInc ! start with writing out the previous inc
write(resUnit) 'eoh'
close(resUnit) ! end of header
write(fileUnit) 'load:', trim(loadCaseFile) ! ... and write header
write(fileUnit) 'workingdir:', 'n/a'
write(fileUnit) 'geometry:', trim(geometryFile)
write(fileUnit) 'grid:', grid
write(fileUnit) 'size:', geomSize
write(fileUnit) 'materialpoint_sizeResults:', materialpoint_sizeResults
write(fileUnit) 'loadcases:', size(loadCases)
write(fileUnit) 'frequencies:', loadCases%outputfrequency ! one entry per LoadCase
write(fileUnit) 'times:', loadCases%time ! one entry per LoadCase
write(fileUnit) 'logscales:', loadCases%logscale
write(fileUnit) 'increments:', loadCases%incs ! one entry per LoadCase
write(fileUnit) 'startingIncrement:', restartInc ! start with writing out the previous inc
write(fileUnit) 'eoh'
close(fileUnit) ! end of header
open(newunit=statUnit,file=trim(getSolverJobName())//&
'.sta',form='FORMATTED',status='REPLACE')
write(statUnit,'(a)') 'Increment Time CutbackLevel Converged IterationsNeeded' ! statistics file
@@ -419,13 +409,13 @@ program DAMASK_spectral
call MPI_file_open(PETSC_COMM_WORLD, trim(getSolverJobName())//'.spectralOut', &
MPI_MODE_WRONLY + MPI_MODE_APPEND, &
MPI_INFO_NULL, &
resUnit, &
fileUnit, &
ierr)
if (ierr /= 0_pInt) call IO_error(error_ID=894_pInt, ext_msg='MPI_file_open')
call MPI_file_get_position(resUnit,fileOffset,ierr) ! get offset from header
call MPI_file_get_position(fileUnit,fileOffset,ierr) ! get offset from header
if (ierr /= 0_pInt) call IO_error(error_ID=894_pInt, ext_msg='MPI_file_get_position')
fileOffset = fileOffset + sum(outputSize(1:worldrank)) ! offset of my process in file (header + processes before me)
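! i.e. rank r (counting from 0) starts writing at the header size plus the combined
! outputSize of ranks 0..r-1, so the per-process chunks of .spectralOut never overlap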
call MPI_file_seek (resUnit,fileOffset,MPI_SEEK_SET,ierr)
call MPI_file_seek (fileUnit,fileOffset,MPI_SEEK_SET,ierr)
if (ierr /= 0_pInt) call IO_error(error_ID=894_pInt, ext_msg='MPI_file_seek')
writeUndeformed: if (interface_restartInc < 1_pInt) then
@@ -433,7 +423,7 @@ program DAMASK_spectral
do i = 1, size(materialpoint_results,3)/(maxByteOut/(materialpoint_sizeResults*pReal))+1 ! slice the output of my process in chunks not exceeding the limit for one output
outputIndex = int([(i-1_pInt)*((maxRealOut)/materialpoint_sizeResults)+1_pInt, & ! QUESTION: why not starting i at 0 instead of murky 1?
min(i*((maxRealOut)/materialpoint_sizeResults),size(materialpoint_results,3))],pLongInt)
call MPI_file_write(resUnit,reshape(materialpoint_results(:,:,outputIndex(1):outputIndex(2)), &
call MPI_file_write(fileUnit,reshape(materialpoint_results(:,:,outputIndex(1):outputIndex(2)), &
[(outputIndex(2)-outputIndex(1)+1)*int(materialpoint_sizeResults,pLongInt)]), &
int((outputIndex(2)-outputIndex(1)+1)*int(materialpoint_sizeResults,pLongInt)), &
MPI_DOUBLE, MPI_STATUS_IGNORE, ierr)
@@ -441,14 +431,12 @@ program DAMASK_spectral
enddo
fileOffset = fileOffset + sum(outputSize) ! forward to current file position
endif writeUndeformed
!--------------------------------------------------------------------------------------------------
! looping over loadcases
loadCaseLooping: do currentLoadCase = 1_pInt, size(loadCases)
time0 = time ! currentLoadCase start time
time0 = time ! load case start time
guess = loadCases(currentLoadCase)%followFormerTrajectory ! change of load case? homogeneous guess for the first inc
!--------------------------------------------------------------------------------------------------
! loop over incs defined in input file for current currentLoadCase
incLooping: do inc = 1_pInt, loadCases(currentLoadCase)%incs
totalIncsCounter = totalIncsCounter + 1_pInt
@@ -458,13 +446,13 @@ program DAMASK_spectral
if (loadCases(currentLoadCase)%logscale == 0_pInt) then ! linear scale
timeinc = loadCases(currentLoadCase)%time/real(loadCases(currentLoadCase)%incs,pReal)
else
if (currentLoadCase == 1_pInt) then ! 1st currentLoadCase of logarithmic scale
if (inc == 1_pInt) then ! 1st inc of 1st currentLoadCase of logarithmic scale
if (currentLoadCase == 1_pInt) then ! 1st load case of logarithmic scale
if (inc == 1_pInt) then ! 1st inc of 1st load case of logarithmic scale
timeinc = loadCases(1)%time*(2.0_pReal**real( 1_pInt-loadCases(1)%incs ,pReal)) ! assume 1st inc is equal to 2nd
else ! not-1st inc of 1st currentLoadCase of logarithmic scale
else ! not-1st inc of 1st load case of logarithmic scale
timeinc = loadCases(1)%time*(2.0_pReal**real(inc-1_pInt-loadCases(1)%incs ,pReal))
endif
else ! not-1st currentLoadCase of logarithmic scale
else ! not-1st load case of logarithmic scale
timeinc = time0 * &
( (1.0_pReal + loadCases(currentLoadCase)%time/time0 )**(real( inc ,pReal)/&
real(loadCases(currentLoadCase)%incs ,pReal))&
@@ -480,8 +468,6 @@ program DAMASK_spectral
else skipping
stepFraction = 0_pInt ! fraction scaled by stepFactor**cutLevel
!--------------------------------------------------------------------------------------------------
! loop over sub step
subStepLooping: do while (stepFraction < subStepFactor**cutBackLevel)
remainingLoadCaseTime = loadCases(currentLoadCase)%time+time0 - time
time = time + timeinc ! forward target time
@@ -512,24 +498,14 @@ program DAMASK_spectral
do field = 1, nActiveFields
select case(loadCases(currentLoadCase)%ID(field))
case(FIELD_MECH_ID)
select case (spectral_solver)
case (DAMASK_spectral_SolverBasic_label)
call Basic_forward (&
call mech_forward (&
guess,timeinc,timeIncOld,remainingLoadCaseTime, &
deformation_BC = loadCases(currentLoadCase)%deformation, &
stress_BC = loadCases(currentLoadCase)%stress, &
rotation_BC = loadCases(currentLoadCase)%rotation)
case (DAMASK_spectral_SolverPolarisation_label)
call Polarisation_forward (&
guess,timeinc,timeIncOld,remainingLoadCaseTime, &
deformation_BC = loadCases(currentLoadCase)%deformation, &
stress_BC = loadCases(currentLoadCase)%stress, &
rotation_BC = loadCases(currentLoadCase)%rotation)
end select
case(FIELD_THERMAL_ID); call spectral_thermal_forward()
case(FIELD_DAMAGE_ID); call spectral_damage_forward()
case(FIELD_THERMAL_ID); call spectral_thermal_forward()
case(FIELD_DAMAGE_ID); call spectral_damage_forward()
end select
enddo
@@ -541,20 +517,10 @@ program DAMASK_spectral
do field = 1, nActiveFields
select case(loadCases(currentLoadCase)%ID(field))
case(FIELD_MECH_ID)
select case (spectral_solver)
case (DAMASK_spectral_SolverBasic_label)
solres(field) = Basic_solution (&
incInfo,timeinc,timeIncOld, &
stress_BC = loadCases(currentLoadCase)%stress, &
rotation_BC = loadCases(currentLoadCase)%rotation)
case (DAMASK_spectral_SolverPolarisation_label)
solres(field) = Polarisation_solution (&
incInfo,timeinc,timeIncOld, &
stress_BC = loadCases(currentLoadCase)%stress, &
rotation_BC = loadCases(currentLoadCase)%rotation)
end select
solres(field) = mech_solution (&
incInfo,timeinc,timeIncOld, &
stress_BC = loadCases(currentLoadCase)%stress, &
rotation_BC = loadCases(currentLoadCase)%rotation)
case(FIELD_THERMAL_ID)
solres(field) = spectral_thermal_solution(timeinc,timeIncOld,remainingLoadCaseTime)
@@ -595,7 +561,7 @@ program DAMASK_spectral
write(6,'(/,a)') ' cutting back '
else ! no more options to continue
call IO_warning(850_pInt)
call MPI_file_close(resUnit,ierr)
call MPI_file_close(fileUnit,ierr)
close(statUnit)
call quit(-1_pInt*(lastRestartWritten+1_pInt)) ! quit and provide information about last restart inc written
endif
@@ -618,12 +584,12 @@ program DAMASK_spectral
write(6,'(1/,a)') ' ... writing results to file ......................................'
flush(6)
call materialpoint_postResults()
call MPI_file_seek (resUnit,fileOffset,MPI_SEEK_SET,ierr)
call MPI_file_seek (fileUnit,fileOffset,MPI_SEEK_SET,ierr)
if (ierr /= 0_pInt) call IO_error(894_pInt, ext_msg='MPI_file_seek')
do i=1, size(materialpoint_results,3)/(maxByteOut/(materialpoint_sizeResults*pReal))+1 ! slice the output of my process in chunks not exceeding the limit for one output
outputIndex=int([(i-1_pInt)*((maxRealOut)/materialpoint_sizeResults)+1_pInt, &
min(i*((maxRealOut)/materialpoint_sizeResults),size(materialpoint_results,3))],pLongInt)
call MPI_file_write(resUnit,reshape(materialpoint_results(:,:,outputIndex(1):outputIndex(2)),&
call MPI_file_write(fileUnit,reshape(materialpoint_results(:,:,outputIndex(1):outputIndex(2)),&
[(outputIndex(2)-outputIndex(1)+1)*int(materialpoint_sizeResults,pLongInt)]), &
int((outputIndex(2)-outputIndex(1)+1)*int(materialpoint_sizeResults,pLongInt)),&
MPI_DOUBLE, MPI_STATUS_IGNORE, ierr)
@@ -651,66 +617,12 @@ program DAMASK_spectral
convergedCounter, ' out of ', &
notConvergedCounter + convergedCounter, ' (', &
real(convergedCounter, pReal)/&
real(notConvergedCounter + convergedCounter,pReal)*100.0_pReal, &
' %) increments converged!'
real(notConvergedCounter + convergedCounter,pReal)*100.0_pReal, ' %) increments converged!'
flush(6)
call MPI_file_close(resUnit,ierr)
call MPI_file_close(fileUnit,ierr)
close(statUnit)
if (notConvergedCounter > 0_pInt) call quit(3_pInt) ! error if some are not converged
if (notConvergedCounter > 0_pInt) call quit(2_pInt) ! error if some are not converged
call quit(0_pInt) ! no complains ;)
end program DAMASK_spectral
!--------------------------------------------------------------------------------------------------
!> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH
!> @brief quit subroutine to mimic behavior of FEM solvers
!> @details exits the Spectral solver and reports time and duration. Exit code 0 signals
!> everything went fine. Exit code 1 signals an error, message according to IO_error. Exit code
!> 2 signals no converged solution and increment of last saved restart information is written to
!> stderr. Exit code 3 signals no severe problems, but some increments did not converge
!--------------------------------------------------------------------------------------------------
subroutine quit(stop_id)
#include <petsc/finclude/petscsys.h>
use MPI
use prec, only: &
pInt
implicit none
integer(pInt), intent(in) :: stop_id
integer, dimension(8) :: dateAndTime ! type default integer
integer(pInt) :: error = 0_pInt
PetscErrorCode :: ierr = 0
logical :: ErrorInQuit
external :: &
PETScFinalize
call PETScFinalize(ierr)
if (ierr /= 0) write(6,'(a)') ' Error in PETScFinalize'
#ifdef _OPENMP
call MPI_finalize(error)
if (error /= 0) write(6,'(a)') ' Error in MPI_finalize'
#endif
ErrorInQuit = (ierr /= 0 .or. error /= 0_pInt)
call date_and_time(values = dateAndTime)
write(6,'(/,a)') 'DAMASK terminated on:'
write(6,'(a,2(i2.2,a),i4.4)') 'Date: ',dateAndTime(3),'/',&
dateAndTime(2),'/',&
dateAndTime(1)
write(6,'(a,2(i2.2,a),i2.2)') 'Time: ',dateAndTime(5),':',&
dateAndTime(6),':',&
dateAndTime(7)
if (stop_id == 0_pInt .and. .not. ErrorInQuit) stop 0 ! normal termination
if (stop_id < 0_pInt .and. .not. ErrorInQuit) then ! terminally ill, restart might help
write(0,'(a,i6)') 'restart information available at ', stop_id*(-1_pInt)
stop 2
endif
if (stop_id == 3_pInt .and. .not. ErrorInQuit) stop 3 ! not all incs converged
stop 1 ! error (message from IO_error)
end subroutine quit
View File
@@ -5,12 +5,14 @@
!> @brief FEM PETSc solver
!--------------------------------------------------------------------------------------------------
module FEM_mech
#include <petsc/finclude/petscdmplex.h>
#include <petsc/finclude/petscdm.h>
#include <petsc/finclude/petsc.h>
use PETScdmda
use PETScsnes
use PETScDM
use PETScDMplex
use PETScsnes
use PETScDM
use PETScDMplex
use PETScDT
use prec, only: &
pInt, &
pReal
@@ -20,9 +22,6 @@ use PETScDMplex
tSolutionState, &
tFieldBC, &
tComponentBC
use numerics, only: &
worldrank, &
worldsize
use mesh, only: &
mesh_Nboundaries, &
mesh_boundaries
@@ -45,7 +44,7 @@ use PETScDMplex
SNES, private :: mech_snes
Vec, private :: solution, solution_rate, solution_local
PetscInt, private :: dimPlex, cellDof, nQuadrature, nBasis
PetscReal, allocatable, target,dimension(:), private :: qPoints, qWeights
PetscReal, allocatable, target, private :: qPoints(:), qWeights(:)
MatNullSpace, private :: matnull
!--------------------------------------------------------------------------------------------------
@@ -61,27 +60,6 @@ use PETScDMplex
FEM_mech_solution ,&
FEM_mech_forward, &
FEM_mech_destroy
external :: &
MatZeroRowsColumnsLocalIS, &
PetscQuadratureCreate, &
PetscFECreateDefault, &
PetscFESetQuadrature, &
PetscFEGetDimension, &
PetscFEDestroy, &
PetscFEGetDualSpace, &
PetscQuadratureDestroy, &
PetscDSSetDiscretization, &
PetscDSGetTotalDimension, &
PetscDSGetDiscretization, &
PetscDualSpaceGetFunctional, &
DMGetLabelSize, &
DMSNESSetFunctionLocal, &
DMSNESSetJacobianLocal, &
SNESSetOptionsPrefix, &
SNESSetConvergenceTest, &
PetscObjectSetName
contains
!--------------------------------------------------------------------------------------------------
@@ -97,7 +75,6 @@ subroutine FEM_mech_init(fieldBC)
use mesh, only: &
geomMesh
use numerics, only: &
worldrank, &
itmax, &
integrationOrder
use FEM_Zoo, only: &
@@ -115,14 +92,14 @@ subroutine FEM_mech_init(fieldBC)
DMLabel :: BCLabel
PetscInt, allocatable, target :: numComp(:), numDoF(:), bcField(:)
PetscInt, pointer :: pNumComp(:), pNumDof(:), pBcField(:), pBcPoint(:)
PetscInt :: numBC, bcSize
PetscInt :: numBC, bcSize, nc
IS :: bcPoint
IS, allocatable, target :: bcComps(:), bcPoints(:)
IS, pointer :: pBcComps(:), pBcPoints(:)
PetscSection :: section
PetscInt :: field, faceSet, topologDim, nNodalPoints
PetscReal, pointer :: qPointsP(:), qWeightsP(:), &
nodalPointsP(:), nodalWeightsP(:)
PetscReal, dimension(:) , pointer :: qPointsP, qWeightsP, &
nodalPointsP, nodalWeightsP
PetscReal, allocatable, target :: nodalPoints(:), nodalWeights(:)
PetscScalar, pointer :: px_scal(:)
PetscScalar, allocatable, target :: x_scal(:)
@@ -132,7 +109,7 @@ subroutine FEM_mech_init(fieldBC)
PetscInt :: cellStart, cellEnd, cell, basis
character(len=7) :: prefix = 'mechFE_'
PetscErrorCode :: ierr
write(6,'(/,a)') ' <<<+- FEM_mech init -+>>>'
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
#include "compilation_info.f90"
@@ -144,20 +121,21 @@ subroutine FEM_mech_init(fieldBC)
!--------------------------------------------------------------------------------------------------
! Setup FEM mech discretization
allocate(qPoints(dimPlex*FEM_Zoo_nQuadrature(dimPlex,integrationOrder)))
allocate(qWeights(FEM_Zoo_nQuadrature(dimPlex,integrationOrder)))
qPoints = FEM_Zoo_QuadraturePoints(dimPlex,integrationOrder)%p
qWeights = FEM_Zoo_QuadratureWeights(dimPlex,integrationOrder)%p
nQuadrature = FEM_Zoo_nQuadrature(dimPlex,integrationOrder)
qPointsP => qPoints
qWeightsP => qWeights
call PetscQuadratureCreate(PETSC_COMM_SELF,mechQuad,ierr); CHKERRQ(ierr)
call PetscQuadratureSetData(mechQuad,dimPlex,nQuadrature,qPointsP,qWeightsP,ierr)
CHKERRQ(ierr)
call PetscFECreateDefault(mech_mesh,dimPlex,dimPlex,PETSC_TRUE,prefix, &
nc = dimPlex
call PetscQuadratureSetData(mechQuad,dimPlex,nc,nQuadrature,qPointsP,qWeightsP,ierr)
CHKERRQ(ierr)
call PetscFECreateDefault(PETSC_COMM_SELF,dimPlex,nc,PETSC_TRUE,prefix, &
integrationOrder,mechFE,ierr); CHKERRQ(ierr)
call PetscFESetQuadrature(mechFE,mechQuad,ierr); CHKERRQ(ierr)
call PetscFEGetDimension(mechFE,nBasis,ierr); CHKERRQ(ierr)
nBasis = nBasis/nc
call DMGetDS(mech_mesh,mechDS,ierr); CHKERRQ(ierr)
call PetscDSAddDiscretization(mechDS,mechFE,ierr); CHKERRQ(ierr)
call PetscDSGetTotalDimension(mechDS,cellDof,ierr); CHKERRQ(ierr)
@@ -168,7 +146,7 @@ subroutine FEM_mech_init(fieldBC)
! Setup FEM mech boundary conditions
call DMGetLabel(mech_mesh,'Face Sets',BCLabel,ierr); CHKERRQ(ierr)
call DMPlexLabelComplete(mech_mesh,BCLabel,ierr); CHKERRQ(ierr)
call DMGetDefaultSection(mech_mesh,section,ierr); CHKERRQ(ierr)
call DMGetSection(mech_mesh,section,ierr); CHKERRQ(ierr)
allocate(numComp(1), source=dimPlex); pNumComp => numComp
allocate(numDof(dimPlex+1), source = 0); pNumDof => numDof
do topologDim = 0, dimPlex
@@ -210,7 +188,7 @@ subroutine FEM_mech_init(fieldBC)
numBC,pBcField,pBcComps,pBcPoints,PETSC_NULL_IS, &
section,ierr)
CHKERRQ(ierr)
call DMSetDefaultSection(mech_mesh,section,ierr); CHKERRQ(ierr)
call DMSetSection(mech_mesh,section,ierr); CHKERRQ(ierr)
do faceSet = 1, numBC
call ISDestroy(bcPoints(faceSet),ierr); CHKERRQ(ierr)
enddo
@@ -250,7 +228,7 @@ subroutine FEM_mech_init(fieldBC)
pV0 => v0
pCellJ => cellJ
pInvcellJ => invcellJ
call DMGetDefaultSection(mech_mesh,section,ierr); CHKERRQ(ierr)
call DMGetSection(mech_mesh,section,ierr); CHKERRQ(ierr)
call DMGetDS(mech_mesh,mechDS,ierr); CHKERRQ(ierr)
call PetscDSGetDiscretization(mechDS,0,mechFE,ierr)
CHKERRQ(ierr)
@@ -262,12 +240,12 @@ subroutine FEM_mech_init(fieldBC)
call DMPlexComputeCellGeometryAffineFEM(mech_mesh,cell,pV0,pCellJ,pInvcellJ,detJ,ierr)
CHKERRQ(ierr)
cellJMat = reshape(pCellJ,shape=[dimPlex,dimPlex])
do basis = 0, nBasis-1
do basis = 0, nBasis*dimPlex-1, dimPlex
call PetscDualSpaceGetFunctional(mechDualSpace,basis,functional,ierr)
CHKERRQ(ierr)
call PetscQuadratureGetData(functional,dimPlex,nNodalPoints,nodalPointsP,nodalWeightsP,ierr)
call PetscQuadratureGetData(functional,dimPlex,nc,nNodalPoints,nodalPointsP,nodalWeightsP,ierr)
CHKERRQ(ierr)
x_scal(basis*dimPlex+1:(basis+1)*dimPlex) = pV0 + matmul(transpose(cellJMat),nodalPointsP + 1.0)
x_scal(basis+1:basis+dimPlex) = pV0 + matmul(transpose(cellJMat),nodalPointsP + 1.0)
enddo
px_scal => x_scal
call DMPlexVecSetClosure(mech_mesh,section,solution_local,cell,px_scal,INSERT_ALL_VALUES,ierr)
@@ -371,7 +349,7 @@ subroutine FEM_mech_formResidual(dm_local,xx_local,f_local,dummy,ierr)
pV0 => v0
pCellJ => cellJ
pInvcellJ => invcellJ
call DMGetDefaultSection(dm_local,section,ierr); CHKERRQ(ierr)
call DMGetSection(dm_local,section,ierr); CHKERRQ(ierr)
call DMGetDS(dm_local,prob,ierr); CHKERRQ(ierr)
call PetscDSGetTabulation(prob,0,basisField,basisFieldDer,ierr)
CHKERRQ(ierr)
@@ -405,8 +383,8 @@ subroutine FEM_mech_formResidual(dm_local,xx_local,f_local,dummy,ierr)
do comp = 0, dimPlex-1
cidx = basis*dimPlex+comp
BMat(comp*dimPlex+1:(comp+1)*dimPlex,basis*dimPlex+comp+1) = &
matmul(IcellJMat,basisFieldDer((qPt*nBasis*dimPlex+cidx )*dimPlex+1: &
(qPt*nBasis*dimPlex+cidx+1)*dimPlex ))
matmul(IcellJMat,basisFieldDer((((qPt*nBasis + basis)*dimPlex + comp)*dimPlex+comp )*dimPlex+1: &
(((qPt*nBasis + basis)*dimPlex + comp)*dimPlex+comp+1)*dimPlex))
enddo
enddo
materialpoint_F(1:dimPlex,1:dimPlex,qPt+1,cell+1) = &
@@ -446,8 +424,8 @@ subroutine FEM_mech_formResidual(dm_local,xx_local,f_local,dummy,ierr)
do comp = 0, dimPlex-1
cidx = basis*dimPlex+comp
BMat(comp*dimPlex+1:(comp+1)*dimPlex,basis*dimPlex+comp+1) = &
matmul(IcellJMat,basisFieldDer((qPt*nBasis*dimPlex+cidx )*dimPlex+1: &
(qPt*nBasis*dimPlex+cidx+1)*dimPlex ))
matmul(IcellJMat,basisFieldDer((((qPt*nBasis + basis)*dimPlex + comp)*dimPlex+comp )*dimPlex+1: &
(((qPt*nBasis + basis)*dimPlex + comp)*dimPlex+comp+1)*dimPlex))
enddo
enddo
f_scal = f_scal + &
@@ -520,8 +498,8 @@ subroutine FEM_mech_formJacobian(dm_local,xx_local,Jac_pre,Jac,dummy,ierr)
call MatZeroEntries(Jac,ierr); CHKERRQ(ierr)
call DMGetDS(dm_local,prob,ierr); CHKERRQ(ierr)
call PetscDSGetTabulation(prob,0,basisField,basisFieldDer,ierr)
call DMGetDefaultSection(dm_local,section,ierr); CHKERRQ(ierr)
call DMGetDefaultGlobalSection(dm_local,gSection,ierr); CHKERRQ(ierr)
call DMGetSection(dm_local,section,ierr); CHKERRQ(ierr)
call DMGetGlobalSection(dm_local,gSection,ierr); CHKERRQ(ierr)
call DMGetLocalVector(dm_local,x_local,ierr); CHKERRQ(ierr)
call VecWAXPY(x_local,1.0,xx_local,solution_local,ierr); CHKERRQ(ierr)
@@ -555,8 +533,8 @@ subroutine FEM_mech_formJacobian(dm_local,xx_local,Jac_pre,Jac,dummy,ierr)
do comp = 0, dimPlex-1
cidx = basis*dimPlex+comp
BMat(comp*dimPlex+1:(comp+1)*dimPlex,basis*dimPlex+comp+1) = &
matmul(IcellJMat,basisFieldDer((qPt*nBasis*dimPlex+cidx )*dimPlex+1: &
(qPt*nBasis*dimPlex+cidx+1)*dimPlex ))
matmul(IcellJMat,basisFieldDer((((qPt*nBasis + basis)*dimPlex + comp)*dimPlex+comp )*dimPlex+1: &
(((qPt*nBasis + basis)*dimPlex + comp)*dimPlex+comp+1)*dimPlex))
enddo
enddo
MatA = matmul(reshape(reshape(materialpoint_dPdF(1:dimPlex,1:dimPlex,1:dimPlex,1:dimPlex,qPt+1,cell+1), &
@@ -605,7 +583,7 @@ subroutine FEM_mech_formJacobian(dm_local,xx_local,Jac_pre,Jac,dummy,ierr)
!--------------------------------------------------------------------------------------------------
! apply boundary conditions
!call DMPlexCreateRigidBody(dm_local,matnull,ierr); CHKERRQ(ierr) MD: linker error
call DMPlexCreateRigidBody(dm_local,matnull,ierr); CHKERRQ(ierr)
call MatSetNullSpace(Jac,matnull,ierr); CHKERRQ(ierr)
call MatSetNearNullSpace(Jac,matnull,ierr); CHKERRQ(ierr)
call MatNullSpaceDestroy(matnull,ierr); CHKERRQ(ierr)
@@ -646,7 +624,7 @@ subroutine FEM_mech_forward(guess,timeinc,timeinc_old,fieldBC)
ForwardData = .True.
materialpoint_F0 = materialpoint_F
call SNESGetDM(mech_snes,dm_local,ierr); CHKERRQ(ierr) !< retrieve mesh info from mech_snes into dm_local
call DMGetDefaultSection(dm_local,section,ierr); CHKERRQ(ierr)
call DMGetSection(dm_local,section,ierr); CHKERRQ(ierr)
call DMGetLocalVector(dm_local,x_local,ierr); CHKERRQ(ierr)
call VecSet(x_local,0.0,ierr); CHKERRQ(ierr)
call DMGlobalToLocalBegin(dm_local,solution,INSERT_VALUES,x_local,ierr) !< retrieve my partition of global solution vector
@@ -684,6 +662,7 @@ end subroutine FEM_mech_forward
!--------------------------------------------------------------------------------------------------
subroutine FEM_mech_converged(snes_local,PETScIter,xnorm,snorm,fnorm,reason,dummy,ierr)
use numerics, only: &
worldrank, &
err_struct_tolAbs, &
err_struct_tolRel
use IO, only: &
View File
@@ -3,9 +3,12 @@
!> @brief Utilities used by the FEM solver
!--------------------------------------------------------------------------------------------------
module FEM_utilities
#include <petsc/finclude/petsc.h>
#include <petsc/finclude/petscdmplex.h>
#include <petsc/finclude/petscdmda.h>
#include <petsc/finclude/petscis.h>
use prec, only: pReal, pInt
use PETScdmplex
use PETScdmda
use PETScis
@@ -21,25 +24,15 @@ use PETScis
! grid related information
real(pReal), public :: wgt !< weighting factor 1/Nelems
real(pReal), public :: wgtDof !< weighting factor 1/Nelems
real(pReal), public :: C_volAvg(3,3,3,3)
!--------------------------------------------------------------------------------------------------
! output data
PetscViewer, public :: resUnit
Vec, public :: coordinatesVec
Vec, allocatable, public :: homogenizationResultsVec(:), &
crystalliteResultsVec(:,:), &
phaseResultsVec(:,:)
!--------------------------------------------------------------------------------------------------
! field labels information
character(len=*), parameter, public :: &
FIELD_MECH_label = 'mechanical', &
FIELD_THERMAL_label = 'thermal', &
FIELD_DAMAGE_label = 'damage', &
FIELD_SOLUTE_label = 'solute', &
FIELD_MGTWIN_label = 'mgtwin'
FIELD_MECH_label = 'mechanical'
enum, bind(c)
enumerator :: FIELD_UNDEFINED_ID, &
FIELD_MECH_ID, &
@@ -123,34 +116,10 @@ use PETScis
utilities_indexActiveSet, &
utilities_destroy, &
FIELD_MECH_ID, &
FIELD_THERMAL_ID, &
FIELD_DAMAGE_ID, &
FIELD_SOLUTE_ID, &
FIELD_MGTWIN_ID, &
COMPONENT_MECH_X_ID, &
COMPONENT_MECH_Y_ID, &
COMPONENT_MECH_Z_ID, &
COMPONENT_THERMAL_T_ID, &
COMPONENT_DAMAGE_PHI_ID, &
COMPONENT_SOLUTE_CV_ID, &
COMPONENT_SOLUTE_CVPOT_ID, &
COMPONENT_SOLUTE_CH_ID, &
COMPONENT_SOLUTE_CHPOT_ID, &
COMPONENT_SOLUTE_CVaH_ID, &
COMPONENT_SOLUTE_CVaHPOT_ID, &
COMPONENT_MGTWIN_PHI_ID
external :: &
PetscOptionsInsertString, &
PetscObjectSetName, &
DMPlexGetHeightStratum, &
DMGetLabelIdIS, &
DMPlexGetChart, &
DMPlexLabelComplete, &
PetscViewerHDF5Open, &
PetscViewerHDF5PushGroup, &
PetscViewerHDF5PopGroup, &
PetscViewerDestroy
COMPONENT_THERMAL_T_ID
contains
@@ -167,6 +136,7 @@ subroutine utilities_init()
IO_timeStamp, &
IO_open_file
use numerics, only: &
structOrder, &
integrationOrder, &
worldsize, &
worldrank, &
@@ -190,14 +160,11 @@ subroutine utilities_init()
implicit none
character(len=1024) :: petsc_optionsPhysics, grainStr
character(len=1024) :: petsc_optionsPhysics
integer(pInt) :: dimPlex
integer(pInt) :: headerID = 205_pInt
PetscInt, dimension(:), pointer :: points
PetscInt, allocatable :: nEntities(:), nOutputCells(:), nOutputNodes(:), mappingCells(:)
PetscInt :: cellStart, cellEnd, cell, ip, dim, ctr, qPt
PetscInt, allocatable :: connectivity(:,:)
Vec :: connectivityVec
PetscInt, allocatable :: nEntities(:), nOutputCells(:), nOutputNodes(:)
PetscInt :: dim
PetscErrorCode :: ierr
write(6,'(/,a)') ' <<<+- DAMASK_FEM_utilities init -+>>>'
@@ -221,15 +188,12 @@ subroutine utilities_init()
call PetscOptionsInsertString(PETSC_NULL_OPTIONS,trim(petsc_defaultOptions),ierr)
call PetscOptionsInsertString(PETSC_NULL_OPTIONS,trim(petsc_options),ierr)
CHKERRQ(ierr)
!write(petsc_optionsPhysics,'(a,i0)') '-mechFE_petscspace_order ' , structOrder
write(petsc_optionsPhysics,'(a,i0)') '-mechFE_petscspace_degree ' , structOrder
call PetscOptionsInsertString(PETSC_NULL_OPTIONS,trim(petsc_optionsPhysics),ierr)
CHKERRQ(ierr)
wgt = 1.0/real(mesh_maxNips*mesh_NcpElemsGlobal,pReal)
call PetscViewerHDF5Open(PETSC_COMM_WORLD, trim(getSolverJobName())//'.h5', &
FILE_MODE_WRITE, resUnit, ierr); CHKERRQ(ierr)
call PetscViewerHDF5PushGroup(resUnit, '/', ierr); CHKERRQ(ierr)
call DMGetDimension(geomMesh,dimPlex,ierr); CHKERRQ(ierr)
allocate(nEntities(dimPlex+1), source=0)
allocate(nOutputNodes(worldsize), source = 0)
@@ -257,178 +221,6 @@ subroutine utilities_init()
write(headerID, '(a,i0)') 'number of cells : ', sum(nOutputCells)
endif
allocate(connectivity(2**dimPlex,nOutputCells(worldrank+1)))
call DMPlexGetHeightStratum(geomMesh,0,cellStart,cellEnd,ierr)
CHKERRQ(ierr)
ctr = 0
select case (integrationOrder)
case(1_pInt)
do cell = cellStart, cellEnd-1 !< loop over all elements
call DMPlexGetTransitiveClosure(geomMesh,cell,PETSC_TRUE,points,ierr)
CHKERRQ(ierr)
if (dimPlex == 2) then
connectivity(:,ctr+1) = [points( 9), points(11), points(13), points(13)] - nEntities(dimPlex+1)
ctr = ctr + 1
else
connectivity(:,ctr+1) = [points(23), points(25), points(27), points(27), &
points(29), points(29), points(29), points(29)] - nEntities(dimPlex+1)
ctr = ctr + 1
endif
enddo
case(2_pInt)
do cell = cellStart, cellEnd-1 !< loop over all elements
call DMPlexGetTransitiveClosure(geomMesh,cell,PETSC_TRUE,points,ierr)
CHKERRQ(ierr)
if (dimPlex == 2) then
connectivity(:,ctr+1) = [points(9 ), points(3), points(1), points(7)]
connectivity(:,ctr+2) = [points(11), points(5), points(1), points(3)]
connectivity(:,ctr+3) = [points(13), points(7), points(1), points(5)]
ctr = ctr + 3
else
connectivity(:,ctr+1) = [points(23), points(11), points(3), points(15), points(17), points(5), points(1), points(7)]
connectivity(:,ctr+2) = [points(25), points(13), points(3), points(11), points(19), points(9), points(1), points(5)]
connectivity(:,ctr+3) = [points(27), points(15), points(3), points(13), points(21), points(7), points(1), points(9)]
connectivity(:,ctr+4) = [points(29), points(17), points(7), points(21), points(19), points(5), points(1), points(9)]
ctr = ctr + 4_pInt
endif
enddo
case default
do cell = cellStart, cellEnd-1; do ip = 0, mesh_maxNips-1
connectivity(:,ctr+1) = cell*mesh_maxNips + ip
ctr = ctr + 1
enddo; enddo
end select
connectivity = connectivity + sum(nOutputNodes(1:worldrank))
call VecCreateMPI(PETSC_COMM_WORLD,dimPlex*nOutputNodes(worldrank+1),dimPlex*sum(nOutputNodes), &
coordinatesVec,ierr);CHKERRQ(ierr)
call PetscObjectSetName(coordinatesVec, 'NodalCoordinates',ierr)
call VecSetFromOptions(coordinatesVec, ierr); CHKERRQ(ierr)
!allocate(mappingCells(worldsize), source = 0)
!do homog = 1, material_Nhomogenization
! mappingCells = 0_pInt; mappingCells(worldrank+1) = homogOutput(homog)%sizeIpCells
! call MPI_Allreduce(MPI_IN_PLACE,mappingCells,worldsize,MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr)
! call VecCreateMPI(PETSC_COMM_WORLD,mappingCells(worldrank+1),sum(mappingCells), &
! homogenizationResultsVec(homog),ierr);CHKERRQ(ierr)
! if (sum(mappingCells) > 0) then
! call VecCreateMPI(PETSC_COMM_WORLD,mappingCells(worldrank+1)*2**dimPlex,sum(mappingCells)*2**dimPlex, &
! connectivityVec,ierr);CHKERRQ(ierr)
! call PetscObjectSetName(connectivityVec,'mapping_'//trim(homogenization_name(homog)),ierr)
! CHKERRQ(ierr)
! call VecGetArrayF90(connectivityVec,results,ierr); CHKERRQ(ierr)
! results = 0.0_pReal; ctr = 1_pInt
! do cell = cellStart, cellEnd-1; do qPt = 1, mesh_maxNips
! if (material_homog(qPt,cell+1) == homog) then
! results(ctr:ctr+2**dimPlex-1) = real(reshape(connectivity(1:2**dimPlex,mesh_maxNips*cell+qPt), &
! shape=[2**dimPlex]))
! ctr = ctr + 2**dimPlex
! endif
! enddo; enddo
! call VecRestoreArrayF90(connectivityVec, results, ierr); CHKERRQ(ierr)
! call VecAssemblyBegin(connectivityVec, ierr); CHKERRQ(ierr)
! call VecAssemblyEnd (connectivityVec, ierr); CHKERRQ(ierr)
! call VecView(connectivityVec, resUnit, ierr); CHKERRQ(ierr)
! call VecDestroy(connectivityVec, ierr); CHKERRQ(ierr)
! endif
!enddo
!do cryst = 1, material_Ncrystallite; do grain = 1, homogenization_maxNgrains
! mappingCells = 0_pInt
! mappingCells(worldrank+1) = crystalliteOutput(cryst,grain)%sizeIpCells
! call MPI_Allreduce(MPI_IN_PLACE,mappingCells,worldsize,MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr)
! call VecCreateMPI(PETSC_COMM_WORLD,mappingCells(worldrank+1),sum(mappingCells), &
! crystalliteResultsVec(cryst,grain),ierr);CHKERRQ(ierr)
! if (sum(mappingCells) > 0) then
! call VecCreateMPI(PETSC_COMM_WORLD,mappingCells(worldrank+1)*2**dimPlex,sum(mappingCells)*2**dimPlex, &
! connectivityVec,ierr);CHKERRQ(ierr)
! write(grainStr,'(a,i0)') 'Grain',grain
! call PetscObjectSetName(connectivityVec,'mapping_'// &
! trim(crystallite_name(cryst))//'_'// &
! trim(grainStr),ierr)
! CHKERRQ(ierr)
! call VecGetArrayF90(connectivityVec, results, ierr); CHKERRQ(ierr)
! results = 0.0_pReal; ctr = 1_pInt
! do cell = cellStart, cellEnd-1; do qPt = 1, mesh_maxNips
! if (homogenization_Ngrains (mesh_element(3,cell+1)) >= grain .and. &
! microstructure_crystallite(mesh_element(4,cell+1)) == cryst) then
! results(ctr:ctr+2**dimPlex-1) = real(reshape(connectivity(1:2**dimPlex,mesh_maxNips*cell+qPt), &
! shape=[2**dimPlex]))
! ctr = ctr + 2**dimPlex
! endif
! enddo; enddo
! call VecRestoreArrayF90(connectivityVec, results, ierr); CHKERRQ(ierr)
! call VecAssemblyBegin(connectivityVec, ierr); CHKERRQ(ierr)
! call VecAssemblyEnd (connectivityVec, ierr); CHKERRQ(ierr)
! call VecView(connectivityVec, resUnit, ierr); CHKERRQ(ierr)
! call VecDestroy(connectivityVec, ierr); CHKERRQ(ierr)
! endif
!enddo; enddo
!do phase = 1, material_Nphase; do grain = 1, homogenization_maxNgrains
! mappingCells = 0_pInt
! mappingCells(worldrank+1) = phaseOutput(phase,grain)%sizeIpCells
! call MPI_Allreduce(MPI_IN_PLACE,mappingCells,worldsize,MPI_INT,MPI_SUM,PETSC_COMM_WORLD,ierr)
! call VecCreateMPI(PETSC_COMM_WORLD,mappingCells(worldrank+1),sum(mappingCells), &
! phaseResultsVec(phase,grain),ierr);CHKERRQ(ierr)
! if (sum(mappingCells) > 0) then
! call VecCreateMPI(PETSC_COMM_WORLD,mappingCells(worldrank+1)*2**dimPlex,sum(mappingCells)*2**dimPlex, &
! connectivityVec,ierr);CHKERRQ(ierr)
! write(grainStr,'(a,i0)') 'Grain',grain
! call PetscObjectSetName(connectivityVec,&
! 'mapping_'//trim(phase_name(phase))//'_'// &
! trim(grainStr),ierr)
! CHKERRQ(ierr)
! call VecGetArrayF90(connectivityVec, results, ierr)
! CHKERRQ(ierr)
! results = 0.0_pReal; ctr = 1_pInt
! do cell = cellStart, cellEnd-1; do qPt = 1, mesh_maxNips
! if (material_phase(grain,qPt,cell+1) == phase) then
! results(ctr:ctr+2**dimPlex-1) = real(reshape(connectivity(1:2**dimPlex,mesh_maxNips*cell+qPt), &
! shape=[2**dimPlex]))
! ctr = ctr + 2**dimPlex
! endif
! enddo; enddo
! call VecRestoreArrayF90(connectivityVec, results, ierr)
! CHKERRQ(ierr)
! call VecAssemblyBegin(connectivityVec, ierr);CHKERRQ(ierr)
! call VecAssemblyEnd (connectivityVec, ierr);CHKERRQ(ierr)
! call VecView(connectivityVec, resUnit, ierr);CHKERRQ(ierr)
! call VecDestroy(connectivityVec, ierr); CHKERRQ(ierr)
! endif
!enddo; enddo
!if (worldrank == 0_pInt) then
! do homog = 1, material_Nhomogenization
! call VecGetSize(homogenizationResultsVec(homog),mappingCells(1),ierr)
! CHKERRQ(ierr)
! if (mappingCells(1) > 0) &
! write(headerID, '(a,i0)') 'number of homog_'// &
! trim(homogenization_name(homog))//'_'// &
! 'cells : ', mappingCells(1)
! enddo
! do cryst = 1, material_Ncrystallite; do grain = 1, homogenization_maxNgrains
! call VecGetSize(crystalliteResultsVec(cryst,grain),mappingCells(1),ierr)
! CHKERRQ(ierr)
! write(grainStr,'(a,i0)') 'Grain',grain
! if (mappingCells(1) > 0) &
! write(headerID, '(a,i0)') 'number of cryst_'// &
! trim(crystallite_name(cryst))//'_'// &
! trim(grainStr)//'_'// &
! 'cells : ', mappingCells(1)
! enddo; enddo
! do phase = 1, material_Nphase; do grain = 1, homogenization_maxNgrains
! call VecGetSize(phaseResultsVec(phase,grain),mappingCells(1),ierr)
! CHKERRQ(ierr)
! write(grainStr,'(a,i0)') 'Grain',grain
! if (mappingCells(1) > 0) &
! write(headerID, '(a,i0)') 'number of phase_'// &
! trim(phase_name(phase))//'_'//trim(grainStr)//'_'// &
! 'cells : ', mappingCells(1)
! enddo; enddo
! close(headerID)
!endif
end subroutine utilities_init
!--------------------------------------------------------------------------------------------------
@@ -438,8 +230,6 @@ subroutine utilities_constitutiveResponse(timeinc,P_av,forwardData)
use debug, only: &
debug_reset, &
debug_info
use numerics, only: &
worldrank
use math, only: &
math_transpose33, &
math_rotate_forward33, &
@@ -447,10 +237,8 @@ subroutine utilities_constitutiveResponse(timeinc,P_av,forwardData)
use FEsolving, only: &
restartWrite
use homogenization, only: &
materialpoint_F0, &
materialpoint_F, &
materialpoint_P, &
materialpoint_dPdF, &
materialpoint_stressAndItsTangent
use mesh, only: &
mesh_NcpElems
@@ -503,9 +291,7 @@ subroutine utilities_constitutiveResponse(timeinc,P_av,forwardData)
cutBack = .false. ! reset cutBack status
P_av = sum(sum(materialpoint_P,dim=4),dim=3) * wgt ! average of P
C_volAvg = sum(sum(materialpoint_dPdF,dim=6),dim=5) * wgt
call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
call MPI_Allreduce(MPI_IN_PLACE,C_volAvg,81,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD, ierr)
end subroutine utilities_constitutiveResponse
1334 src/HDF5_utilities.f90 Normal file
File diff suppressed because it is too large

View File
@@ -208,7 +208,7 @@ recursive function IO_recursiveRead(fileName,cnt) result(fileContent)
! count lines to allocate string array
myTotalLines = 0_pInt
do l=1_pInt, len(rawData)
if (rawData(l:l) == new_line('')) myTotalLines = myTotalLines+1
if (rawData(l:l) == new_line('') .or. l==len(rawData)) myTotalLines = myTotalLines+1 ! end of line or end of file without new line
enddo
allocate(fileContent(myTotalLines))
@@ -222,6 +222,7 @@ recursive function IO_recursiveRead(fileName,cnt) result(fileContent)
do while (startPos <= len(rawData))
l = l + 1_pInt
endPos = endPos + scan(rawData(startPos:),new_line(''))
if(endPos < startPos) endPos = len(rawData) ! end of file without end of line
if(endPos - startPos >256) call IO_error(107_pInt,ext_msg=trim(fileName))
line = rawData(startPos:endPos-1_pInt)
startPos = endPos + 1_pInt
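! illustration (not part of this commit): a self-contained sketch of the counting rule
! above, treating the last character of a buffer without a trailing newline as an end
! of line; the program and variable names are invented for this example only
program demo_count_lines
 implicit none
 character(len=:), allocatable :: rawData
 integer :: l, myTotalLines

 rawData = 'first line'//new_line('a')//'second line'            ! buffer lacking a final newline
 myTotalLines = 0
 do l = 1, len(rawData)
   if (rawData(l:l) == new_line('a') .or. l == len(rawData)) &   ! end of line or end of file
     myTotalLines = myTotalLines + 1
 enddo
 write(6,'(a,i0)') 'lines counted: ', myTotalLines               ! prints 2
end program demo_count_lines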
@@ -1458,6 +1459,10 @@ subroutine IO_error(error_ID,el,ip,g,instance,ext_msg)
msg = 'empty list'
case (143_pInt)
msg = 'no value found for key'
case (144_pInt)
msg = 'negative number systems requested'
case (145_pInt)
msg = 'too many systems requested'
!--------------------------------------------------------------------------------------------------
! material error messages and related messages in mesh
@@ -1487,6 +1492,8 @@ subroutine IO_error(error_ID,el,ip,g,instance,ext_msg)
msg = 'no microstructure specified via State Variable 3'
case (190_pInt)
msg = 'unknown element type:'
case (191_pInt)
msg = 'mesh consists of more than one element type'
!--------------------------------------------------------------------------------------------------
! plasticity error messages
427 src/MarcInclude/concom2018 Normal file
View File
@@ -0,0 +1,427 @@
! common block definition file taken from respective MSC.Marc release and reformatted to free format
!***********************************************************************
!
! File: concom.cmn
!
! MSC.Marc include file
!
integer(pInt) &
iacous, iasmbl, iautth, ibear, icompl, iconj, icreep, ideva, idyn, idynt,&
ielas, ielcma, ielect, iform, ifour, iharm, ihcps, iheat, iheatt, ihresp,&
ijoule, ilem, ilnmom, iloren, inc, incext, incsub, ipass, iplres, ipois,&
ipoist, irpflo, ismall, ismalt, isoil, ispect, ispnow, istore, iswep, ithcrp,&
itherm, iupblg, iupdat, jacflg, jel, jparks, largst, lfond, loadup, loaduq,&
lodcor, lovl, lsub, magnet, ncycle, newtnt, newton, noshr, linear, ivscpl,&
icrpim, iradrt, ipshft, itshr, iangin, iupmdr, iconjf, jincfl, jpermg, jhour,&
isolvr, jritz, jtable, jshell, jdoubl, jform, jcentr, imini, kautth, iautof,&
ibukty, iassum, icnstd, icnstt, kmakmas, imethvp, iradrte, iradrtp, iupdate, iupdatp,&
ncycnt, marmen , idynme, ihavca, ispf, kmini, imixex, largtt, kdoela, iautofg,&
ipshftp, idntrc, ipore, jtablm, jtablc, isnecma, itrnspo, imsdif, jtrnspo, mcnear,&
imech, imecht, ielcmat, ielectt, magnett, imsdift, noplas, jtabls, jactch, jtablth,&
kgmsto , jpzo, ifricsh, iremkin, iremfor, ishearp, jspf, machining, jlshell, icompsol,&
iupblgfo, jcondir, nstcrp, nactive, ipassref, nstspnt, ibeart, icheckmpc, noline, icuring,&
ishrink, ioffsflg, isetoff, ioffsetm,iharmt, inc_incdat, iautspc, ibrake, icbush, istream_input,&
iprsinp, ivlsinp, ifirst_time,ipin_m, jgnstr_glb, imarc_return,iqvcinp, nqvceid, istpnx, imicro1,&
iaxisymm, jbreakglue,iglstif, jfastasm,iwear, iwearcf, imixmeth, ielcmadyn, idinout, igena_meth,&
magf_meth, non_assumed, iredoboudry, ioffsz0,icomplt, mesh_dual, iactrp, mgnewton, iusedens,igsigd0,&
iaem, icosim, inodels, nlharm, iampini, iphasetr, inonlcl, inonlct, iforminp,ispecerror,&
icsprg
dimension :: ideva(60)
integer(pInt) num_concom
parameter(num_concom=251)
common/marc_concom/&
iacous, iasmbl, iautth, ibear, icompl, iconj, icreep, ideva, idyn, idynt,&
ielas, ielcma, ielect, iform, ifour, iharm, ihcps, iheat, iheatt, ihresp,&
ijoule, ilem, ilnmom, iloren, inc, incext, incsub, ipass, iplres, ipois,&
ipoist, irpflo, ismall, ismalt, isoil, ispect, ispnow, istore, iswep, ithcrp,&
itherm, iupblg, iupdat, jacflg, jel, jparks, largst, lfond, loadup, loaduq,&
lodcor, lovl, lsub, magnet, ncycle, newtnt, newton, noshr, linear, ivscpl,&
icrpim, iradrt, ipshft, itshr, iangin, iupmdr, iconjf, jincfl, jpermg, jhour,&
isolvr, jritz, jtable, jshell, jdoubl, jform, jcentr, imini, kautth, iautof,&
ibukty, iassum, icnstd, icnstt, kmakmas, imethvp, iradrte, iradrtp, iupdate, iupdatp,&
ncycnt, marmen, idynme, ihavca, ispf, kmini, imixex, largtt, kdoela, iautofg,&
ipshftp, idntrc, ipore, jtablm, jtablc, isnecma, itrnspo, imsdif, jtrnspo, mcnear,&
imech, imecht, ielcmat, ielectt, magnett, imsdift, noplas, jtabls, jactch, jtablth,&
kgmsto , jpzo, ifricsh, iremkin, iremfor, ishearp, jspf, machining, jlshell, icompsol,&
iupblgfo, jcondir, nstcrp, nactive, ipassref, nstspnt, ibeart, icheckmpc, noline, icuring,&
ishrink, ioffsflg, isetoff, ioffsetm,iharmt, inc_incdat, iautspc, ibrake, icbush, istream_input,&
iprsinp, ivlsinp, ifirst_time,ipin_m, jgnstr_glb, imarc_return,iqvcinp, nqvceid, istpnx, imicro1,&
iaxisymm, jbreakglue,iglstif, jfastasm,iwear, iwearcf, imixmeth, ielcmadyn, idinout, igena_meth,&
magf_meth, non_assumed, iredoboudry, ioffsz0,icomplt, mesh_dual, iactrp, mgnewton, iusedens,igsigd0,&
iaem, icosim, inodels, nlharm, iampini, iphasetr, inonlcl, inonlct, iforminp,ispecerror,&
icsprg
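! usage sketch (not part of the MSC.Marc file): a hypothetical user routine reading the
! increment and cycle counters from marc_concom; the include file name resolution and
! the routine itself are assumptions for illustration only
subroutine show_marc_counters
 use prec, only: pInt                                             ! provides the pInt kind used above
 implicit none
 include "concom2018"                                             ! declares the marc_concom common block
 write(6,'(a,i0,a,i0)') ' increment: ', inc, ', cycle: ', ncycle  ! both are maintained by Marc
end subroutine show_marc_counters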
!
! comments of variables:
!
! iacous Control flag for acoustic analysis. Input data.
! iacous=1 modal acoustic analysis.
! iacous=2 harmonic acoustic-structural analysis.
! iasmbl Control flag to indicate that operator matrix should be
! recalculated.
! iautth Control flag for AUTO THERM option.
! ibear Control flag for bearing analysis. Input data.
! icompl Control variable to indicate that a complex analysis is
! being performed. Either a Harmonic analysis with damping,
! or a harmonic electro-magnetic analysis. Input data.
! iconj Flag for EBE conjugate gradient solver (=solver 1, retired)
! Also used for VKI iterative solver.
! icreep Control flag for creep analysis. Input data.
! ideva(60) - debug print out flag
! 1 print element stiffness matrices, mass matrix
! 2 output matrices used in tying
! 3 force the solution of a nonpositive definite matrix
! 4 print info of connections to each node
! 5 info of gap convergence, internal heat generated, contact
! touching and separation
! 6 nodal value array during rezoning
! 7 tying info in CONRAD GAP option, fluid element numbers in
! CHANNEL option
! 8 output incremental displacements in local coord. system
! 9 latent heat output
! 10 stress-strain in local coord. system
! 11 additional info on interlaminar stress
! 12 output right hand side and solution vector
! 13 info of CPU resources used and memory available on NT
! 14 info of mesh adaption process, 2D outline information
! info of penetration checking for remeshing
! save .fem files after afmesh3d meshing
! 15 surface energy balance flag
! 16 print info regarding pyrolysis
! 17 print info of "streamline topology"
! 18 print mesh data changes after remeshing
! 19 print material flow stress data read in from *.mat file
! if unit flag is on, print out flow stress after conversion
! 20 print information on table input
! 21 print out information regarding kinematic boundary conditions
! 22 print out information regarding dist loads, point loads, film
! and foundations
! 23 print out information about automatic domain decomposition
! 24 print out iteration information in SuperForm status report file
! 25 print out information for ablation
! 26 print out information for films - Table input
! 27 print out the tying forces
! 28 print out for CASI solver, convection,
! 29 DDM single file debug printout
! 30 print out cavity debug info
! 31 print out welding related info
! 32 prints categorized DDM memory usage
! 33 print out the cutting info regarding machining feature
! 34 print out the list of quantities which can be defined via a table
! and for each quantity the supported independent variables
! 35 print out detailed coupling region info
! 36 print out solver debug info level 1 (Least Detailed)
! 37 print out solver debug info level 1 (Medium Detailed)
! 38 print out solver debug info level 1 (Very Detailed)
! 39 print detailed memory allocation info
! 40 print out marc-adams debug info
! 41 output rezone mapping post file for debugging
! 42 output post file after calling oprofos() for debugging
! 43 debug printout for vcct
! 44 debug printout for progressive failure
! 45 print out automatically generated midside node coordinates (arecrd)
! 46 print out message about routine and location, where the ibort is raised (ibort_inc)
! 47 print out summary message of element variables on a
! group-basis after all the automatic changes have been
! made (em_ellibp)
! 48 Automatically generate check results based on max and min vals.
! These vals are stored in the checkr file, which is inserted
! into the *dat file by the generate_check_results script from /marc/tools
! 49 Automatically generate check results based on the real calculated values
! at the specified check result locations.
! These vals are stored in the checkr file, which is inserted
! into the *dat file by the update_check_results script from /marc/tools
! 50 generate a file containing the resistance or capacity matrix;
! this file can be used to compare results with a reference file
! 51 print out detailed information for segment-to-segment contact
! 52 print out detailed relative displacement information
! for uniaxial sliding contact
! 53 print out detailed sliding direction information for
! uniaxial sliding contact
! 54 print out detailed information for edges attached to a curve
! 55 print information related to viscoelasticity calculations
! 56 print out detailed information for element coloring for multithreading
! 57 print out extra overheads due to multi-threading.
! These overhead includes (i) time and (ii) memory.
! The memory report will be summed over all the children.
!
!
! 58 debug output for ELSTO usage
!
! idyn Control flag for dynamics. Input data.
! 1 = eigenvalue extraction and / or modal superposition
! 2 = Newmark Beta and Single Step Houbolt (ssh with idynme=1)
! 3 = Houbolt
! 4 = Central difference
! 5 = Newer central difference
! idynt Copy of idyn at beginning of increment
! ielas Control flag for ELASTIC analysis. Input data.
! Set by user or automatically turned on by Fourier option.
! Implies that each load case is treated separately.
! In Adaptive meshing analysis , forces re-analysis until
! convergence obtained.
! Also seriously misused to indicate no convergence.
! = 1 elastic option with fourier analysis
! = 2 elastic option without fourier analysis
! =-1 no convergence in recycles or max # increments reached
! Set to 1 if ELASTIC or SUBSTRUC parameter cards are used,
! or if fourier option is used.
! Then set to 2 if not fourier analysis.
! ielcma Control flag for electromagnetic analysis. Input data.
! ielcma = 1 Harmonic formulation
! ielcma = 2 Transient formulation
! ielect Control flag for electrostatic option. Input data.
! iform Control flag indicating that contact will be performed.
! ifour Control flag for Fourier analysis.
! 0 = Odd and even terms.
! 1 = symmetric (cosine) terms
! 2 = antisymmetric (sine) terms.
! iharm Control flag to indicate that a harmonic analysis will
! be performed. May change between passes.
! ihcps Control flag for coupled thermal - stress analysis.
! iheat Control flag for heat transfer analysis. Input data.
! iheatt Permanent control flag for heat transfer analysis.
! Note in coupled analysis iheatt will remain as one,
! but iheat will be zero in stress pass.
! ihresp Control flag to indicate to perform a harmonic subincrement.
! ijoule Control flag for Joule heating.
! ilem Control flag to determine which vector is to be transformed.
! Control flag to see where one is:
! ilem = 1 - elem.f
! ilem = 2 - initst.f
! ilem = 3 - pressr.f
! ilem = 3 - fstif.f
! ilem = 4 - jflux.f
! ilem = 4 - strass.f
! ilem = 5 - mass.f
! ilem = 5 - osolty.f
! ilnmom Control flag for soil - pore pressure calculation. Input data.
! ilnmom = 0 - perform only pore pressure calculation.
! = 1 - couples pore pressure - displacement analysis
! iloren Control flag for DeLorenzi J-Integral evaluation. Input data.
! inc Increment number.
! incext Control flag indicating that currently working on a
! subincrement.
! Could be due to harmonics , damping component (bearing),
! stiffness component (bearing), auto therm creep or
! old viscoplasticity
! incsub Sub-increment number.
! ipass Control flag for which part of coupled analysis.
! ipass = -1 - reset to base values
! ipass = 0 - do nothing
! ipass = 1 - stress part
! ipass = 2 - heat transfer part
! iplres Flag indicating that either second matrix is stored.
! dynamic analysis - mass matrix
! heat transfer - specific heat matrix
! buckle - initial stress stiffness
! ipois Control flag indicating Poisson type analysis
! ipois = 1 for heat transfer
! = 1 for heat transfer part of coupled
! = 1 for bearing
! = 1 for electrostatic
! = 1 for magnetostatic
! ipoist Permanent copy of ipois. In coupled analysis , ipois = 0
! in stress portion, yet ipoist will still =1.
! irpflo global flag for rigid plastic flow analysis
! = 1 eulerian formulation
! = 2 regular formulation; rigid material present in the analysis
! ismall control flag to indicate small displacement analysis. input data.
! ismall = 0 - large disp included.
! ismall = 1 - small displacement.
! the flag is changing between passes.
! ismalt permanent copy of ismall . in heat transfer portion of
! coupled analysis ismall =0 , but ismalt remains the same.
! isoil control flag indicating that soil / pore pressure
! calculation . input data.
! ispect control flag for response spectrum calculation. input data.
! ispnow control flag to indicate to perform a spectrum response
! calculation now.
! istore store stresses flag.
! istore = 0 in elem.f and if first pass of creep
! convergence checking in ogetst.f
! or harmonic analysis or thruc.f if not
! converged.
! iswep control flag for eigenvalue analysis.
! iswep=1 - go do extraction process
! ithcrp control flag for auto therm creep option. input data.
! itherm control flag for either temperature dependent material
! properties and/or thermal loads.
! iupblg control flag for follower force option. input data.
! iupdat control flag for update lagrange option for current element.
! jacflg control flag for lanczos iteration method. input data.
! jel control flag indicating that total load applied in
! increment, ignore previous solution.
! jel = 1 in increment 0
! = 1 if elastic or fourier
! = 1 in subincrements with elastic and adaptive
! jparks control flag for j integral by parks method. input data.
! largst control flag for finite strain plasticity. input data.
! lfond control variable that indicates if doing elastic
! foundation or film calculation. influences whether
! this is volumetric or surface integration.
! loadup control flag that indicates that nonlinearity occurred
! during previous increment.
! loaduq control flag that indicates that nonlinearity occurred.
! lodcor control flag for switching on the residual load correction.
! notice in input stage lodcor=0 means no loadcor,
! after omarc lodcor=1 means no loadcor
! lovl control flag for determining which "overlay" is to
! be called from ellib.
! lovl = 1 omarc
! = 2 oaread
! = 3 opress
! = 4 oasemb
! = 5 osolty
! = 6 ogetst
! = 7 oscinc
! = 8 odynam
! = 9 opmesh
! = 10 omesh2
! = 11 osetz
! = 12 oass
! = 13 oincdt
! = 14 oasmas
! = 15 ofluas
! = 16 ofluso
! = 17 oshtra
! = 18 ocass
! = 19 osoltc
! = 20 orezon
! = 21 otest
! = 22 oeigen
! lsub control variable to determine which part of element
! assembly function is being done.
! lsub = 1 - no longer used
! = 2 - beta*
! = 3 - cons*
! = 4 - ldef*
! = 5 - posw*
! = 6 - theta*
! = 7 - tmarx*
! = 8 - geom*
! magnet control flag for magnetostatic analysis. input data.
! ncycle cycle number. accumulated in osolty.f
! note first time through oasemb.f , ncycle = 0.
! newtnt control flag for permanent copy of newton.
! newton iteration type. input data.
! newton : = 1 full newton raphson
! 2 modified newton raphson
! 3 newton raphson with strain correct.
! 4 direct substitution
! 5 direct substitution followed by n.r.
! 6 direct substitution with line search
! 7 full newton raphson with secant initial stress
! 8 secant method
! 9 full newton raphson with line search
! noshr control flag for calculation interlaminar shears for
! elements 22,45, and 75. input data.
!ees
!
! jactch = 1 or 2 if elements are activated or deactivated
! = 3 if elements are adaptively remeshed or rezoned
! = 0 normally / reset to 0 when assembly is done
! ifricsh = 0 call to fricsh in otest not needed
! = 1 call to fricsh (nodal friction) in otest needed
! iremkin = 0 remove deactivated kinematic boundary conditions
! immediately - only in new input format (this is default)
! = 1 remove deactivated kinematic boundary conditions
! gradually - only in new input format
! iremfor = 0 remove force boundary conditions immediately -
! only in new input format (this is default)
! = 1 remove force boundary conditions gradually -
! only in new input format (this is default)
! ishearp set to 1 if shear panel elements are present in the model
!
! jspf = 0 not in spf loadcase
! > 0 in spf loadcase (jspf=1 during first increment)
! machining = 1 if the metal cutting feature is used, for memory allocation purpose
! = 0 (default) if no metal cutting feature required
!
! jlshell = 1 if there is a shell element in the mesh
! icompsol = 1 if there is a composite solid element in the mesh
! iupblgfo = 1 if follower force for point loads
! jcondir = 1 if contact priority option is used
! nstcrp = 0 (default) steady state creep flag (undocumented feature.
! if not 0, turns off special ncycle = 0 code in radial.f)
! nactive = number of active passes, if =1 then it's not a coupled analysis
! ipassref = reference ipass, if not in a multiphysics pass ipass=ipassref
! icheckmpc = value of mpc-check parameter option
! noline = set to 1 in osolty if no line search should be done in ogetst
! icuring = set to 1 if the curing is included for the heat transfer analysis.
! ishrink = set to 1 if shrinkage strain is included for mechanical analysis.
! ioffsflg = 1 for small displacement beam/shell offsets
! = 2 for large displacement beam/shell offsets
! isetoff = 0 - do not apply beam/shell offsets
! = 1 - apply beam/shell offsets
! ioffsetm = min. value of offset flag
! iharmt = 1 global flag if a coupled analysis contains a harmonic pass
! inc_incdat = flag to record increment number of a new loadcase in incdat.f
! iautspc = flag for AutoSPC option
! ibrake = brake squeal in this increment
! icbush = set to 1 if cbush elements present in model
! istream_input = set to 1 for streaming input calling Marc as library
! iprsinp = set to 1 if pressure input, introduced so other variables
! such as h could be a function of pressure
! ivlsinp = set to 1 if velocity input, introduced so other variables
! such as h could be a function of velocity
! ipin_m = # of beam element with PIN flag
! jgnstr_glb = global control over pre or fast integrated composite shells
! imarc_return = Marc return flag for streaming input control
! iqvcimp = if non-zero, then the number of QVECT boundary conditions
! nqvceid = number of QVECT boundary conditions, where emissivity/absorption id is entered
! istpnx = 1 if to stop at end of increment
! imicro1 = 1 if micro1 interface is used
! iaxisymm = set to 1 if axisymmetric analysis
! jbreakglue = set to 1 if breaking glued option is used
! iglstif = 1 if ddm and global stiffness matrix formed (sgi solver 6 or solver9)
! jfastasm = 1 do fast assembly using SuperForm code
! iwear = set to 1 if wear model, set to 2 if wear model and coordinates updated
! iwearcf = set to 1 to store nodal coefficient of friction for wear calculation
! imixmeth = set to 1 to use nonlinear mixture material - allocate memory
! ielcmadyn = flag for magnetodynamics
! 0 - electromagnetics using newmark beta
! 1 - transient magnetics using backward euler
! idinout = flag to control if inside out elements should be deactivated
! igena_meth = 0 - generalized alpha parameters depend on whether or not contact
! is flagged (dynamic,7)
! 10 - generalized alpha parameters are optimized for a contact
! analysis (dynamic,8)
! 11 - generalized alpha parameters are optimized for an analysis
! without contact (dynamic,8)
! magf_meth = - Method to compute force in magnetostatic - structural
! = 1 - Virtual work method based on finite difference for the force computation
! = 2 - Maxwell stress tensor
! = 3 - Virtual work method based on local derivative for the force computation
! non_assumed = 1 no assumed strain formulation (forced)
! iredoboudry set to 1 if contact boundary needs to be recalculated
! ioffsz0 = 1 if composite are used with reference position.ne.0
! icomplt = 1 global flag if a coupled analysis contains a complex pass
! mesh_dual = 1 two independent meshes are used in magnetodynamic/thermal/structural
! one for magnetodynamic and the other for the remaining passes
! iactrp = 1 in an analysis with global remeshing, include inactive
! rigid bodies on post file
! mgnewton = 1 Use full Newton Raphson iteration for magnetostatic pass
!
! iusedens > 0 if mass density is used in the analysis (dynamics, mass dependent loading)
! igsigd0 = 1 set varselem(igsigd) to zero in next oasemb
! iaem = 1 if marc is called from aem (0 - off - default)
! icosim = 1 if marc is used in co-simulation software (ADAMS-MARC)
! inodels = 1 nodal integration elements 239/240/241 present
! nlharm = 0 harmonic subincrements are linear
! = 1 harmonic subincrements are nonlinear
! iampini = 0 amplitude of previous harmonic subinc is initial estimate (default)
! = 1 zero amplitude is initial estimate
! iphasetr = 1 phase transformation material model is used
! iforminp flag indicating that contact is switched on via the CONTACT
! option in the input file (as opposed to the case that contact
! is switched on internally due to cyclic symmetry or model
! section creation)
! ispecerror = a+10*b (only for spectrum response analysis with missing mass option)
! a=0 or a=1 (modal shape with non-zero shift)
! b=0 or b=1 (recover with new assembly of stiffness matrix)
! icsprg = set to 1 if spring elements present in model
!
!***********************************************************************
!$omp threadprivate(/marc_concom/)
!!
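The lovl codes listed above select which overlay routine from ellib is executed. A minimal, self-contained sketch (a hypothetical helper, not part of the Marc include file) that maps these codes to the routine names documented above:

! hypothetical helper illustrating the lovl -> overlay routine mapping documented above
pure function overlay_name(lovl) result(name)
  implicit none
  integer, intent(in)         :: lovl
  character(len=6)            :: name
  character(len=6), parameter :: names(22) = &
    ['omarc ','oaread','opress','oasemb','osolty','ogetst','oscinc','odynam', &
     'opmesh','omesh2','osetz ','oass  ','oincdt','oasmas','ofluas','ofluso', &
     'oshtra','ocass ','osoltc','orezon','otest ','oeigen']
  if (lovl >= 1 .and. lovl <= 22) then
    name = names(lovl)
  else
    name = '??????'                                      ! unknown overlay code
  endif
end function overlay_name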

View File

@ -0,0 +1,66 @@
! common block definition file taken from respective MSC.Marc release and reformatted to free format
!***********************************************************************
!
! File: creeps.cmn
!
! MSC.Marc include file
!
real(pReal) cptim,timinc,timinc_p,timinc_s,timincm,timinc_a,timinc_b
integer(pInt) icfte,icfst,icfeq,icftm,icetem,mcreep,jcreep,icpa,icftmp,icfstr,&
icfqcp,icfcpm,icrppr,icrcha,icpb,iicpmt,iicpa
real(pReal) time_beg_lcase,time_beg_inc,fractol,time_beg_pst
real(pReal) fraction_donn,timinc_ol2
!
integer(pInt) num_creepsr,num_creepsi,num_creeps2r
parameter(num_creepsr=7)
parameter(num_creepsi=17)
parameter(num_creeps2r=6)
common/marc_creeps/cptim,timinc,timinc_p,timinc_s,timincm,timinc_a,timinc_b,icfte,icfst,&
icfeq,icftm,icetem,mcreep,jcreep,icpa,icftmp,icfstr,icfqcp,icfcpm,icrppr,icrcha,icpb,iicpmt,iicpa
common/marc_creeps2/time_beg_lcase,time_beg_inc,fractol,time_beg_pst,fraction_donn,timinc_ol2
!
! cptim Total time at beginning of increment.
! timinc Incremental time for this step.
! icfte Local copy of the number of slopes of the creep strain rate function
! versus temperature. Is -1 if exponent law is used.
! icfst Local copy of the number of slopes of the creep strain rate function
! versus equivalent stress. Is -1 if exponent law is used.
! icfeq Local copy of the number of slopes of the creep strain rate function
! versus equivalent strain. Is -1 if exponent law is used.
! icftm Local copy of the number of slopes of the creep strain rate function
! versus time. Is -1 if exponent law is used.
! icetem Element number that needs to be checked for creep convergence
! or, if negative, the number of elements that need to
! be checked. In the latter case the elements to check
! are stored in ielcp.
! mcreep Maximum number of iterations for explicit creep.
! jcreep Counter of number of iterations for explicit creep
! procedure. jcreep must be .le. mcreep
! icpa Pointer to constant in creep strain rate expression.
! icftmp Pointer to temperature dependent creep strain rate data.
! icfstr Pointer to equivalent stress dependent creep strain rate data.
! icfqcp Pointer to equivalent creep strain dependent creep strain
! rate data.
! icfcpm Pointer to equivalent creep strain rate dependent
! creep strain rate data.
! icrppr Permanent copy of icreep
! icrcha Control flag for creep convergence checking. If set to
! 1, testing is on absolute change in stress and creep
! strain, not on relative change. Input data.
! icpb Pointer to storage of material id cross reference numbers.
! iicpmt
! iicpa Pointer to constant in creep strain rate expression
!
! time_beg_lcase time at the beginning of the current load case
! time_beg_inc time at the beginning of the current increment
! fractol fraction of loadcase or increment time when we
! consider it to be finished
! time_beg_pst time corresponding to first increment to be
! read in from thermal post file for auto step
!
! timinc_ol2 Time step of the previous increment
!
!***********************************************************************
!!$omp threadprivate(/marc_creeps/)
!!$omp threadprivate(/marc_creeps2/)
!!
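Since cptim is the total time at the beginning of the increment and timinc the incremental time, cptim + timinc gives the time at the end of the increment; together with time_beg_lcase this locates the increment inside the current load case. A minimal sketch of that relation (hypothetical helper, plain reals instead of pReal; loadcase_time is an assumed total load-case duration, not a member of these common blocks):

! hypothetical helper combining the timing variables documented above
pure function loadcase_fraction(cptim, timinc, time_beg_lcase, loadcase_time) result(frac)
  implicit none
  real, intent(in) :: cptim          ! total time at beginning of increment
  real, intent(in) :: timinc         ! incremental time of this step
  real, intent(in) :: time_beg_lcase ! time at beginning of current load case
  real, intent(in) :: loadcase_time  ! assumed total duration of the load case
  real             :: frac
  frac = (cptim + timinc - time_beg_lcase) / loadcase_time
end function loadcase_fraction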

View File

@ -19,16 +19,16 @@ module constitutive
constitutive_init, &
constitutive_homogenizedC, &
constitutive_microstructure, &
constitutive_LpAndItsTangent, &
constitutive_LiAndItsTangent, &
constitutive_LpAndItsTangents, &
constitutive_LiAndItsTangents, &
constitutive_initialFi, &
constitutive_TandItsTangent, &
constitutive_SandItsTangents, &
constitutive_collectDotState, &
constitutive_collectDeltaState, &
constitutive_postResults
private :: &
constitutive_hooke_TandItsTangent
constitutive_hooke_SandItsTangents
contains
@ -346,6 +346,7 @@ end subroutine constitutive_init
!--------------------------------------------------------------------------------------------------
!> @brief returns the homogenize elasticity matrix
!> ToDo: homogenizedC66 would be more consistent
!--------------------------------------------------------------------------------------------------
function constitutive_homogenizedC(ipc,ip,el)
use prec, only: &
@ -385,7 +386,7 @@ subroutine constitutive_microstructure(orientations, Fe, Fp, ipc, ip, el)
use material, only: &
phase_plasticity, &
material_phase, &
material_homog, &
material_homogenizationAt, &
temperature, &
thermalMapping, &
PLASTICITY_dislotwin_ID, &
@ -412,7 +413,7 @@ subroutine constitutive_microstructure(orientations, Fe, Fp, ipc, ip, el)
real(pReal), intent(in), dimension(:,:,:,:) :: &
orientations !< crystal orientations as quaternions
ho = material_homog(ip,el)
ho = material_homogenizationAt(el)
tme = thermalMapping(ho)%p(ip,el)
plasticityType: select case (phase_plasticity(material_phase(ipc,ip,el)))
@ -430,7 +431,7 @@ end subroutine constitutive_microstructure
!--------------------------------------------------------------------------------------------------
!> @brief contains the constitutive equation for calculating the velocity gradient
!--------------------------------------------------------------------------------------------------
subroutine constitutive_LpAndItsTangent(Lp, dLp_dTstar3333, dLp_dFi3333, Tstar_v, Fi, ipc, ip, el)
subroutine constitutive_LpAndItsTangents(Lp, dLp_dS, dLp_dFi, S6, Fi, ipc, ip, el)
use prec, only: &
pReal
use math, only: &
@ -439,9 +440,11 @@ subroutine constitutive_LpAndItsTangent(Lp, dLp_dTstar3333, dLp_dFi3333, Tstar_v
math_Mandel33to6, &
math_Plain99to3333
use material, only: &
phasememberAt, &
phase_plasticity, &
phase_plasticityInstance, &
material_phase, &
material_homog, &
material_homogenizationAt, &
temperature, &
thermalMapping, &
PLASTICITY_NONE_ID, &
@ -470,70 +473,90 @@ subroutine constitutive_LpAndItsTangent(Lp, dLp_dTstar3333, dLp_dFi3333, Tstar_v
ip, & !< integration point
el !< element
real(pReal), intent(in), dimension(6) :: &
Tstar_v !< 2nd Piola-Kirchhoff stress
S6 !< 2nd Piola-Kirchhoff stress (vector notation)
real(pReal), intent(in), dimension(3,3) :: &
Fi !< intermediate deformation gradient
real(pReal), intent(out), dimension(3,3) :: &
Lp !< plastic velocity gradient
real(pReal), intent(out), dimension(3,3,3,3) :: &
dLp_dTstar3333, & !< derivative of Lp with respect to Tstar (4th-order tensor)
dLp_dFi3333 !< derivative of Lp with respect to Fi (4th-order tensor)
real(pReal), dimension(6) :: &
Mstar_v !< Mandel stress work conjugate with Lp
dLp_dS, & !< derivative of Lp with respect to S
dLp_dFi !< derivative of Lp with respect to Fi
real(pReal), dimension(3,3,3,3) :: &
dLp_dMp !< derivative of Lp with respect to Mandel stress
real(pReal), dimension(9,9) :: &
dLp_dMstar !< derivative of Lp with respect to Mstar (4th-order tensor)
dLp_dMp99 !< derivative of Lp with respect to Mandel stress (matrix notation)
real(pReal), dimension(3,3) :: &
temp_33
Mp, & !< Mandel stress work conjugate with Lp
S !< 2nd Piola-Kirchhoff stress
integer(pInt) :: &
ho, & !< homogenization
tme !< thermal member position
integer(pInt) :: &
i, j
i, j, instance, of
ho = material_homog(ip,el)
ho = material_homogenizationAt(el)
tme = thermalMapping(ho)%p(ip,el)
Mstar_v = math_Mandel33to6(math_mul33x33(math_mul33x33(transpose(Fi),Fi),math_Mandel6to33(Tstar_v)))
S = math_Mandel6to33(S6)
Mp = math_mul33x33(math_mul33x33(transpose(Fi),Fi),S)
plasticityType: select case (phase_plasticity(material_phase(ipc,ip,el)))
case (PLASTICITY_NONE_ID) plasticityType
Lp = 0.0_pReal
dLp_dMstar = 0.0_pReal
dLp_dMp = 0.0_pReal
case (PLASTICITY_ISOTROPIC_ID) plasticityType
call plastic_isotropic_LpAndItsTangent (Lp,dLp_dMstar,Mstar_v,ipc,ip,el)
call plastic_isotropic_LpAndItsTangent (Lp,dLp_dMp99, math_Mandel33to6(Mp),ipc,ip,el)
dLp_dMp = math_Plain99to3333(dLp_dMp99) ! ToDo: We revert here the last statement in plastic_xx_LpAndItsTangent
case (PLASTICITY_PHENOPOWERLAW_ID) plasticityType
call plastic_phenopowerlaw_LpAndItsTangent (Lp,dLp_dMstar,Mstar_v,ipc,ip,el)
of = phasememberAt(ipc,ip,el)
instance = phase_plasticityInstance(material_phase(ipc,ip,el))
call plastic_phenopowerlaw_LpAndItsTangent (Lp,dLp_dMp, Mp,instance,of)
case (PLASTICITY_KINEHARDENING_ID) plasticityType
call plastic_kinehardening_LpAndItsTangent (Lp,dLp_dMstar,Mstar_v,ipc,ip,el)
call plastic_kinehardening_LpAndItsTangent (Lp,dLp_dMp99, math_Mandel33to6(Mp),ipc,ip,el)
dLp_dMp = math_Plain99to3333(dLp_dMp99) ! ToDo: We revert here the last statement in plastic_xx_LpAndItsTangent
case (PLASTICITY_NONLOCAL_ID) plasticityType
call plastic_nonlocal_LpAndItsTangent (Lp,dLp_dMstar,Mstar_v, &
call plastic_nonlocal_LpAndItsTangent (Lp,dLp_dMp99, math_Mandel33to6(Mp), &
temperature(ho)%p(tme),ip,el)
dLp_dMp = math_Plain99to3333(dLp_dMp99) ! ToDo: We revert here the last statement in plastic_xx_LpAndItsTangent
case (PLASTICITY_DISLOTWIN_ID) plasticityType
call plastic_dislotwin_LpAndItsTangent (Lp,dLp_dMstar,Mstar_v, &
call plastic_dislotwin_LpAndItsTangent (Lp,dLp_dMp99, math_Mandel33to6(Mp), &
temperature(ho)%p(tme),ipc,ip,el)
dLp_dMp = math_Plain99to3333(dLp_dMp99) ! ToDo: We revert here the last statement in plastic_xx_LpAndItsTangent
case (PLASTICITY_DISLOUCLA_ID) plasticityType
call plastic_disloucla_LpAndItsTangent (Lp,dLp_dMstar,Mstar_v, &
call plastic_disloucla_LpAndItsTangent (Lp,dLp_dMp99, math_Mandel33to6(Mp), &
temperature(ho)%p(tme), ipc,ip,el)
dLp_dMp = math_Plain99to3333(dLp_dMp99) ! ToDo: We revert here the last statement in plastic_xx_LpAndItsTangent
end select plasticityType
dLp_dTstar3333 = math_Plain99to3333(dLp_dMstar)
temp_33 = math_mul33x33(Fi,math_Mandel6to33(Tstar_v))
forall(i = 1_pInt:3_pInt, j = 1_pInt:3_pInt) &
dLp_dFi3333(i,j,1:3,1:3) = math_mul33x33(temp_33,transpose(dLp_dTstar3333(i,j,1:3,1:3))) + &
math_mul33x33(math_mul33x33(Fi,dLp_dTstar3333(i,j,1:3,1:3)),math_Mandel6to33(Tstar_v))
#ifdef __INTEL_COMPILER
forall(i = 1_pInt:3_pInt, j = 1_pInt:3_pInt)
#else
do concurrent(i = 1_pInt:3_pInt, j = 1_pInt:3_pInt)
#endif
dLp_dFi(i,j,1:3,1:3) = math_mul33x33(math_mul33x33(Fi,S),transpose(dLp_dMp(i,j,1:3,1:3))) + &
math_mul33x33(math_mul33x33(Fi,dLp_dMp(i,j,1:3,1:3)),S)
dLp_dS(i,j,1:3,1:3) = math_mul33x33(math_mul33x33(transpose(Fi),Fi),dLp_dMp(i,j,1:3,1:3)) ! ToDo: @PS: why not: dLp_dMp:(FiT Fi)
#ifdef __INTEL_COMPILER
end forall
#else
enddo
#endif
temp_33 = math_mul33x33(transpose(Fi),Fi)
forall(i = 1_pInt:3_pInt, j = 1_pInt:3_pInt) &
dLp_dTstar3333(i,j,1:3,1:3) = math_mul33x33(temp_33,dLp_dTstar3333(i,j,1:3,1:3))
end subroutine constitutive_LpAndItsTangent
end subroutine constitutive_LpAndItsTangents
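The refactoring above replaces the 6-vector stress argument by the Mandel stress Mp = Fi^T Fi S as the work conjugate of Lp, so the S-tangent follows as dLp_dS(i,j,:,:) = (Fi^T Fi) dLp_dMp(i,j,:,:). A minimal standalone sketch of the pull-back (intrinsic matmul in place of DAMASK's math_mul33x33; the helper name is hypothetical):

! hypothetical helper: Mandel stress work conjugate to Lp, as used above
pure subroutine mandel_pullback(S, Fi, Mp)
  implicit none
  real, intent(in),  dimension(3,3) :: S   ! 2nd Piola-Kirchhoff stress
  real, intent(in),  dimension(3,3) :: Fi  ! intermediate deformation gradient
  real, intent(out), dimension(3,3) :: Mp  ! Mandel stress
  Mp = matmul(matmul(transpose(Fi), Fi), S)
end subroutine mandel_pullback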
!--------------------------------------------------------------------------------------------------
!> @brief contains the constitutive equation for calculating the velocity gradient
!--------------------------------------------------------------------------------------------------
subroutine constitutive_LiAndItsTangent(Li, dLi_dTstar3333, dLi_dFi3333, Tstar_v, Fi, ipc, ip, el)
subroutine constitutive_LiAndItsTangents(Li, dLi_dS, dLi_dFi, S6, Fi, ipc, ip, el)
use prec, only: &
pReal
use math, only: &
@ -571,18 +594,18 @@ subroutine constitutive_LiAndItsTangent(Li, dLi_dTstar3333, dLi_dFi3333, Tstar_v
ip, & !< integration point
el !< element
real(pReal), intent(in), dimension(6) :: &
Tstar_v !< 2nd Piola-Kirchhoff stress
S6 !< 2nd Piola-Kirchhoff stress (vector notation)
real(pReal), intent(in), dimension(3,3) :: &
Fi !< intermediate deformation gradient
real(pReal), intent(out), dimension(3,3) :: &
Li !< intermediate velocity gradient
real(pReal), intent(out), dimension(3,3,3,3) :: &
dLi_dTstar3333, & !< derivative of Li with respect to Tstar (4th-order tensor)
dLi_dFi3333
dLi_dS, & !< derivative of Li with respect to S
dLi_dFi
real(pReal), dimension(3,3) :: &
my_Li !< intermediate velocity gradient
real(pReal), dimension(3,3,3,3) :: &
my_dLi_dTstar
my_dLi_dS
real(pReal), dimension(3,3) :: &
FiInv, &
temp_33
@ -594,51 +617,52 @@ subroutine constitutive_LiAndItsTangent(Li, dLi_dTstar3333, dLi_dFi3333, Tstar_v
i, j
Li = 0.0_pReal
dLi_dTstar3333 = 0.0_pReal
dLi_dFi3333 = 0.0_pReal
dLi_dS = 0.0_pReal
dLi_dFi = 0.0_pReal
plasticityType: select case (phase_plasticity(material_phase(ipc,ip,el)))
case (PLASTICITY_isotropic_ID) plasticityType
call plastic_isotropic_LiAndItsTangent(my_Li, my_dLi_dTstar, Tstar_v, ipc, ip, el)
call plastic_isotropic_LiAndItsTangent(my_Li, my_dLi_dS, S6, ipc, ip, el)
case default plasticityType
my_Li = 0.0_pReal
my_dLi_dTstar = 0.0_pReal
my_dLi_dS = 0.0_pReal
end select plasticityType
Li = Li + my_Li
dLi_dTstar3333 = dLi_dTstar3333 + my_dLi_dTstar
dLi_dS = dLi_dS + my_dLi_dS
KinematicsLoop: do k = 1_pInt, phase_Nkinematics(material_phase(ipc,ip,el))
kinematicsType: select case (phase_kinematics(k,material_phase(ipc,ip,el)))
case (KINEMATICS_cleavage_opening_ID) kinematicsType
call kinematics_cleavage_opening_LiAndItsTangent(my_Li, my_dLi_dTstar, Tstar_v, ipc, ip, el)
call kinematics_cleavage_opening_LiAndItsTangent(my_Li, my_dLi_dS, S6, ipc, ip, el)
case (KINEMATICS_slipplane_opening_ID) kinematicsType
call kinematics_slipplane_opening_LiAndItsTangent(my_Li, my_dLi_dTstar, Tstar_v, ipc, ip, el)
call kinematics_slipplane_opening_LiAndItsTangent(my_Li, my_dLi_dS, S6, ipc, ip, el)
case (KINEMATICS_thermal_expansion_ID) kinematicsType
call kinematics_thermal_expansion_LiAndItsTangent(my_Li, my_dLi_dTstar, ipc, ip, el)
call kinematics_thermal_expansion_LiAndItsTangent(my_Li, my_dLi_dS, ipc, ip, el)
case (KINEMATICS_vacancy_strain_ID) kinematicsType
call kinematics_vacancy_strain_LiAndItsTangent(my_Li, my_dLi_dTstar, ipc, ip, el)
call kinematics_vacancy_strain_LiAndItsTangent(my_Li, my_dLi_dS, ipc, ip, el)
case (KINEMATICS_hydrogen_strain_ID) kinematicsType
call kinematics_hydrogen_strain_LiAndItsTangent(my_Li, my_dLi_dTstar, ipc, ip, el)
call kinematics_hydrogen_strain_LiAndItsTangent(my_Li, my_dLi_dS, ipc, ip, el)
case default kinematicsType
my_Li = 0.0_pReal
my_dLi_dTstar = 0.0_pReal
my_dLi_dS = 0.0_pReal
end select kinematicsType
Li = Li + my_Li
dLi_dTstar3333 = dLi_dTstar3333 + my_dLi_dTstar
dLi_dS = dLi_dS + my_dLi_dS
enddo KinematicsLoop
FiInv = math_inv33(Fi)
detFi = math_det33(Fi)
Li = math_mul33x33(math_mul33x33(Fi,Li),FiInv)*detFi !< push forward to intermediate configuration
temp_33 = math_mul33x33(FiInv,Li)
forall(i = 1_pInt:3_pInt, j = 1_pInt:3_pInt)
dLi_dTstar3333(1:3,1:3,i,j) = math_mul33x33(math_mul33x33(Fi,dLi_dTstar3333(1:3,1:3,i,j)),FiInv)*detFi
dLi_dFi3333 (1:3,1:3,i,j) = dLi_dFi3333(1:3,1:3,i,j) + Li*FiInv(j,i)
dLi_dFi3333 (1:3,i,1:3,j) = dLi_dFi3333(1:3,i,1:3,j) + math_I3*temp_33(j,i) + Li*FiInv(j,i)
end forall
end subroutine constitutive_LiAndItsTangent
do i = 1_pInt,3_pInt; do j = 1_pInt,3_pInt
dLi_dS(1:3,1:3,i,j) = math_mul33x33(math_mul33x33(Fi,dLi_dS(1:3,1:3,i,j)),FiInv)*detFi
dLi_dFi(1:3,1:3,i,j) = dLi_dFi(1:3,1:3,i,j) + Li*FiInv(j,i)
dLi_dFi(1:3,i,1:3,j) = dLi_dFi(1:3,i,1:3,j) + math_I3*temp_33(j,i) + Li*FiInv(j,i)
end do; end do
end subroutine constitutive_LiAndItsTangents
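The summed contributions above are pushed forward to the intermediate configuration via Li <- det(Fi) Fi Li Fi^-1. A minimal sketch of that push forward (hypothetical helper; FiInv and detFi assumed precomputed, e.g. by math_inv33 and math_det33):

! hypothetical helper: push forward of Li to the intermediate configuration
pure function push_forward_Li(Li_lattice, Fi, FiInv, detFi) result(Li)
  implicit none
  real, intent(in) :: Li_lattice(3,3)        ! velocity gradient before push forward
  real, intent(in) :: Fi(3,3), FiInv(3,3), detFi
  real             :: Li(3,3)
  Li = matmul(matmul(Fi, Li_lattice), FiInv) * detFi
end function push_forward_Li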
!--------------------------------------------------------------------------------------------------
@ -696,10 +720,10 @@ end function constitutive_initialFi
!--------------------------------------------------------------------------------------------------
!> @brief returns the 2nd Piola-Kirchhoff stress tensor and its tangent with respect to
!> the elastic deformation gradient depending on the selected elastic law (so far no case switch
!! because only Hooke is implemented
!> the elastic/intermediate deformation gradients depending on the selected elastic law
!! (so far no case switch because only Hooke is implemented)
!--------------------------------------------------------------------------------------------------
subroutine constitutive_TandItsTangent(T, dT_dFe, dT_dFi, Fe, Fi, ipc, ip, el)
subroutine constitutive_SandItsTangents(S, dS_dFe, dS_dFi, Fe, Fi, ipc, ip, el)
use prec, only: &
pReal
@ -712,34 +736,32 @@ subroutine constitutive_TandItsTangent(T, dT_dFe, dT_dFi, Fe, Fi, ipc, ip, el)
Fe, & !< elastic deformation gradient
Fi !< intermediate deformation gradient
real(pReal), intent(out), dimension(3,3) :: &
T !< 2nd Piola-Kirchhoff stress tensor
S !< 2nd Piola-Kirchhoff stress tensor
real(pReal), intent(out), dimension(3,3,3,3) :: &
dT_dFe, & !< derivative of 2nd P-K stress with respect to elastic deformation gradient
dT_dFi !< derivative of 2nd P-K stress with respect to intermediate deformation gradient
dS_dFe, & !< derivative of 2nd P-K stress with respect to elastic deformation gradient
dS_dFi !< derivative of 2nd P-K stress with respect to intermediate deformation gradient
call constitutive_hooke_TandItsTangent(T, dT_dFe, dT_dFi, Fe, Fi, ipc, ip, el)
call constitutive_hooke_SandItsTangents(S, dS_dFe, dS_dFi, Fe, Fi, ipc, ip, el)
end subroutine constitutive_TandItsTangent
end subroutine constitutive_SandItsTangents
!--------------------------------------------------------------------------------------------------
!> @brief returns the 2nd Piola-Kirchhoff stress tensor and its tangent with respect to
!> the elastic deformation gradient using hookes law
!> the elastic and intermediate deformation gradients using Hooke's law
!--------------------------------------------------------------------------------------------------
subroutine constitutive_hooke_TandItsTangent(T, dT_dFe, dT_dFi, Fe, Fi, ipc, ip, el)
subroutine constitutive_hooke_SandItsTangents(S, dS_dFe, dS_dFi, Fe, Fi, ipc, ip, el)
use prec, only: &
pReal
use math, only : &
math_mul3x3, &
math_mul33x33, &
math_mul3333xx33, &
math_Mandel66to3333, &
math_trace33, &
math_I3
use material, only: &
material_phase, &
material_homog, &
material_homogenizationAt, &
phase_NstiffnessDegradations, &
phase_stiffnessDegradation, &
damage, &
@ -758,10 +780,10 @@ subroutine constitutive_hooke_TandItsTangent(T, dT_dFe, dT_dFi, Fe, Fi, ipc, ip,
Fe, & !< elastic deformation gradient
Fi !< intermediate deformation gradient
real(pReal), intent(out), dimension(3,3) :: &
T !< 2nd Piola-Kirchhoff stress tensor in lattice configuration
S !< 2nd Piola-Kirchhoff stress tensor in lattice configuration
real(pReal), intent(out), dimension(3,3,3,3) :: &
dT_dFe, & !< derivative of 2nd P-K stress with respect to elastic deformation gradient
dT_dFi !< derivative of 2nd P-K stress with respect to intermediate deformation gradient
dS_dFe, & !< derivative of 2nd P-K stress with respect to elastic deformation gradient
dS_dFi !< derivative of 2nd P-K stress with respect to intermediate deformation gradient
real(pReal), dimension(3,3) :: E
real(pReal), dimension(3,3,3,3) :: C
integer(pInt) :: &
@ -770,8 +792,7 @@ subroutine constitutive_hooke_TandItsTangent(T, dT_dFe, dT_dFi, Fe, Fi, ipc, ip,
integer(pInt) :: &
i, j
ho = material_homog(ip,el)
ho = material_homogenizationAt(el)
C = math_Mandel66to3333(constitutive_homogenizedC(ipc,ip,el))
DegradationLoop: do d = 1_pInt, phase_NstiffnessDegradations(material_phase(ipc,ip,el))
@ -784,22 +805,22 @@ subroutine constitutive_hooke_TandItsTangent(T, dT_dFe, dT_dFi, Fe, Fi, ipc, ip,
enddo DegradationLoop
E = 0.5_pReal*(math_mul33x33(transpose(Fe),Fe)-math_I3) !< Green-Lagrange strain in unloaded configuration
T = math_mul3333xx33(C,math_mul33x33(math_mul33x33(transpose(Fi),E),Fi)) !< 2PK stress in lattice configuration in work conjugate with GL strain pulled back to lattice configuration
S = math_mul3333xx33(C,math_mul33x33(math_mul33x33(transpose(Fi),E),Fi)) !< 2PK stress in lattice configuration work conjugate with GL strain pulled back to lattice configuration
dT_dFe = 0.0_pReal
dS_dFe = 0.0_pReal
forall (i=1_pInt:3_pInt, j=1_pInt:3_pInt)
dT_dFe(i,j,1:3,1:3) = &
math_mul33x33(Fe,math_mul33x33(math_mul33x33(Fi,C(i,j,1:3,1:3)),transpose(Fi))) !< dT_ij/dFe_kl = C_ijmn * Fi_lm * Fi_on * Fe_ko
dT_dFi(i,j,1:3,1:3) = 2.0_pReal*math_mul33x33(math_mul33x33(E,Fi),C(i,j,1:3,1:3)) !< dT_ij/dFi_kl = C_ijln * E_km * Fe_mn
dS_dFe(i,j,1:3,1:3) = &
math_mul33x33(Fe,math_mul33x33(math_mul33x33(Fi,C(i,j,1:3,1:3)),transpose(Fi))) !< dS_ij/dFe_kl = C_ijmn * Fi_lm * Fi_on * Fe_ko
dS_dFi(i,j,1:3,1:3) = 2.0_pReal*math_mul33x33(math_mul33x33(E,Fi),C(i,j,1:3,1:3)) !< dS_ij/dFi_kl = C_ijln * E_km * Fe_mn
end forall
end subroutine constitutive_hooke_TandItsTangent
end subroutine constitutive_hooke_SandItsTangents
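The renamed routine evaluates S = C : (Fi^T E Fi) with the Green-Lagrange strain E = (Fe^T Fe - I)/2 in the unloaded configuration. A minimal standalone sketch of this stress measure (hypothetical helper, plain reals, intrinsic matmul and an explicit double contraction in place of math_mul33x33 and math_mul3333xx33):

! hypothetical helper: Hooke's law with the strain pulled back by Fi, as above
pure subroutine hooke_S(C, Fe, Fi, S)
  implicit none
  real, intent(in)  :: C(3,3,3,3)            ! elastic stiffness
  real, intent(in)  :: Fe(3,3), Fi(3,3)
  real, intent(out) :: S(3,3)                ! 2nd Piola-Kirchhoff stress
  real    :: E(3,3), Epulled(3,3), I3(3,3)
  integer :: i, j
  I3 = 0.0
  forall (i = 1:3) I3(i,i) = 1.0
  E       = 0.5 * (matmul(transpose(Fe), Fe) - I3)       ! Green-Lagrange strain
  Epulled = matmul(matmul(transpose(Fi), E), Fi)         ! pull back with Fi
  forall (i = 1:3, j = 1:3) S(i,j) = sum(C(i,j,:,:) * Epulled)
end subroutine hooke_S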
!--------------------------------------------------------------------------------------------------
!> @brief contains the constitutive equation for calculating the rate of change of microstructure
!--------------------------------------------------------------------------------------------------
subroutine constitutive_collectDotState(Tstar_v, FeArray, FpArray, subdt, subfracArray,ipc, ip, el)
subroutine constitutive_collectDotState(S6, FeArray, Fi, FpArray, subdt, subfracArray,ipc, ip, el)
use prec, only: &
pReal, &
pLongInt
@ -807,15 +828,22 @@ subroutine constitutive_collectDotState(Tstar_v, FeArray, FpArray, subdt, subfra
debug_level, &
debug_constitutive, &
debug_levelBasic
use math, only: &
math_mul33x33, &
math_Mandel6to33, &
math_Mandel33to6, &
math_mul33x33
use mesh, only: &
mesh_NcpElems, &
mesh_maxNips
use material, only: &
phasememberAt, &
phase_plasticityInstance, &
phase_plasticity, &
phase_source, &
phase_Nsources, &
material_phase, &
material_homog, &
material_homogenizationAt, &
temperature, &
thermalMapping, &
homogenization_maxNgrains, &
@ -863,45 +891,67 @@ subroutine constitutive_collectDotState(Tstar_v, FeArray, FpArray, subdt, subfra
real(pReal), intent(in), dimension(3,3,homogenization_maxNgrains,mesh_maxNips,mesh_NcpElems) :: &
FeArray, & !< elastic deformation gradient
FpArray !< plastic deformation gradient
real(pReal), intent(in), dimension(3,3) :: &
Fi !< intermediate deformation gradient
real(pReal), intent(in), dimension(6) :: &
Tstar_v !< 2nd Piola Kirchhoff stress tensor (Mandel)
S6 !< 2nd Piola Kirchhoff stress (vector notation)
real(pReal), dimension(3,3) :: &
Mp
integer(pInt) :: &
ho, & !< homogenization
tme, & !< thermal member position
s !< counter in source loop
s, & !< counter in source loop
instance, of
ho = material_homog( ip,el)
ho = material_homogenizationAt(el)
tme = thermalMapping(ho)%p(ip,el)
Mp = math_mul33x33(math_mul33x33(transpose(Fi),Fi),math_Mandel6to33(S6))
plasticityType: select case (phase_plasticity(material_phase(ipc,ip,el)))
case (PLASTICITY_ISOTROPIC_ID) plasticityType
call plastic_isotropic_dotState (Tstar_v,ipc,ip,el)
call plastic_isotropic_dotState (math_Mandel33to6(Mp),ipc,ip,el)
case (PLASTICITY_PHENOPOWERLAW_ID) plasticityType
call plastic_phenopowerlaw_dotState(Tstar_v,ipc,ip,el)
of = phasememberAt(ipc,ip,el)
instance = phase_plasticityInstance(material_phase(ipc,ip,el))
call plastic_phenopowerlaw_dotState(Mp,instance,of)
case (PLASTICITY_KINEHARDENING_ID) plasticityType
call plastic_kinehardening_dotState(Tstar_v,ipc,ip,el)
call plastic_kinehardening_dotState(math_Mandel33to6(Mp),ipc,ip,el)
case (PLASTICITY_DISLOTWIN_ID) plasticityType
call plastic_dislotwin_dotState (Tstar_v,temperature(ho)%p(tme), &
call plastic_dislotwin_dotState (math_Mandel33to6(Mp),temperature(ho)%p(tme), &
ipc,ip,el)
case (PLASTICITY_DISLOUCLA_ID) plasticityType
call plastic_disloucla_dotState (Tstar_v,temperature(ho)%p(tme), &
call plastic_disloucla_dotState (math_Mandel33to6(Mp),temperature(ho)%p(tme), &
ipc,ip,el)
case (PLASTICITY_NONLOCAL_ID) plasticityType
call plastic_nonlocal_dotState (Tstar_v,FeArray,FpArray,temperature(ho)%p(tme), &
call plastic_nonlocal_dotState (math_Mandel33to6(Mp),FeArray,FpArray,temperature(ho)%p(tme), &
subdt,subfracArray,ip,el)
end select plasticityType
SourceLoop: do s = 1_pInt, phase_Nsources(material_phase(ipc,ip,el))
sourceType: select case (phase_source(s,material_phase(ipc,ip,el)))
sourceType: select case (phase_source(s,material_phase(ipc,ip,el)))
case (SOURCE_damage_anisoBrittle_ID) sourceType
call source_damage_anisoBrittle_dotState (Tstar_v, ipc, ip, el)
call source_damage_anisoBrittle_dotState (S6, ipc, ip, el) !< correct stress?
case (SOURCE_damage_isoDuctile_ID) sourceType
call source_damage_isoDuctile_dotState ( ipc, ip, el)
case (SOURCE_damage_anisoDuctile_ID) sourceType
call source_damage_anisoDuctile_dotState ( ipc, ip, el)
case (SOURCE_thermal_externalheat_ID) sourceType
call source_thermal_externalheat_dotState( ipc, ip, el)
end select sourceType
enddo SourceLoop
end subroutine constitutive_collectDotState
@ -910,7 +960,7 @@ end subroutine constitutive_collectDotState
!> @brief for constitutive models having an instantaneous change of state
!> will return false if delta state is not needed/supported by the constitutive model
!--------------------------------------------------------------------------------------------------
subroutine constitutive_collectDeltaState(Tstar_v, Fe, ipc, ip, el)
subroutine constitutive_collectDeltaState(S6, Fe, Fi, ipc, ip, el)
use prec, only: &
pReal, &
pLongInt
@ -918,6 +968,10 @@ subroutine constitutive_collectDeltaState(Tstar_v, Fe, ipc, ip, el)
debug_level, &
debug_constitutive, &
debug_levelBasic
use math, only: &
math_Mandel6to33, &
math_Mandel33to6, &
math_mul33x33
use material, only: &
phase_plasticity, &
phase_source, &
@ -945,29 +999,43 @@ subroutine constitutive_collectDeltaState(Tstar_v, Fe, ipc, ip, el)
ip, & !< integration point
el !< element
real(pReal), intent(in), dimension(6) :: &
Tstar_v !< 2nd Piola-Kirchhoff stress
S6 !< 2nd Piola Kirchhoff stress (vector notation)
real(pReal), intent(in), dimension(3,3) :: &
Fe !< elastic deformation gradient
Fe, & !< elastic deformation gradient
Fi !< intermediate deformation gradient
real(pReal), dimension(3,3) :: &
Mstar
integer(pInt) :: &
s !< counter in source loop
Mstar = math_mul33x33(math_mul33x33(transpose(Fi),Fi),math_Mandel6to33(S6))
plasticityType: select case (phase_plasticity(material_phase(ipc,ip,el)))
case (PLASTICITY_KINEHARDENING_ID) plasticityType
call plastic_kinehardening_deltaState(Tstar_v,ipc,ip,el)
call plastic_kinehardening_deltaState(math_Mandel33to6(Mstar),ipc,ip,el)
case (PLASTICITY_NONLOCAL_ID) plasticityType
call plastic_nonlocal_deltaState(Tstar_v,ip,el)
call plastic_nonlocal_deltaState(math_Mandel33to6(Mstar),ip,el)
end select plasticityType
SourceLoop: do s = 1_pInt, phase_Nsources(material_phase(ipc,ip,el))
sourceLoop: do s = 1_pInt, phase_Nsources(material_phase(ipc,ip,el))
sourceType: select case (phase_source(s,material_phase(ipc,ip,el)))
case (SOURCE_damage_isoBrittle_ID) sourceType
call source_damage_isoBrittle_deltaState (constitutive_homogenizedC(ipc,ip,el), Fe, &
ipc, ip, el)
ipc, ip, el)
case (SOURCE_vacancy_irradiation_ID) sourceType
call source_vacancy_irradiation_deltaState(ipc, ip, el)
case (SOURCE_vacancy_thermalfluc_ID) sourceType
call source_vacancy_thermalfluc_deltaState(ipc, ip, el)
end select sourceType
enddo SourceLoop
end subroutine constitutive_collectDeltaState
@ -976,20 +1044,25 @@ end subroutine constitutive_collectDeltaState
!--------------------------------------------------------------------------------------------------
!> @brief returns array of constitutive results
!--------------------------------------------------------------------------------------------------
function constitutive_postResults(Tstar_v, FeArray, ipc, ip, el)
function constitutive_postResults(S6, Fi, FeArray, ipc, ip, el)
use prec, only: &
pReal
use math, only: &
math_Mandel6to33, &
math_mul33x33
use mesh, only: &
mesh_NcpElems, &
mesh_maxNips
use material, only: &
phasememberAt, &
phase_plasticityInstance, &
plasticState, &
sourceState, &
phase_plasticity, &
phase_source, &
phase_Nsources, &
material_phase, &
material_homog, &
material_homogenizationAt, &
temperature, &
thermalMapping, &
homogenization_maxNgrains, &
@ -1033,20 +1106,26 @@ function constitutive_postResults(Tstar_v, FeArray, ipc, ip, el)
real(pReal), dimension(plasticState(material_phase(ipc,ip,el))%sizePostResults + &
sum(sourceState(material_phase(ipc,ip,el))%p(:)%sizePostResults)) :: &
constitutive_postResults
real(pReal), intent(in), dimension(3,3) :: &
Fi !< intermediate deformation gradient
real(pReal), intent(in), dimension(3,3,homogenization_maxNgrains,mesh_maxNips,mesh_NcpElems) :: &
FeArray !< elastic deformation gradient
real(pReal), intent(in), dimension(6) :: &
Tstar_v !< 2nd Piola Kirchhoff stress tensor (Mandel)
S6 !< 2nd Piola Kirchhoff stress (vector notation)
real(pReal), dimension(3,3) :: &
Mp !< Mandel stress
integer(pInt) :: &
startPos, endPos
integer(pInt) :: &
ho, & !< homogenization
tme, & !< thermal member position
s !< counter in source loop
s, of, instance !< counter in source loop
constitutive_postResults = 0.0_pReal
ho = material_homog( ip,el)
Mp = math_mul33x33(math_mul33x33(transpose(Fi),Fi),math_Mandel6to33(S6))
ho = material_homogenizationAt(el)
tme = thermalMapping(ho)%p(ip,el)
startPos = 1_pInt
@ -1054,22 +1133,25 @@ function constitutive_postResults(Tstar_v, FeArray, ipc, ip, el)
plasticityType: select case (phase_plasticity(material_phase(ipc,ip,el)))
case (PLASTICITY_ISOTROPIC_ID) plasticityType
constitutive_postResults(startPos:endPos) = plastic_isotropic_postResults(Tstar_v,ipc,ip,el)
case (PLASTICITY_PHENOPOWERLAW_ID) plasticityType
constitutive_postResults(startPos:endPos) = &
plastic_phenopowerlaw_postResults(Tstar_v,ipc,ip,el)
plastic_isotropic_postResults(S6,ipc,ip,el)
case (PLASTICITY_PHENOPOWERLAW_ID) plasticityType
of = phasememberAt(ipc,ip,el)
instance = phase_plasticityInstance(material_phase(ipc,ip,el))
constitutive_postResults(startPos:endPos) = &
plastic_phenopowerlaw_postResults(Mp,instance,of)
case (PLASTICITY_KINEHARDENING_ID) plasticityType
constitutive_postResults(startPos:endPos) = &
plastic_kinehardening_postResults(Tstar_v,ipc,ip,el)
plastic_kinehardening_postResults(S6,ipc,ip,el)
case (PLASTICITY_DISLOTWIN_ID) plasticityType
constitutive_postResults(startPos:endPos) = &
plastic_dislotwin_postResults(Tstar_v,temperature(ho)%p(tme),ipc,ip,el)
plastic_dislotwin_postResults(S6,temperature(ho)%p(tme),ipc,ip,el)
case (PLASTICITY_DISLOUCLA_ID) plasticityType
constitutive_postResults(startPos:endPos) = &
plastic_disloucla_postResults(Tstar_v,temperature(ho)%p(tme),ipc,ip,el)
plastic_disloucla_postResults(S6,temperature(ho)%p(tme),ipc,ip,el)
case (PLASTICITY_NONLOCAL_ID) plasticityType
constitutive_postResults(startPos:endPos) = &
plastic_nonlocal_postResults (Tstar_v,FeArray,ip,el)
plastic_nonlocal_postResults (S6,FeArray,ip,el)
end select plasticityType
SourceLoop: do s = 1_pInt, phase_Nsources(material_phase(ipc,ip,el))

File diff suppressed because it is too large

View File

@ -109,6 +109,9 @@ subroutine debug_init
character(len=65536) :: tag, line
write(6,'(/,a)') ' <<<+- debug init -+>>>'
#ifdef DEBUG
write(6,'(a)') achar(27)//'[31m <<<+- DEBUG version -+>>>'//achar(27)//'[0m'
#endif
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
#include "compilation_info.f90"

View File

@ -449,8 +449,6 @@ subroutine materialpoint_stressAndItsTangent(updateJaco,dt)
subStepSizeHomog, &
stepIncreaseHomog, &
nMPstate
use math, only: &
math_transpose33
use FEsolving, only: &
FEsolving_execElem, &
FEsolving_execIP, &
@ -497,6 +495,7 @@ subroutine materialpoint_stressAndItsTangent(updateJaco,dt)
crystallite_converged, &
crystallite_stressAndItsTangent, &
crystallite_orientations
#ifdef DEBUG
use debug, only: &
debug_level, &
debug_homogenization, &
@ -505,6 +504,7 @@ subroutine materialpoint_stressAndItsTangent(updateJaco,dt)
debug_levelSelective, &
debug_e, &
debug_i
#endif
implicit none
real(pReal), intent(in) :: dt !< time increment
@ -518,18 +518,16 @@ subroutine materialpoint_stressAndItsTangent(updateJaco,dt)
mySource, &
myNgrains
!--------------------------------------------------------------------------------------------------
! initialize to starting condition
#ifdef DEBUG
if (iand(debug_level(debug_homogenization), debug_levelBasic) /= 0_pInt) then
!$OMP CRITICAL (write2out)
write(6,'(/a,i5,1x,i2)') '<< HOMOG >> Material Point start at el ip ', debug_e, debug_i
write(6,'(a,/,3(12x,3(f14.9,1x)/))') '<< HOMOG >> F0', &
math_transpose33(materialpoint_F0(1:3,1:3,debug_i,debug_e))
transpose(materialpoint_F0(1:3,1:3,debug_i,debug_e))
write(6,'(a,/,3(12x,3(f14.9,1x)/))') '<< HOMOG >> F', &
math_transpose33(materialpoint_F(1:3,1:3,debug_i,debug_e))
!$OMP END CRITICAL (write2out)
transpose(materialpoint_F(1:3,1:3,debug_i,debug_e))
endif
#endif
!--------------------------------------------------------------------------------------------------
! initialize restoration points of ...
@ -609,10 +607,8 @@ subroutine materialpoint_stressAndItsTangent(updateJaco,dt)
!---------------------------------------------------------------------------------------------------
! calculate new subStep and new subFrac
materialpoint_subFrac(i,e) = materialpoint_subFrac(i,e) + materialpoint_subStep(i,e)
!$OMP FLUSH(materialpoint_subFrac)
materialpoint_subStep(i,e) = min(1.0_pReal-materialpoint_subFrac(i,e), &
stepIncreaseHomog*materialpoint_subStep(i,e)) ! introduce flexibility for step increase/acceleration
!$OMP FLUSH(materialpoint_subStep)
steppingNeeded: if (materialpoint_subStep(i,e) > subStepMinHomog) then
@ -672,7 +668,6 @@ subroutine materialpoint_stressAndItsTangent(updateJaco,dt)
hydrogenfluxState(mappingHomogenization(2,i,e))%subState0(:,mappingHomogenization(1,i,e)) = &
hydrogenfluxState(mappingHomogenization(2,i,e))%State( :,mappingHomogenization(1,i,e))! ...internal hydrogen transport state
materialpoint_subF0(1:3,1:3,i,e) = materialpoint_subF(1:3,1:3,i,e) ! ...def grad
!$OMP FLUSH(materialpoint_subF0)
endif steppingNeeded
else converged
@ -690,7 +685,6 @@ subroutine materialpoint_stressAndItsTangent(updateJaco,dt)
!$OMP END CRITICAL (setTerminallyIll)
else ! cutback makes sense
materialpoint_subStep(i,e) = subStepSizeHomog * materialpoint_subStep(i,e) ! crystallite had severe trouble, so do a significant cutback
!$OMP FLUSH(materialpoint_subStep)
#ifdef DEBUG
if (iand(debug_level(debug_homogenization), debug_levelExtensive) /= 0_pInt &
@ -753,8 +747,9 @@ subroutine materialpoint_stressAndItsTangent(updateJaco,dt)
if (materialpoint_subStep(i,e) > subStepMinHomog) then
materialpoint_requested(i,e) = .true.
materialpoint_subF(1:3,1:3,i,e) = materialpoint_subF0(1:3,1:3,i,e) + &
materialpoint_subStep(i,e) * (materialpoint_F(1:3,1:3,i,e) - materialpoint_F0(1:3,1:3,i,e))
materialpoint_subF(1:3,1:3,i,e) = materialpoint_subF0(1:3,1:3,i,e) &
+ materialpoint_subStep(i,e) * (materialpoint_F(1:3,1:3,i,e) &
- materialpoint_F0(1:3,1:3,i,e))
materialpoint_subdt(i,e) = materialpoint_subStep(i,e) * dt
materialpoint_doneAndHappy(1:2,i,e) = [.false.,.true.]
endif
@ -811,13 +806,6 @@ subroutine materialpoint_stressAndItsTangent(updateJaco,dt)
materialpoint_doneAndHappy(1:2,i,e) = homogenization_updateState(i,e)
materialpoint_converged(i,e) = all(materialpoint_doneAndHappy(1:2,i,e)) ! converged if done and happy
endif
!$OMP FLUSH(materialpoint_converged)
if (materialpoint_converged(i,e)) then
if (iand(debug_level(debug_homogenization), debug_levelBasic) /= 0_pInt) then
!$OMP CRITICAL (distributionMPState)
!$OMP END CRITICAL (distributionMPState)
endif
endif
endif
enddo IpLooping3
enddo elementLooping3
@ -839,9 +827,7 @@ subroutine materialpoint_stressAndItsTangent(updateJaco,dt)
enddo elementLooping4
!$OMP END PARALLEL DO
else
!$OMP CRITICAL (write2out)
write(6,'(/,a,/)') '<< HOMOG >> Material Point terminally ill'
!$OMP END CRITICAL (write2out)
endif
end subroutine materialpoint_stressAndItsTangent

File diff suppressed because it is too large

View File

@ -169,6 +169,7 @@ module material
homogenization_maxNgrains !< max number of grains in any USED homogenization
integer(pInt), dimension(:), allocatable, public, protected :: &
material_homogenizationAt, & !< homogenization ID of each element (copy of mesh_homogenizationAt)
phase_Nsources, & !< number of source mechanisms active in each phase
phase_Nkinematics, & !< number of kinematic mechanisms active in each phase
phase_NstiffnessDegradations, & !< number of stiffness degradation mechanisms active in each phase
@ -199,8 +200,10 @@ module material
integer(pInt), dimension(:,:,:), allocatable, public :: &
material_phase !< phase (index) of each grain,IP,element
! BEGIN DEPRECATED: use material_homogenizationAt
integer(pInt), dimension(:,:), allocatable, public :: &
material_homog !< homogenization (index) of each IP,element
! END DEPRECATED
type(tPlasticState), allocatable, dimension(:), public :: &
plasticState
type(tSourceState), allocatable, dimension(:), public :: &
@ -362,10 +365,10 @@ subroutine material_init()
phase_name, &
texture_name
use mesh, only: &
mesh_homogenizationAt, &
mesh_NipsPerElem, &
mesh_maxNips, &
mesh_NcpElems, &
mesh_element, &
FE_Nips, &
FE_geomtype
implicit none
@ -480,11 +483,11 @@ subroutine material_init()
allocate(CrystallitePosition (size(config_phase)), source=0_pInt)
ElemLoop:do e = 1_pInt,mesh_NcpElems
myHomog = mesh_element(3,e)
IPloop:do i = 1_pInt,FE_Nips(FE_geomtype(mesh_element(2,e)))
myHomog = mesh_homogenizationAt(e)
IPloop:do i = 1_pInt, mesh_NipsPerElem
HomogenizationPosition(myHomog) = HomogenizationPosition(myHomog) + 1_pInt
mappingHomogenization(1:2,i,e) = [HomogenizationPosition(myHomog),myHomog]
GrainLoop:do g = 1_pInt,homogenization_Ngrains(mesh_element(3,e))
GrainLoop:do g = 1_pInt,homogenization_Ngrains(myHomog)
phase = material_phase(g,i,e)
ConstitutivePosition(phase) = ConstitutivePosition(phase)+1_pInt ! not distinguishing between instances of same phase
phaseAt(g,i,e) = phase
@ -519,10 +522,10 @@ end subroutine material_init
subroutine material_parseHomogenization
use config, only : &
config_homogenization
use mesh, only: &
mesh_homogenizationAt
use IO, only: &
IO_error
use mesh, only: &
mesh_element
implicit none
integer(pInt) :: h
@ -549,7 +552,8 @@ subroutine material_parseHomogenization
allocate(porosity_initialPhi(size(config_homogenization)), source=1.0_pReal)
allocate(hydrogenflux_initialCh(size(config_homogenization)), source=0.0_pReal)
forall (h = 1_pInt:size(config_homogenization)) homogenization_active(h) = any(mesh_element(3,:) == h)
forall (h = 1_pInt:size(config_homogenization)) &
homogenization_active(h) = any(mesh_homogenizationAt == h)
do h=1_pInt, size(config_homogenization)
@ -685,7 +689,7 @@ subroutine material_parseMicrostructure
config_microstructure, &
microstructure_name
use mesh, only: &
mesh_element, &
mesh_microstructureAt, &
mesh_NcpElems
implicit none
@ -701,10 +705,11 @@ subroutine material_parseMicrostructure
allocate(microstructure_active(size(config_microstructure)), source=.false.)
allocate(microstructure_elemhomo(size(config_microstructure)), source=.false.)
if(any(mesh_element(4,1:mesh_NcpElems) > size(config_microstructure))) &
if(any(mesh_microstructureAt > size(config_microstructure))) &
call IO_error(155_pInt,ext_msg='More microstructures in geometry than sections in material.config')
forall (e = 1_pInt:mesh_NcpElems) microstructure_active(mesh_element(4,e)) = .true. ! current microstructure used in model? Elementwise view, maximum N operations for N elements
forall (e = 1_pInt:mesh_NcpElems) &
microstructure_active(mesh_microstructureAt(e)) = .true. ! current microstructure used in model? Elementwise view, maximum N operations for N elements
do m=1_pInt, size(config_microstructure)
microstructure_Nconstituents(m) = config_microstructure(m)%countKeys('(constituent)')
@ -1082,11 +1087,13 @@ subroutine material_populateGrains
math_sampleFiberOri, &
math_symmetricEulers
use mesh, only: &
mesh_element, &
mesh_NipsPerElem, &
mesh_elemType, &
mesh_homogenizationAt, &
mesh_microstructureAt, &
mesh_maxNips, &
mesh_NcpElems, &
mesh_ipVolume, &
FE_Nips, &
FE_geomtype
use config, only: &
config_homogenization, &
@ -1127,6 +1134,7 @@ subroutine material_populateGrains
allocate(material_volume(homogenization_maxNgrains,mesh_maxNips,mesh_NcpElems), source=0.0_pReal)
allocate(material_phase(homogenization_maxNgrains,mesh_maxNips,mesh_NcpElems), source=0_pInt)
allocate(material_homog(mesh_maxNips,mesh_NcpElems), source=0_pInt)
allocate(material_homogenizationAt,source=mesh_homogenizationAt)
allocate(material_texture(homogenization_maxNgrains,mesh_maxNips,mesh_NcpElems), source=0_pInt)
allocate(material_EulerAngles(3,homogenization_maxNgrains,mesh_maxNips,mesh_NcpElems),source=0.0_pReal)
@ -1136,14 +1144,14 @@ subroutine material_populateGrains
! populating homogenization schemes in each
!--------------------------------------------------------------------------------------------------
do e = 1_pInt, mesh_NcpElems
material_homog(1_pInt:FE_Nips(FE_geomtype(mesh_element(2,e))),e) = mesh_element(3,e)
material_homog(1_pInt:mesh_NipsPerElem,e) = mesh_homogenizationAt(e)
enddo
!--------------------------------------------------------------------------------------------------
! precounting of elements for each homog/micro pair
do e = 1_pInt, mesh_NcpElems
homog = mesh_element(3,e)
micro = mesh_element(4,e)
homog = mesh_homogenizationAt(e)
micro = mesh_microstructureAt(e)
Nelems(homog,micro) = Nelems(homog,micro) + 1_pInt
enddo
allocate(elemsOfHomogMicro(size(config_homogenization),size(config_microstructure)))
@ -1160,9 +1168,9 @@ subroutine material_populateGrains
! identify maximum grain count per IP (from element) and find grains per homog/micro pair
Nelems = 0_pInt ! reuse as counter
elementLooping: do e = 1_pInt,mesh_NcpElems
t = FE_geomtype(mesh_element(2,e))
homog = mesh_element(3,e)
micro = mesh_element(4,e)
t = mesh_elemType
homog = mesh_homogenizationAt(e)
micro = mesh_microstructureAt(e)
if (homog < 1_pInt .or. homog > size(config_homogenization)) & ! out of bounds
call IO_error(154_pInt,e,0_pInt,0_pInt)
if (micro < 1_pInt .or. micro > size(config_microstructure)) & ! out of bounds
@ -1170,7 +1178,7 @@ subroutine material_populateGrains
if (microstructure_elemhomo(micro)) then ! how many grains are needed at this element?
dGrains = homogenization_Ngrains(homog) ! only one set of Ngrains (other IPs are plain copies)
else
dGrains = homogenization_Ngrains(homog) * FE_Nips(t) ! each IP has Ngrains
dGrains = homogenization_Ngrains(homog) * mesh_NipsPerElem ! each IP has Ngrains
endif
Ngrains(homog,micro) = Ngrains(homog,micro) + dGrains ! total grain count
Nelems(homog,micro) = Nelems(homog,micro) + 1_pInt ! total element count
@ -1204,16 +1212,16 @@ subroutine material_populateGrains
do hme = 1_pInt, Nelems(homog,micro)
e = elemsOfHomogMicro(homog,micro)%p(hme) ! my combination of homog and micro, only perform calculations for elements with homog, micro combinations which is indexed in cpElemsindex
t = FE_geomtype(mesh_element(2,e))
t = mesh_elemType
if (microstructure_elemhomo(micro)) then ! homogeneous distribution of grains over each element's IPs
volumeOfGrain(grain+1_pInt:grain+dGrains) = sum(mesh_ipVolume(1:FE_Nips(t),e))/&
volumeOfGrain(grain+1_pInt:grain+dGrains) = sum(mesh_ipVolume(1:mesh_NipsPerElem,e))/&
real(dGrains,pReal) ! each grain combines size of all IPs in that element
grain = grain + dGrains ! wind forward by Ngrains@IP
else
forall (i = 1_pInt:FE_Nips(t)) & ! loop over IPs
forall (i = 1_pInt:mesh_NipsPerElem) & ! loop over IPs
volumeOfGrain(grain+(i-1)*dGrains+1_pInt:grain+i*dGrains) = &
mesh_ipVolume(i,e)/real(dGrains,pReal) ! assign IPvolume/Ngrains@IP to all grains of IP
grain = grain + FE_Nips(t) * dGrains ! wind forward by Nips*Ngrains@IP
grain = grain + mesh_NipsPerElem * dGrains ! wind forward by Nips*Ngrains@IP
endif
enddo
@ -1367,11 +1375,11 @@ subroutine material_populateGrains
do hme = 1_pInt, Nelems(homog,micro)
e = elemsOfHomogMicro(homog,micro)%p(hme) ! only perform calculations for elements with homog, micro combinations which is indexed in cpElemsindex
t = FE_geomtype(mesh_element(2,e))
t = mesh_elemType
if (microstructure_elemhomo(micro)) then ! homogeneous distribution of grains over each element's IPs
m = 1_pInt ! process only first IP
else
m = FE_Nips(t) ! process all IPs
m = mesh_NipsPerElem
endif
do i = 1_pInt, m ! loop over necessary IPs
@ -1409,7 +1417,7 @@ subroutine material_populateGrains
enddo
do i = i, FE_Nips(t) ! loop over IPs to (possibly) distribute copies from first IP
do i = i, mesh_NipsPerElem ! loop over IPs to (possibly) distribute copies from first IP
material_volume (1_pInt:dGrains,i,e) = material_volume (1_pInt:dGrains,1,e)
material_phase (1_pInt:dGrains,i,e) = material_phase (1_pInt:dGrains,1,e)
material_texture(1_pInt:dGrains,i,e) = material_texture(1_pInt:dGrains,1,e)

View File

@ -3,7 +3,6 @@
!> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH
!> @author Christoph Koords, Max-Planck-Institut für Eisenforschung GmbH
!> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH
!> @author Krishna Komerla, Max-Planck-Institut für Eisenforschung GmbH
!> @brief Sets up the mesh for the solvers MSC.Marc, Abaqus and the spectral solver
!--------------------------------------------------------------------------------------------------
module mesh
@ -14,35 +13,27 @@ module mesh
private
integer(pInt), public, protected :: &
mesh_NcpElems, & !< total number of CP elements in local mesh
mesh_NelemSets, &
mesh_maxNelemInSet, &
mesh_Nmaterials, &
mesh_elemType, & !< element type of the mesh (only homogeneous meshes are supported)
mesh_Nnodes, & !< total number of nodes in mesh
mesh_Ncellnodes, & !< total number of cell nodes in mesh (including duplicates)
mesh_Ncells, & !< total number of cells in mesh
mesh_maxNnodes, & !< max number of nodes in any CP element
mesh_maxNips, & !< max number of IPs in any CP element
mesh_NipsPerElem, & !< number of IPs per element
mesh_NcellnodesPerElem, & !< number of cell nodes per element
mesh_maxNipNeighbors, & !< max number of IP neighbors in any CP element
mesh_maxNsharedElems, & !< max number of CP elements sharing a node
mesh_maxNcellnodes, & !< max number of cell nodes in any CP element
mesh_Nelems !< total number of elements in mesh
#ifdef Spectral
integer(pInt), dimension(3), public, protected :: &
grid !< (global) grid
mesh_maxNsharedElems !< max number of CP elements sharing a node
!!!! BEGIN DEPRECATED !!!!!
integer(pInt), public, protected :: &
mesh_NcpElemsGlobal, & !< total number of CP elements in global mesh
grid3, & !< (local) grid in 3rd direction
grid3Offset !< (local) grid offset in 3rd direction
real(pReal), dimension(3), public, protected :: &
geomSize
real(pReal), public, protected :: &
size3, & !< (local) size in 3rd direction
size3offset !< (local) size offset in 3rd direction
#endif
mesh_maxNips, & !< max number of IPs in any CP element
mesh_maxNcellnodes !< max number of cell nodes in any CP element
!!!! BEGIN DEPRECATED !!!!!
integer(pInt), dimension(:), allocatable, public, protected :: &
mesh_homogenizationAt, & !< homogenization ID of each element
mesh_microstructureAt !< microstructure ID of each element
integer(pInt), dimension(:,:), allocatable, public, protected :: &
mesh_element, & !< FEid, type(internal representation), material, texture, node indices as CP IDs
mesh_CPnodeID, & !< nodes forming an element
mesh_element, & !DEPRECATED
mesh_sharedElem, & !< entryCount and list of elements containing node
mesh_nodeTwins !< node twins are surface nodes that lie exactly on opposite sides of the mesh (surfaces nodes with equal coordinate values in two dimensions)
@ -71,36 +62,18 @@ module mesh
logical, dimension(3), public, protected :: mesh_periodicSurface !< flag indicating periodic outer surfaces (used for fluxes)
#ifdef Marc4DAMASK
#if defined(Marc4DAMASK) || defined(Abaqus)
integer(pInt), private :: &
MarcVersion, & !< Version of input file format (Marc only)
hypoelasticTableStyle, & !< Table style (Marc only)
initialcondTableStyle !< Table style (Marc only)
integer(pInt), dimension(:), allocatable, private :: &
Marc_matNumber !< array of material numbers for hypoelastic material (Marc only)
mesh_maxNelemInSet, &
mesh_Nmaterials
#endif
integer(pInt), dimension(2), private :: &
mesh_maxValStateVar = 0_pInt
#ifndef Spectral
character(len=64), dimension(:), allocatable, private :: &
mesh_nameElemSet, & !< names of elementSet
mesh_nameMaterial, & !< names of material in solid section
mesh_mapMaterial !< name of elementSet for material
integer(pInt), dimension(:,:), allocatable, private :: &
mesh_mapElemSet !< list of elements in elementSet
#endif
integer(pInt), dimension(:,:), allocatable, private :: &
integer(pInt), dimension(:,:), allocatable, private :: &
mesh_cellnodeParent !< cellnode's parent element ID, cellnode's intra-element ID
#if defined(Marc4DAMASK) || defined(Abaqus)
integer(pInt), dimension(:,:), allocatable, target, private :: &
mesh_mapFEtoCPelem, & !< [sorted FEid, corresponding CPid]
mesh_mapFEtoCPnode !< [sorted FEid, corresponding CPid]
#endif
integer(pInt),dimension(:,:,:), allocatable, private :: &
mesh_cell !< cell connectivity for each element,ip/cell
@ -116,10 +89,6 @@ module mesh
integer(pInt), dimension(:,:,:,:), allocatable, private :: &
FE_subNodeOnIPFace
#ifdef Abaqus
logical, private :: noPart !< for cases where the ABAQUS input file does not use part/assembly information
#endif
! These definitions should actually reside in the FE-solver specific part (different for MARC/ABAQUS)
! Hence, I suggest to prefix with "FE_"
@ -375,60 +344,81 @@ module mesh
4 & ! element 21 (3D 20node 27ip)
],pInt)
! integer(pInt), dimension(FE_Nelemtypes), parameter, private :: MESH_VTKELEMTYPE = &
! int([ &
! 5, & ! element 6 (2D 3node 1ip)
! 22, & ! element 125 (2D 6node 3ip)
! 9, & ! element 11 (2D 4node 4ip)
! 23, & ! element 27 (2D 8node 9ip)
! 23, & ! element 54 (2D 8node 4ip)
! 10, & ! element 134 (3D 4node 1ip)
! 10, & ! element 157 (3D 5node 4ip)
! 24, & ! element 127 (3D 10node 4ip)
! 13, & ! element 136 (3D 6node 6ip)
! 12, & ! element 117 (3D 8node 1ip)
! 12, & ! element 7 (3D 8node 8ip)
! 25, & ! element 57 (3D 20node 8ip)
! 25 & ! element 21 (3D 20node 27ip)
! ],pInt)
!
! integer(pInt), dimension(FE_Ncelltypes), parameter, private :: MESH_VTKCELLTYPE = &
! int([ &
! 5, & ! (2D 3node)
! 9, & ! (2D 4node)
! 10, & ! (3D 4node)
! 12 & ! (3D 8node)
! ],pInt)
#if defined(Spectral)
integer(pInt), dimension(3), public, protected :: &
grid !< (global) grid
integer(pInt), public, protected :: &
mesh_NcpElemsGlobal, & !< total number of CP elements in global mesh
grid3, & !< (local) grid in 3rd direction
grid3Offset !< (local) grid offset in 3rd direction
real(pReal), dimension(3), public, protected :: &
geomSize
real(pReal), public, protected :: &
size3, & !< (local) size in 3rd direction
size3offset !< (local) size offset in 3rd direction
#elif defined(Marc4DAMASK) || defined(Abaqus)
integer(pInt), private :: &
mesh_Nelems, & !< total number of elements in mesh (including non-DAMASK elements)
mesh_maxNnodes, & !< max number of nodes in any CP element
mesh_NelemSets
character(len=64), dimension(:), allocatable, private :: &
mesh_nameElemSet, & !< names of elementSet
mesh_nameMaterial, & !< names of material in solid section
mesh_mapMaterial !< name of elementSet for material
integer(pInt), dimension(:,:), allocatable, private :: &
mesh_mapElemSet !< list of elements in elementSet
integer(pInt), dimension(:,:), allocatable, target, private :: &
mesh_mapFEtoCPelem, & !< [sorted FEid, corresponding CPid]
mesh_mapFEtoCPnode !< [sorted FEid, corresponding CPid]
#endif
#if defined(Marc4DAMASK)
integer(pInt), private :: &
MarcVersion, & !< Version of input file format (Marc only)
hypoelasticTableStyle, & !< Table style (Marc only)
initialcondTableStyle !< Table style (Marc only)
integer(pInt), dimension(:), allocatable, private :: &
Marc_matNumber !< array of material numbers for hypoelastic material (Marc only)
#elif defined(Abaqus)
logical, private :: noPart !< for cases where the ABAQUS input file does not use part/assembly information
#endif
public :: &
mesh_init, &
#if defined(Marc4DAMASK) || defined(Abaqus)
mesh_FEasCP, &
#endif
mesh_build_cellnodes, &
mesh_build_ipVolumes, &
mesh_build_ipCoordinates, &
mesh_cellCenterCoordinates, &
mesh_get_Ncellnodes, &
mesh_get_unitlength, &
mesh_get_nodeAtIP
#ifdef Spectral
public :: &
mesh_get_nodeAtIP, &
#if defined(Spectral)
mesh_spectral_getGrid, &
mesh_spectral_getSize
#elif defined(Marc4DAMASK) || defined(Abaqus)
mesh_FEasCP
#endif
private :: &
#ifdef Spectral
mesh_get_damaskOptions, &
mesh_build_cellconnectivity, &
mesh_build_ipAreas, &
mesh_tell_statistics, &
FE_mapElemtype, &
mesh_faceMatch, &
mesh_build_FEdata, &
#if defined(Spectral)
mesh_spectral_getHomogenization, &
mesh_spectral_count, &
mesh_spectral_count_cpSizes, &
mesh_spectral_build_nodes, &
mesh_spectral_build_elements, &
mesh_spectral_build_ipNeighborhood, &
#elif defined Marc4DAMASK
mesh_spectral_build_ipNeighborhood
#elif defined(Marc4DAMASK) || defined(Abaqus)
mesh_build_nodeTwins, &
mesh_build_sharedElems, &
mesh_build_ipNeighborhood, &
#endif
#if defined(Marc4DAMASK)
mesh_marc_get_fileFormat, &
mesh_marc_get_tableStyles, &
mesh_marc_get_matNumber, &
@ -440,8 +430,8 @@ module mesh
mesh_marc_map_nodes, &
mesh_marc_build_nodes, &
mesh_marc_count_cpSizes, &
mesh_marc_build_elements, &
#elif defined Abaqus
mesh_marc_build_elements
#elif defined(Abaqus)
mesh_abaqus_count_nodesAndElements, &
mesh_abaqus_count_elementSets, &
mesh_abaqus_count_materials, &
@ -452,20 +442,8 @@ module mesh
mesh_abaqus_map_nodes, &
mesh_abaqus_build_nodes, &
mesh_abaqus_count_cpSizes, &
mesh_abaqus_build_elements, &
mesh_abaqus_build_elements
#endif
#ifndef Spectral
mesh_build_nodeTwins, &
mesh_build_sharedElems, &
mesh_build_ipNeighborhood, &
#endif
mesh_get_damaskOptions, &
mesh_build_cellconnectivity, &
mesh_build_ipAreas, &
mesh_tell_statistics, &
FE_mapElemtype, &
mesh_faceMatch, &
mesh_build_FEdata
contains
@ -509,12 +487,12 @@ subroutine mesh_init(ip,el)
numerics_unitlength, &
worldrank
use FEsolving, only: &
FEsolving_execElem, &
#ifndef Spectral
modelName, &
calcMode, &
#endif
FEsolving_execIP, &
calcMode
FEsolving_execElem, &
FEsolving_execIP
implicit none
#ifdef Spectral
@ -523,7 +501,7 @@ subroutine mesh_init(ip,el)
integer :: ierr, worldsize
#endif
integer(pInt), parameter :: FILEUNIT = 222_pInt
integer(pInt), intent(in) :: el, ip
integer(pInt), intent(in), optional :: el, ip
integer(pInt) :: j
logical :: myDebug
@ -546,8 +524,12 @@ subroutine mesh_init(ip,el)
if(worldsize>grid(3)) call IO_error(894_pInt, ext_msg='number of processes exceeds grid(3)')
geomSize = mesh_spectral_getSize(fileUnit)
devNull = fftw_mpi_local_size_3d(int(grid(3),C_INTPTR_T),int(grid(2),C_INTPTR_T),&
int(grid(1),C_INTPTR_T)/2+1,PETSC_COMM_WORLD,local_K,local_K_offset)
devNull = fftw_mpi_local_size_3d(int(grid(3),C_INTPTR_T), &
int(grid(2),C_INTPTR_T), &
int(grid(1),C_INTPTR_T)/2+1, &
PETSC_COMM_WORLD, &
local_K, & ! domain grid size along z
local_K_offset) ! domain grid offset along z
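! note: the function result of fftw_mpi_local_size_3d (local complex array size to allocate)
!       is not needed here, hence the assignment to devNull; only the z-slab decomposition
!       (local_K layers starting at local_K_offset) is used below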
grid3 = int(local_K,pInt)
grid3Offset = int(local_K_offset,pInt)
size3 = geomSize(3)*real(grid3,pReal) /real(grid(3),pReal)
@ -647,25 +629,36 @@ subroutine mesh_init(ip,el)
call mesh_tell_statistics
endif
#if defined(Marc4DAMASK) || defined(Abaqus)
if (usePingPong .and. (mesh_Nelems /= mesh_NcpElems)) &
call IO_error(600_pInt) ! ping-pong must be disabled when having non-DAMASK elements
#endif
if (debug_e < 1 .or. debug_e > mesh_NcpElems) &
call IO_error(602_pInt,ext_msg='element') ! selected element does not exist
if (debug_i < 1 .or. debug_i > FE_Nips(FE_geomtype(mesh_element(2_pInt,debug_e)))) &
call IO_error(602_pInt,ext_msg='IP') ! selected element does not have requested IP
FEsolving_execElem = [ 1_pInt,mesh_NcpElems ] ! parallel loop bounds set to comprise all DAMASK elements
allocate(FEsolving_execIP(2_pInt,mesh_NcpElems)); FEsolving_execIP = 1_pInt ! parallel loop bounds set to comprise from first IP...
allocate(FEsolving_execIP(2_pInt,mesh_NcpElems), source=1_pInt) ! parallel loop bounds set to comprise from first IP...
forall (j = 1_pInt:mesh_NcpElems) FEsolving_execIP(2,j) = FE_Nips(FE_geomtype(mesh_element(2,j))) ! ...up to own IP count for each element
#if defined(Marc4DAMASK) || defined(Abaqus)
allocate(calcMode(mesh_maxNips,mesh_NcpElems))
calcMode = .false. ! pretend to have collected what first call is asking (F = I)
#if defined(Marc4DAMASK) || defined(Abaqus)
calcMode(ip,mesh_FEasCP('elem',el)) = .true. ! first ip,el needs to be already pingponged to "calc"
#else
calcMode(ip,el) = .true. ! first ip,el needs to be already pingponged to "calc"
#endif
!!!! COMPATIBILITY HACK !!!!
! for a homogeneous mesh, all elements have the same number of IPs and cell nodes.
! hence, xxPerElem instead of maxXX
mesh_NipsPerElem = mesh_maxNips
mesh_NcellnodesPerElem = mesh_maxNcellnodes
! better name
mesh_homogenizationAt = mesh_element(3,:)
mesh_microstructureAt = mesh_element(4,:)
mesh_CPnodeID = mesh_element(5:4+mesh_NipsPerElem,:)
!!!!!!!!!!!!!!!!!!!!!!!!
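! layout of mesh_element(:,e) assumed by the aliases above:
!   row 1: FE id (now -1, DEPRECATED)     row 2: element type
!   row 3: homogenization ID              row 4: microstructure ID
!   rows 5 to 4+mesh_NipsPerElem: node IDs of the element (as CP IDs)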
end subroutine mesh_init
@ -1184,8 +1177,7 @@ subroutine mesh_spectral_count()
implicit none
mesh_Nelems = product(grid(1:2))*grid3
mesh_NcpElems= mesh_Nelems
mesh_NcpElems= product(grid(1:2))*grid3
mesh_Nnodes = product(grid(1:2) + 1_pInt)*(grid3 + 1_pInt)
mesh_NcpElemsGlobal = product(grid)
@ -1195,7 +1187,7 @@ end subroutine mesh_spectral_count
!--------------------------------------------------------------------------------------------------
!> @brief Gets maximum count of nodes, IPs, IP neighbors, and subNodes among cpElements.
!! Sets global values 'mesh_maxNnodes', 'mesh_maxNips', 'mesh_maxNipNeighbors',
!! Sets global values 'mesh_maxNips', 'mesh_maxNipNeighbors',
!! and 'mesh_maxNcellnodes'
!--------------------------------------------------------------------------------------------------
subroutine mesh_spectral_count_cpSizes
@ -1207,7 +1199,6 @@ subroutine mesh_spectral_count_cpSizes
g = FE_geomtype(t)
c = FE_celltype(g)
mesh_maxNnodes = FE_Nnodes(t)
mesh_maxNips = FE_Nips(g)
mesh_maxNipNeighbors = FE_NipNeighbors(c)
mesh_maxNcellnodes = FE_Ncellnodes(g)
@ -1268,13 +1259,13 @@ subroutine mesh_spectral_build_elements(fileUnit)
integer(pInt) :: &
e, i, &
headerLength = 0_pInt, &
maxIntCount, &
maxDataPerLine, &
homog, &
elemType, &
elemOffset
integer(pInt), dimension(:), allocatable :: &
microstructures, &
mesh_microGlobal
microGlobal
integer(pInt), dimension(1,1) :: &
dummySet = 0_pInt
character(len=65536) :: &
@ -1304,16 +1295,16 @@ subroutine mesh_spectral_build_elements(fileUnit)
read(fileUnit,'(a65536)') line
enddo
maxIntCount = 0_pInt
maxDataPerLine = 0_pInt
i = 1_pInt
do while (i > 0_pInt)
i = IO_countContinuousIntValues(fileUnit)
maxIntCount = max(maxIntCount, i)
maxDataPerLine = max(maxDataPerLine, i) ! found a longer line?
enddo
allocate (mesh_element (4_pInt+mesh_maxNnodes,mesh_NcpElems), source = 0_pInt)
allocate (microstructures (1_pInt+maxIntCount), source = 1_pInt)
allocate (mesh_microGlobal(mesh_NcpElemsGlobal), source = 1_pInt)
allocate(mesh_element (4_pInt+8_pInt,mesh_NcpElems), source = 0_pInt)
allocate(microstructures (1_pInt+maxDataPerLine), source = 1_pInt) ! first entry holds the data count, followed by up to maxDataPerLine values
allocate(microGlobal (mesh_NcpElemsGlobal), source = 1_pInt)
!--------------------------------------------------------------------------------------------------
! read in microstructures
@ -1324,10 +1315,10 @@ subroutine mesh_spectral_build_elements(fileUnit)
e = 0_pInt
do while (e < mesh_NcpElemsGlobal .and. microstructures(1) > 0_pInt) ! fill expected number of elements, stop at end of data (or blank line!)
microstructures = IO_continuousIntValues(fileUnit,maxIntCount,dummyName,dummySet,0_pInt) ! get affected elements
microstructures = IO_continuousIntValues(fileUnit,maxDataPerLine,dummyName,dummySet,0_pInt) ! get affected elements
do i = 1_pInt,microstructures(1_pInt)
e = e+1_pInt ! valid element entry
mesh_microGlobal(e) = microstructures(1_pInt+i)
microGlobal(e) = microstructures(1_pInt+i)
enddo
enddo
@ -1336,10 +1327,10 @@ subroutine mesh_spectral_build_elements(fileUnit)
e = 0_pInt
do while (e < mesh_NcpElems) ! fill expected number of elements, stop at end of data (or blank line!)
e = e+1_pInt ! valid element entry
mesh_element( 1,e) = e ! FE id
mesh_element( 1,e) = -1_pInt ! DEPRECATED
mesh_element( 2,e) = elemType ! elem type
mesh_element( 3,e) = homog ! homogenization
mesh_element( 4,e) = mesh_microGlobal(e+elemOffset) ! microstructure
mesh_element( 4,e) = microGlobal(e+elemOffset) ! microstructure
mesh_element( 5,e) = e + (e-1_pInt)/grid(1) + &
((e-1_pInt)/(grid(1)*grid(2)))*(grid(1)+1_pInt) ! base node
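! note: the nodes form a (grid(1)+1) x (grid(2)+1) x (grid3+1) lattice, so the two integer
!       divisions skip one extra node per completed element row and one extra node row per
!       completed element layer when mapping element e to its base (lowest-index) node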
mesh_element( 6,e) = mesh_element(5,e) + 1_pInt
@ -1715,8 +1706,8 @@ subroutine mesh_marc_map_elementSets(fileUnit)
character(len=300) :: line
integer(pInt) :: elemSet = 0_pInt
allocate (mesh_nameElemSet(mesh_NelemSets)) ; mesh_nameElemSet = ''
allocate (mesh_mapElemSet(1_pInt+mesh_maxNelemInSet,mesh_NelemSets)) ; mesh_mapElemSet = 0_pInt
allocate (mesh_nameElemSet(mesh_NelemSets)); mesh_nameElemSet = ''
allocate (mesh_mapElemSet(1_pInt+mesh_maxNelemInSet,mesh_NelemSets), source=0_pInt)
610 FORMAT(A300)
@ -1813,7 +1804,7 @@ subroutine mesh_marc_map_elements(fileUnit)
integer(pInt), dimension (1_pInt+mesh_NcpElems) :: contInts
integer(pInt) :: i,cpElem = 0_pInt
allocate (mesh_mapFEtoCPelem(2,mesh_NcpElems)) ; mesh_mapFEtoCPelem = 0_pInt
allocate (mesh_mapFEtoCPelem(2,mesh_NcpElems), source = 0_pInt)
610 FORMAT(A300)
@ -1883,7 +1874,7 @@ subroutine mesh_marc_map_nodes(fileUnit)
integer(pInt), dimension (mesh_Nnodes) :: node_count
integer(pInt) :: i
allocate (mesh_mapFEtoCPnode(2_pInt,mesh_Nnodes)) ; mesh_mapFEtoCPnode = 0_pInt
allocate (mesh_mapFEtoCPnode(2_pInt,mesh_Nnodes),source=0_pInt)
610 FORMAT(A300)
@ -1930,8 +1921,8 @@ subroutine mesh_marc_build_nodes(fileUnit)
character(len=300) :: line
integer(pInt) :: i,j,m
allocate ( mesh_node0 (3,mesh_Nnodes) ); mesh_node0 = 0.0_pReal
allocate ( mesh_node (3,mesh_Nnodes) ); mesh_node = 0.0_pReal
allocate ( mesh_node0 (3,mesh_Nnodes), source=0.0_pReal)
allocate ( mesh_node (3,mesh_Nnodes), source=0.0_pReal)
610 FORMAT(A300)
@ -2023,7 +2014,8 @@ subroutine mesh_marc_build_elements(fileUnit)
IO_skipChunks, &
IO_stringPos, &
IO_intValue, &
IO_continuousIntValues
IO_continuousIntValues, &
IO_error
implicit none
integer(pInt), intent(in) :: fileUnit
@ -2034,7 +2026,8 @@ subroutine mesh_marc_build_elements(fileUnit)
integer(pInt), dimension(1_pInt+mesh_NcpElems) :: contInts
integer(pInt) :: i,j,t,sv,myVal,e,nNodesAlreadyRead
allocate (mesh_element(4_pInt+mesh_maxNnodes,mesh_NcpElems)) ; mesh_element = 0_pInt
allocate(mesh_element(4_pInt+mesh_maxNnodes,mesh_NcpElems), source=0_pInt)
mesh_elemType = -1_pInt
610 FORMAT(A300)
@ -2049,8 +2042,11 @@ subroutine mesh_marc_build_elements(fileUnit)
chunkPos = IO_stringPos(line)
e = mesh_FEasCP('elem',IO_intValue(line,chunkPos,1_pInt))
if (e /= 0_pInt) then ! disregard non CP elems
mesh_element(1,e) = IO_IntValue (line,chunkPos,1_pInt) ! FE id
t = FE_mapElemtype(IO_StringValue(line,chunkPos,2_pInt)) ! elem type
mesh_element(1,e) = -1_pInt ! DEPRECATED
t = FE_mapElemtype(IO_StringValue(line,chunkPos,2_pInt)) ! elem type
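! only a single element type per mesh is supported; a deviating type aborts with IO_error(191)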
if (mesh_elemType /= t .and. mesh_elemType /= -1_pInt) &
call IO_error(191,el=t,ip=mesh_elemType)
mesh_elemType = t
mesh_element(2,e) = t
nNodesAlreadyRead = 0_pInt
do j = 1_pInt,chunkPos(1)-2_pInt
@ -2280,8 +2276,8 @@ subroutine mesh_abaqus_map_elementSets(fileUnit)
integer(pInt) :: elemSet = 0_pInt,i
logical :: inPart = .false.
allocate (mesh_nameElemSet(mesh_NelemSets)) ; mesh_nameElemSet = ''
allocate (mesh_mapElemSet(1_pInt+mesh_maxNelemInSet,mesh_NelemSets)) ; mesh_mapElemSet = 0_pInt
allocate (mesh_nameElemSet(mesh_NelemSets)); mesh_nameElemSet = ''
allocate (mesh_mapElemSet(1_pInt+mesh_maxNelemInSet,mesh_NelemSets),source=0_pInt)
610 FORMAT(A300)
@ -2332,8 +2328,8 @@ subroutine mesh_abaqus_map_materials(fileUnit)
logical :: inPart = .false.
character(len=64) :: elemSetName,materialName
allocate (mesh_nameMaterial(mesh_Nmaterials)) ; mesh_nameMaterial = ''
allocate (mesh_mapMaterial(mesh_Nmaterials)) ; mesh_mapMaterial = ''
allocate (mesh_nameMaterial(mesh_Nmaterials)); mesh_nameMaterial = ''
allocate (mesh_mapMaterial(mesh_Nmaterials)); mesh_mapMaterial = ''
610 FORMAT(A300)
@ -2450,7 +2446,7 @@ subroutine mesh_abaqus_map_elements(fileUnit)
logical :: materialFound = .false.
character (len=64) materialName,elemSetName ! why limited to 64? ABAQUS?
allocate (mesh_mapFEtoCPelem(2,mesh_NcpElems)) ; mesh_mapFEtoCPelem = 0_pInt
allocate (mesh_mapFEtoCPelem(2,mesh_NcpElems), source = 0_pInt)
610 FORMAT(A300)
@ -2513,7 +2509,7 @@ subroutine mesh_abaqus_map_nodes(fileUnit)
integer(pInt) :: i,c,cpNode = 0_pInt
logical :: inPart = .false.
allocate (mesh_mapFEtoCPnode(2_pInt,mesh_Nnodes)) ; mesh_mapFEtoCPnode = 0_pInt
allocate (mesh_mapFEtoCPnode(2_pInt,mesh_Nnodes), source=0_pInt)
610 FORMAT(A300)
@ -2575,8 +2571,8 @@ subroutine mesh_abaqus_build_nodes(fileUnit)
integer(pInt) :: i,j,m,c
logical :: inPart
allocate ( mesh_node0 (3,mesh_Nnodes) ); mesh_node0 = 0.0_pReal
allocate ( mesh_node (3,mesh_Nnodes) ); mesh_node = 0.0_pReal
allocate ( mesh_node0 (3,mesh_Nnodes), source=0.0_pReal)
allocate ( mesh_node (3,mesh_Nnodes), source=0.0_pReal)
610 FORMAT(A300)
@ -2688,8 +2684,8 @@ subroutine mesh_abaqus_build_elements(fileUnit)
IO_intValue, &
IO_extractValue, &
IO_floatValue, &
IO_error, &
IO_countDataLines
IO_countDataLines, &
IO_error
implicit none
integer(pInt), intent(in) :: fileUnit
@ -2701,7 +2697,8 @@ subroutine mesh_abaqus_build_elements(fileUnit)
character (len=64) :: materialName,elemSetName
character(len=300) :: line
allocate (mesh_element (4_pInt+mesh_maxNnodes,mesh_NcpElems)) ; mesh_element = 0_pInt
allocate(mesh_element (4_pInt+mesh_maxNnodes,mesh_NcpElems), source=0_pInt)
mesh_elemType = -1_pInt
610 FORMAT(A300)
@ -2720,17 +2717,20 @@ subroutine mesh_abaqus_build_elements(fileUnit)
IO_lc(IO_stringValue(line,chunkPos,2_pInt)) /= 'matrix' .and. &
IO_lc(IO_stringValue(line,chunkPos,2_pInt)) /= 'response' ) &
) then
t = FE_mapElemtype(IO_extractValue(IO_lc(IO_stringValue(line,chunkPos,2_pInt)),'type')) ! remember elem type
t = FE_mapElemtype(IO_extractValue(IO_lc(IO_stringValue(line,chunkPos,2_pInt)),'type')) ! remember elem type
c = IO_countDataLines(fileUnit)
do i = 1_pInt,c
backspace(fileUnit)
enddo
do i = 1_pInt,c
read (fileUnit,610,END=620) line
chunkPos = IO_stringPos(line) ! limit to 64 nodes max
chunkPos = IO_stringPos(line) ! limit to 64 nodes max
e = mesh_FEasCP('elem',IO_intValue(line,chunkPos,1_pInt))
if (e /= 0_pInt) then ! disregard non CP elems
mesh_element(1,e) = IO_intValue(line,chunkPos,1_pInt) ! FE id
if (e /= 0_pInt) then ! disregard non CP elems
mesh_element(1,e) = -1_pInt ! DEPRECATED
if (mesh_elemType /= t .and. mesh_elemType /= -1_pInt) &
call IO_error(191,el=t,ip=mesh_elemType)
mesh_elemType = t
mesh_element(2,e) = t ! elem type
nNodesAlreadyRead = 0_pInt
do j = 1_pInt,chunkPos(1)-1_pInt
@ -3010,7 +3010,7 @@ subroutine mesh_build_sharedElems
myDim, & ! dimension index
nodeTwin ! node twin in the specified dimension
integer(pInt), dimension (mesh_Nnodes) :: node_count
integer(pInt), dimension (:), allocatable :: node_seen
integer(pInt), dimension(:), allocatable :: node_seen
allocate(node_seen(maxval(FE_NmatchingNodes)))
@ -3035,8 +3035,7 @@ subroutine mesh_build_sharedElems
mesh_maxNsharedElems = int(maxval(node_count),pInt) ! most shared node
allocate(mesh_sharedElem(1+mesh_maxNsharedElems,mesh_Nnodes))
mesh_sharedElem = 0_pInt
allocate(mesh_sharedElem(1+mesh_maxNsharedElems,mesh_Nnodes),source=0_pInt)
do e = 1_pInt,mesh_NcpElems
g = FE_geomtype(mesh_element(2,e)) ! get elemGeomType
@ -3258,7 +3257,7 @@ subroutine mesh_tell_statistics
if (mesh_maxValStateVar(1) < 1_pInt) call IO_error(error_ID=170_pInt) ! no homogenization specified
if (mesh_maxValStateVar(2) < 1_pInt) call IO_error(error_ID=180_pInt) ! no microstructure specified
allocate (mesh_HomogMicro(mesh_maxValStateVar(1),mesh_maxValStateVar(2))); mesh_HomogMicro = 0_pInt
allocate (mesh_HomogMicro(mesh_maxValStateVar(1),mesh_maxValStateVar(2)),source = 0_pInt)
do e = 1_pInt,mesh_NcpElems
if (mesh_element(3,e) < 1_pInt) call IO_error(error_ID=170_pInt,el=e) ! no homogenization specified
if (mesh_element(4,e) < 1_pInt) call IO_error(error_ID=180_pInt,el=e) ! no microstructure specified
@ -3268,13 +3267,8 @@ subroutine mesh_tell_statistics
!$OMP CRITICAL (write2out)
if (iand(myDebug,debug_levelBasic) /= 0_pInt) then
write(6,'(/,a,/)') ' Input Parser: STATISTICS'
write(6,*) mesh_Nelems, ' : total number of elements in mesh'
write(6,*) mesh_NcpElems, ' : total number of CP elements in mesh'
write(6,*) mesh_Nnodes, ' : total number of nodes in mesh'
write(6,*) mesh_maxNnodes, ' : max number of nodes in any CP element'
write(6,*) mesh_maxNips, ' : max number of IPs in any CP element'
write(6,*) mesh_maxNipNeighbors, ' : max number of IP neighbors in any CP element'
write(6,*) mesh_maxNsharedElems, ' : max number of CP elements sharing a node'
write(6,'(/,a,/)') ' Input Parser: HOMOGENIZATION/MICROSTRUCTURE'
write(6,*) mesh_maxValStateVar(1), ' : maximum homogenization index'
write(6,*) mesh_maxValStateVar(2), ' : maximum microstructure index'
@ -3527,11 +3521,11 @@ subroutine mesh_build_FEdata
implicit none
integer(pInt) :: me
allocate(FE_nodesAtIP(FE_maxmaxNnodesAtIP,FE_maxNips,FE_Ngeomtypes)); FE_nodesAtIP = 0_pInt
allocate(FE_ipNeighbor(FE_maxNipNeighbors,FE_maxNips,FE_Ngeomtypes)); FE_ipNeighbor = 0_pInt
allocate(FE_cell(FE_maxNcellnodesPerCell,FE_maxNips,FE_Ngeomtypes)); FE_cell = 0_pInt
allocate(FE_cellnodeParentnodeWeights(FE_maxNnodes,FE_maxNcellnodes,FE_Nelemtypes)); FE_cellnodeParentnodeWeights = 0.0_pReal
allocate(FE_cellface(FE_maxNcellnodesPerCellface,FE_maxNcellfaces,FE_Ncelltypes)); FE_cellface = 0_pInt
allocate(FE_nodesAtIP(FE_maxmaxNnodesAtIP,FE_maxNips,FE_Ngeomtypes), source=0_pInt)
allocate(FE_ipNeighbor(FE_maxNipNeighbors,FE_maxNips,FE_Ngeomtypes), source=0_pInt)
allocate(FE_cell(FE_maxNcellnodesPerCell,FE_maxNips,FE_Ngeomtypes), source=0_pInt)
allocate(FE_cellnodeParentnodeWeights(FE_maxNnodes,FE_maxNcellnodes,FE_Nelemtypes), source=0.0_pReal)
allocate(FE_cellface(FE_maxNcellnodesPerCellface,FE_maxNcellfaces,FE_Ncelltypes), source=0_pInt)
!*** fill FE_nodesAtIP with data ***
View File
@ -8,30 +8,38 @@
!> results
!--------------------------------------------------------------------------------------------------
module mesh
#include <petsc/finclude/petscdmplex.h>
#include <petsc/finclude/petscis.h>
#include <petsc/finclude/petscdmda.h>
use prec, only: pReal, pInt
use PETScdmplex
use PETScdmda
use PETScis
implicit none
private
integer(pInt), public, parameter :: &
mesh_ElemType=1_pInt !< Element type of the mesh (only homogeneous meshes are supported)
integer(pInt), public, protected :: &
mesh_Nboundaries, &
mesh_NcpElems, & !< total number of CP elements in mesh
mesh_NcpElemsGlobal, &
mesh_Nnodes, & !< total number of nodes in mesh
mesh_maxNnodes, & !< max number of nodes in any CP element
mesh_maxNips, & !< max number of IPs in any CP element
mesh_maxNipNeighbors, &
mesh_Nelems !< total number of elements in mesh
mesh_NipsPerElem, & !< number of IPs per element
mesh_maxNipNeighbors
!!!! BEGIN DEPRECATED !!!!!
integer(pInt), public, protected :: &
mesh_maxNips !< max number of IPs in any CP element
!!!! END DEPRECATED !!!!!
real(pReal), public, protected :: charLength
integer(pInt), dimension(:), allocatable, public, protected :: &
mesh_homogenizationAt, & !< homogenization ID of each element
mesh_microstructureAt !< microstructure ID of each element
integer(pInt), dimension(:,:), allocatable, public, protected :: &
mesh_element !< FEid, type(internal representation), material, texture, node indices as CP IDs
mesh_element !DEPRECATED
real(pReal), dimension(:,:), allocatable, public :: &
mesh_node !< node x,y,z coordinates (after deformation! ONLY FOR MARC!!!)
@ -56,30 +64,20 @@ use PETScis
DM, public :: geomMesh
integer(pInt), dimension(:), allocatable, public, protected :: &
PetscInt, dimension(:), allocatable, public, protected :: &
mesh_boundaries
integer(pInt), parameter, public :: &
FE_Nelemtypes = 1_pInt, &
FE_Ngeomtypes = 1_pInt, &
FE_Ncelltypes = 1_pInt, &
FE_maxNnodes = 1_pInt, &
FE_maxNips = 14_pInt
integer(pInt), dimension(FE_Nelemtypes), parameter, public :: FE_geomtype = & !< geometry type of particular element type
integer(pInt), dimension(1_pInt), parameter, public :: FE_geomtype = & !< geometry type of particular element type
int([1],pInt)
integer(pInt), dimension(FE_Ngeomtypes), parameter, public :: FE_celltype = & !< cell type that is used by each geometry type
integer(pInt), dimension(1_pInt), parameter, public :: FE_celltype = & !< cell type that is used by each geometry type
int([1],pInt)
integer(pInt), dimension(FE_Nelemtypes), parameter, public :: FE_Nnodes = & !< number of nodes that constitute a specific type of element
int([0],pInt)
integer(pInt), dimension(FE_Ngeomtypes), public :: FE_Nips = & !< number of IPs in a specific type of element
integer(pInt), dimension(1_pInt), public :: FE_Nips = & !< number of IPs in a specific type of element
int([0],pInt)
integer(pInt), dimension(FE_Ncelltypes), parameter, public :: FE_NipNeighbors = & !< number of ip neighbors / cell faces in a specific cell type
integer(pInt), dimension(1_pInt), parameter, public :: FE_NipNeighbors = & !< number of ip neighbors / cell faces in a specific cell type
int([6],pInt)
@ -89,15 +87,6 @@ use PETScis
mesh_FEM_build_ipCoordinates, &
mesh_cellCenterCoordinates
external :: &
DMPlexCreateFromFile, &
DMPlexDistribute, &
DMPlexCopyCoordinates, &
DMGetStratumSize, &
DMPlexGetHeightStratum, &
DMPlexGetLabelValue, &
DMPlexSetLabelValue
contains
@ -105,7 +94,7 @@ contains
!> @brief initializes the mesh by calling all necessary private routines the mesh module
!! Order and routines strongly depend on type of solver
!--------------------------------------------------------------------------------------------------
subroutine mesh_init(ip,el)
subroutine mesh_init()
use DAMASK_interface
use, intrinsic :: iso_fortran_env ! to get compiler_version and compiler_options (at least for gfortran 4.6 at the moment)
use IO, only: &
@ -127,15 +116,13 @@ subroutine mesh_init(ip,el)
worldsize
use FEsolving, only: &
FEsolving_execElem, &
FEsolving_execIP, &
calcMode
FEsolving_execIP
use FEM_Zoo, only: &
FEM_Zoo_nQuadrature, &
FEM_Zoo_QuadraturePoints
implicit none
integer(pInt), parameter :: FILEUNIT = 222_pInt
integer(pInt), intent(in) :: el, ip
integer(pInt) :: j
integer(pInt), allocatable, dimension(:) :: chunkPos
integer :: dimPlex
@ -154,14 +141,19 @@ subroutine mesh_init(ip,el)
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
#include "compilation_info.f90"
! read in file
call DMPlexCreateFromFile(PETSC_COMM_WORLD,geometryFile,PETSC_TRUE,globalMesh,ierr)
CHKERRQ(ierr)
! get spatial dimension (2 or 3?)
call DMGetDimension(globalMesh,dimPlex,ierr)
CHKERRQ(ierr)
write(6,*) 'dimension',dimPlex;flush(6)
call DMGetStratumSize(globalMesh,'depth',dimPlex,mesh_NcpElemsGlobal,ierr)
CHKERRQ(ierr)
! get number of IDs in face sets (for boundary conditions?)
call DMGetLabelSize(globalMesh,'Face Sets',mesh_Nboundaries,ierr)
CHKERRQ(ierr)
write(6,*) 'number of "Face Sets"',mesh_Nboundaries;flush(6)
call MPI_Bcast(mesh_Nboundaries,1,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr)
call MPI_Bcast(mesh_NcpElemsGlobal,1,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr)
call MPI_Bcast(dimPlex,1,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr)
@ -177,7 +169,9 @@ subroutine mesh_init(ip,el)
enddo
if (nFaceSets > 0) call ISRestoreIndicesF90(faceSetIS,pFaceSets,ierr)
call MPI_Bcast(mesh_boundaries,mesh_Nboundaries,MPI_INTEGER,0,PETSC_COMM_WORLD,ierr)
! this read in function should ignore C and C++ style comments
! it is used for BC only?
if (worldrank == 0) then
j = 0
flag = .false.
@ -186,8 +180,8 @@ subroutine mesh_init(ip,el)
read(FILEUNIT,'(a512)') line
if (trim(line) == IO_EOF) exit ! end of file reached
if (trim(line) == '$Elements') then
read(FILEUNIT,'(a512)') line
read(FILEUNIT,'(a512)') line
read(FILEUNIT,'(a512)') line ! number of elements (ignore)
read(FILEUNIT,'(a512)') line
flag = .true.
endif
if (trim(line) == '$EndElements') exit
@ -212,29 +206,25 @@ subroutine mesh_init(ip,el)
endif
call DMDestroy(globalMesh,ierr); CHKERRQ(ierr)
call DMGetStratumSize(geomMesh,'depth',dimPlex,mesh_Nelems,ierr)
call DMGetStratumSize(geomMesh,'depth',dimPlex,mesh_NcpElems,ierr)
CHKERRQ(ierr)
call DMGetStratumSize(geomMesh,'depth',0,mesh_Nnodes,ierr)
CHKERRQ(ierr)
mesh_NcpElems = mesh_Nelems
FE_Nips(FE_geomtype(1_pInt)) = FEM_Zoo_nQuadrature(dimPlex,integrationOrder)
mesh_maxNnodes = FE_Nnodes(1_pInt)
mesh_maxNips = FE_Nips(1_pInt)
call mesh_FEM_build_ipCoordinates(dimPlex,FEM_Zoo_QuadraturePoints(dimPlex,integrationOrder)%p)
call mesh_FEM_build_ipVolumes(dimPlex)
allocate (mesh_element (4_pInt+mesh_maxNnodes,mesh_NcpElems)); mesh_element = 0_pInt
allocate (mesh_element (4_pInt,mesh_NcpElems)); mesh_element = 0_pInt
do j = 1, mesh_NcpElems
mesh_element( 1,j) = j
mesh_element( 2,j) = 1_pInt ! elem type
mesh_element( 1,j) = -1_pInt ! DEPRECATED
mesh_element( 2,j) = mesh_elemType ! elem type
mesh_element( 3,j) = 1_pInt ! homogenization
call DMGetLabelValue(geomMesh,'material',j-1,mesh_element(4,j),ierr)
CHKERRQ(ierr)
end do
if (usePingPong .and. (mesh_Nelems /= mesh_NcpElems)) &
call IO_error(600_pInt) ! ping-pong must be disabled when having non-DAMASK elements
if (debug_e < 1 .or. debug_e > mesh_NcpElems) &
call IO_error(602_pInt,ext_msg='element') ! selected element does not exist
if (debug_i < 1 .or. debug_i > FE_Nips(FE_geomtype(mesh_element(2_pInt,debug_e)))) &
@ -245,10 +235,14 @@ subroutine mesh_init(ip,el)
allocate(FEsolving_execIP(2_pInt,mesh_NcpElems)); FEsolving_execIP = 1_pInt ! parallel loop bounds set to comprise from first IP...
forall (j = 1_pInt:mesh_NcpElems) FEsolving_execIP(2,j) = FE_Nips(FE_geomtype(mesh_element(2,j))) ! ...up to own IP count for each element
if (allocated(calcMode)) deallocate(calcMode)
allocate(calcMode(mesh_maxNips,mesh_NcpElems))
calcMode = .false. ! pretend to have collected what first call is asking (F = I)
calcMode(ip,el) = .true. ! first ip,el needs to be already pingponged to "calc"
!!!! COMPATIBILITY HACK !!!!
! for a homogeneous mesh, all elements have the same number of IPs and cell nodes.
! hence, xxPerElem instead of maxXX
mesh_NipsPerElem = mesh_maxNips
! better name
mesh_homogenizationAt = mesh_element(3,:)
mesh_microstructureAt = mesh_element(4,:)
!!!!!!!!!!!!!!!!!!!!!!!!
end subroutine mesh_init
View File
@ -26,9 +26,8 @@ module numerics
worldsize = 0_pInt !< MPI worldsize (/=0 for MPI simulations only)
integer(4), protected, public :: &
DAMASK_NumThreadsInt = 0 !< value stored in environment variable DAMASK_NUM_THREADS, set to zero if no OpenMP directive
integer(pInt), public :: &
numerics_integrationMode = 0_pInt !< integrationMode 1 = central solution; integrationMode 2 = perturbation, Default 0: undefined, is not read from file
integer(pInt), dimension(2) , protected, public :: &
!< ToDo: numerics_integrator is an array for historical reasons, only element 1 is used!
integer(pInt), dimension(2), protected, public :: &
numerics_integrator = 1_pInt !< method used for state integration (central & perturbed state), Default 1: fix-point iteration for both states
real(pReal), protected, public :: &
relevantStrain = 1.0e-7_pReal, & !< strain increment considered significant (used by crystallite to determine whether strain inc is considered significant)
@ -128,12 +127,7 @@ module numerics
#ifdef FEM
integer(pInt), protected, public :: &
integrationOrder = 2_pInt, & !< order of quadrature rule required
structOrder = 2_pInt, & !< order of displacement shape functions
thermalOrder = 2_pInt, & !< order of temperature field shape functions
damageOrder = 2_pInt, & !< order of damage field shape functions
vacancyfluxOrder = 2_pInt, & !< order of vacancy concentration and chemical potential field shape functions
porosityOrder = 2_pInt, & !< order of porosity field shape functions
hydrogenfluxOrder = 2_pInt !< order of hydrogen concentration and chemical potential field shape functions
structOrder = 2_pInt !< order of displacement shape functions
logical, protected, public :: &
BBarStabilisation = .false.
character(len=4096), protected, public :: &
@ -147,40 +141,7 @@ module numerics
&-mech_pc_type ml &
&-mech_mg_levels_ksp_type chebyshev &
&-mech_mg_levels_pc_type sor &
&-mech_pc_ml_nullspace user &
&-damage_snes_type vinewtonrsls &
&-damage_snes_atol 1e-8 &
&-damage_ksp_type preonly &
&-damage_ksp_max_it 25 &
&-damage_pc_type cholesky &
&-damage_pc_factor_mat_solver_package mumps &
&-thermal_snes_type newtonls &
&-thermal_snes_linesearch_type cp &
&-thermal_ksp_type fgmres &
&-thermal_ksp_max_it 25 &
&-thermal_snes_atol 1e-3 &
&-thermal_pc_type hypre &
&-vacancy_snes_type newtonls &
&-vacancy_snes_linesearch_type cp &
&-vacancy_snes_atol 1e-9 &
&-vacancy_ksp_type fgmres &
&-vacancy_ksp_max_it 25 &
&-vacancy_pc_type ml &
&-vacancy_mg_levels_ksp_type chebyshev &
&-vacancy_mg_levels_pc_type sor &
&-porosity_snes_type newtonls &
&-porosity_snes_atol 1e-8 &
&-porosity_ksp_type fgmres &
&-porosity_ksp_max_it 25 &
&-porosity_pc_type hypre &
&-hydrogen_snes_type newtonls &
&-hydrogen_snes_linesearch_type cp &
&-hydrogen_snes_atol 1e-9 &
&-hydrogen_ksp_type fgmres &
&-hydrogen_ksp_max_it 25 &
&-hydrogen_pc_type ml &
&-hydrogen_mg_levels_ksp_type chebyshev &
&-hydrogen_mg_levels_pc_type sor ', &
&-mech_pc_ml_nullspace user ',&
petsc_options = ''
#endif
@ -231,8 +192,6 @@ subroutine numerics_init
tag ,&
line
!$ character(len=6) DAMASK_NumThreadsString ! environment variable DAMASK_NUM_THREADS
external :: &
PETScErrorF ! is called in the CHKERRQ macro
#ifdef PETSc
call MPI_Comm_rank(PETSC_COMM_WORLD,worldrank,ierr);CHKERRQ(ierr)
@ -314,9 +273,7 @@ subroutine numerics_init
case ('atol_crystallitestress')
aTol_crystalliteStress = IO_floatValue(line,chunkPos,2_pInt)
case ('integrator')
numerics_integrator(1) = IO_intValue(line,chunkPos,2_pInt)
case ('integratorstiffness')
numerics_integrator(2) = IO_intValue(line,chunkPos,2_pInt)
numerics_integrator = IO_intValue(line,chunkPos,2_pInt)
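! whole-array assignment: both entries of numerics_integrator receive the same value
! (only entry 1 is evaluated, see the ToDo note at its declaration)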
case ('usepingpong')
usepingpong = IO_intValue(line,chunkPos,2_pInt) > 0_pInt
case ('timesyncing')
@ -461,16 +418,6 @@ subroutine numerics_init
integrationorder = IO_intValue(line,chunkPos,2_pInt)
case ('structorder')
structorder = IO_intValue(line,chunkPos,2_pInt)
case ('thermalorder')
thermalorder = IO_intValue(line,chunkPos,2_pInt)
case ('damageorder')
damageorder = IO_intValue(line,chunkPos,2_pInt)
case ('vacancyfluxorder')
vacancyfluxOrder = IO_intValue(line,chunkPos,2_pInt)
case ('porosityorder')
porosityOrder = IO_intValue(line,chunkPos,2_pInt)
case ('hydrogenfluxorder')
hydrogenfluxOrder = IO_intValue(line,chunkPos,2_pInt)
case ('petsc_options')
petsc_options = trim(line(chunkPos(4):))
case ('bbarstabilisation')
@ -482,7 +429,7 @@ subroutine numerics_init
#endif
case default ! found unknown keyword
call IO_error(300_pInt,ext_msg=tag)
endselect
end select
enddo
close(FILEUNIT)
@ -555,7 +502,7 @@ subroutine numerics_init
!--------------------------------------------------------------------------------------------------
! Random seeding parameter
write(6,'(a24,1x,i16,/)') ' random_seed: ',randomSeed
write(6,'(a16,1x,i16,/)') ' random_seed: ',randomSeed
if (randomSeed <= 0_pInt) &
write(6,'(a,/)') ' random seed will be generated!'
@ -623,11 +570,6 @@ subroutine numerics_init
#ifdef FEM
write(6,'(a24,1x,i8)') ' integrationOrder: ',integrationOrder
write(6,'(a24,1x,i8)') ' structOrder: ',structOrder
write(6,'(a24,1x,i8)') ' thermalOrder: ',thermalOrder
write(6,'(a24,1x,i8)') ' damageOrder: ',damageOrder
write(6,'(a24,1x,i8)') ' vacancyfluxOrder: ',vacancyfluxOrder
write(6,'(a24,1x,i8)') ' porosityOrder: ',porosityOrder
write(6,'(a24,1x,i8)') ' hydrogenfluxOrder: ',hydrogenfluxOrder
write(6,'(a24,1x,a)') ' PETSc_options: ',trim(petsc_defaultOptions)//' '//trim(petsc_options)
write(6,'(a24,1x,L8)') ' B-Bar stabilisation: ',BBarStabilisation
#endif
View File
@ -2382,8 +2382,7 @@ use, intrinsic :: &
use prec, only: dNeq0, &
dNeq, &
dEq0
use numerics, only: numerics_integrationMode, &
numerics_timeSyncing
use numerics, only: numerics_timeSyncing
use IO, only: IO_error
use debug, only: debug_level, &
debug_constitutive, &
@ -2942,14 +2941,12 @@ rhoDot = rhoDotFlux &
+ rhoDotAthermalAnnihilation &
+ rhoDotThermalAnnihilation
if (numerics_integrationMode == 1_pInt) then ! save rates for output if in central integration mode
rhoDotFluxOutput(1:ns,1:8,1_pInt,ip,el) = rhoDotFlux(1:ns,1:8)
rhoDotMultiplicationOutput(1:ns,1:2,1_pInt,ip,el) = rhoDotMultiplication(1:ns,[1,3])
rhoDotSingle2DipoleGlideOutput(1:ns,1:2,1_pInt,ip,el) = rhoDotSingle2DipoleGlide(1:ns,9:10)
rhoDotAthermalAnnihilationOutput(1:ns,1:2,1_pInt,ip,el) = rhoDotAthermalAnnihilation(1:ns,9:10)
rhoDotThermalAnnihilationOutput(1:ns,1:2,1_pInt,ip,el) = rhoDotThermalAnnihilation(1:ns,9:10)
rhoDotEdgeJogsOutput(1:ns,1_pInt,ip,el) = 2.0_pReal * rhoDotThermalAnnihilation(1:ns,1)
endif
rhoDotFluxOutput(1:ns,1:8,1_pInt,ip,el) = rhoDotFlux(1:ns,1:8)
rhoDotMultiplicationOutput(1:ns,1:2,1_pInt,ip,el) = rhoDotMultiplication(1:ns,[1,3])
rhoDotSingle2DipoleGlideOutput(1:ns,1:2,1_pInt,ip,el) = rhoDotSingle2DipoleGlide(1:ns,9:10)
rhoDotAthermalAnnihilationOutput(1:ns,1:2,1_pInt,ip,el) = rhoDotAthermalAnnihilation(1:ns,9:10)
rhoDotThermalAnnihilationOutput(1:ns,1:2,1_pInt,ip,el) = rhoDotThermalAnnihilation(1:ns,9:10)
rhoDotEdgeJogsOutput(1:ns,1_pInt,ip,el) = 2.0_pReal * rhoDotThermalAnnihilation(1:ns,1)
#ifdef DEBUG
File diff suppressed because it is too large
View File
@ -87,16 +87,6 @@ module prec
integer(pInt), pointer, dimension(:,:,:) :: p
end type
#ifdef FEM
type, public :: tOutputData
integer(pInt) :: &
sizeIpCells = 0_pInt , &
sizeResults = 0_pInt
real(pReal), allocatable, dimension(:,:) :: &
output !< output data
end type
#endif
public :: &
prec_init, &
dEq, &
src/quit.f90 Normal file (44 lines added)
View File
@ -0,0 +1,44 @@
!--------------------------------------------------------------------------------------------------
!> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH
!> @brief quit subroutine
!> @details exits the program and reports the termination date and time. Exit code 0 signals
!> everything is fine. Exit code 1 signals an error, message according to IO_error. Exit code
!> 2 signals no severe problems, but some increments did not converge
!--------------------------------------------------------------------------------------------------
subroutine quit(stop_id)
#include <petsc/finclude/petscsys.h>
#ifdef _OPENMP
use MPI, only: &
MPI_finalize
#endif
use prec, only: &
pInt
use PetscSys
implicit none
integer(pInt), intent(in) :: stop_id
integer, dimension(8) :: dateAndTime ! type default integer
integer(pInt) :: error = 0_pInt
PetscErrorCode :: ierr = 0
call PETScFinalize(ierr)
CHKERRQ(ierr)
#ifdef _OPENMP
call MPI_finalize(error)
if (error /= 0) write(6,'(a)') ' Error in MPI_finalize'
#endif
call date_and_time(values = dateAndTime)
write(6,'(/,a)') 'DAMASK terminated on:'
write(6,'(a,2(i2.2,a),i4.4)') 'Date: ',dateAndTime(3),'/',&
dateAndTime(2),'/',&
dateAndTime(1)
write(6,'(a,2(i2.2,a),i2.2)') 'Time: ',dateAndTime(5),':',&
dateAndTime(6),':',&
dateAndTime(7)
if (stop_id == 0_pInt .and. ierr == 0_pInt .and. error == 0_pInt) stop 0 ! normal termination
if (stop_id == 2_pInt .and. ierr == 0_pInt .and. error == 0_pInt) stop 2 ! not all incs converged
stop 1 ! error (message from IO_error)
end subroutine quit
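! usage sketch (hypothetical caller, not part of this file), following the exit-code
! convention documented above:
!   call quit(0_pInt)   ! clean termination        -> exit code 0
!   call quit(2_pInt)   ! some increments diverged -> exit code 2
!   call quit(1_pInt)   ! abort after IO_error     -> exit code 1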
View File
@ -50,8 +50,6 @@ module spectral_damage
spectral_damage_init, &
spectral_damage_solution, &
spectral_damage_forward
external :: &
PETScErrorF ! is called in the CHKERRQ macro
contains
@ -85,11 +83,6 @@ subroutine spectral_damage_init()
Vec :: uBound, lBound
PetscErrorCode :: ierr
character(len=100) :: snes_type
external :: &
SNESSetOptionsPrefix, &
SNESGetType, &
DMDAGetCorners, &
DMDASNESSetFunctionLocal
write(6,'(/,a)') ' <<<+- spectral_damage init -+>>>'
write(6,'(/,a)') ' Shanthraj et al., Handbook of Mechanics of Materials, volume in press, '
@ -194,11 +187,6 @@ type(tSolutionState) function spectral_damage_solution(timeinc,timeinc_old,loadC
PetscErrorCode :: ierr
SNESConvergedReason :: reason
external :: &
VecMin, &
VecMax, &
SNESSolve
spectral_damage_solution%converged =.false.
!--------------------------------------------------------------------------------------------------
View File
@ -66,8 +66,6 @@ module spectral_mech_basic
basic_init, &
basic_solution, &
basic_forward
external :: &
PETScErrorF ! is called in the CHKERRQ macro
contains
@ -119,11 +117,6 @@ subroutine basic_init
integer(pInt) :: proc
character(len=1024) :: rankStr
external :: &
SNESSetOptionsPrefix, &
SNESSetConvergenceTest, &
DMDASNESSetFunctionLocal
write(6,'(/,a)') ' <<<+- DAMASK_spectral_solverBasic init -+>>>'
write(6,'(/,a)') ' Shanthraj et al., International Journal of Plasticity, 66:3145, 2015'
write(6,'(a,/)') ' https://doi.org/10.1016/j.ijplas.2014.02.006'
@ -246,9 +239,6 @@ type(tSolutionState) function basic_solution(incInfoIn,timeinc,timeinc_old,stres
PetscErrorCode :: ierr
SNESConvergedReason :: reason
external :: &
SNESsolve
incInfo = incInfoIn
!--------------------------------------------------------------------------------------------------
View File
@ -73,8 +73,6 @@ module spectral_mech_Polarisation
Polarisation_init, &
Polarisation_solution, &
Polarisation_forward
external :: &
PETScErrorF ! is called in the CHKERRQ macro
contains
@ -130,11 +128,6 @@ subroutine Polarisation_init
integer(pInt) :: proc
character(len=1024) :: rankStr
external :: &
SNESSetOptionsPrefix, &
SNESSetConvergenceTest, &
DMDASNESsetFunctionLocal
write(6,'(/,a)') ' <<<+- DAMASK_spectral_solverPolarisation init -+>>>'
write(6,'(/,a)') ' Shanthraj et al., International Journal of Plasticity, 66:3145, 2015'
write(6,'(a,/)') ' https://doi.org/10.1016/j.ijplas.2014.02.006'
@ -272,9 +265,6 @@ type(tSolutionState) function Polarisation_solution(incInfoIn,timeinc,timeinc_ol
PetscErrorCode :: ierr
SNESConvergedReason :: reason
external :: &
SNESSolve
incInfo = incInfoIn
!--------------------------------------------------------------------------------------------------
View File
@ -50,8 +50,6 @@ module spectral_thermal
spectral_thermal_init, &
spectral_thermal_solution, &
spectral_thermal_forward
external :: &
PETScErrorF ! is called in the CHKERRQ macro
contains
@ -88,11 +86,6 @@ subroutine spectral_thermal_init
PetscScalar, dimension(:,:,:), pointer :: x_scal
PetscErrorCode :: ierr
external :: &
SNESsetOptionsPrefix, &
DMDAgetCorners, &
DMDASNESsetFunctionLocal
write(6,'(/,a)') ' <<<+- spectral_thermal init -+>>>'
write(6,'(/,a)') ' Shanthraj et al., Handbook of Mechanics of Materials, volume in press,'
write(6,'(/,a)') ' chapter Spectral Solvers for Crystal Plasticity and Multi-Physics Simulations. Springer, 2018'
@ -196,11 +189,6 @@ type(tSolutionState) function spectral_thermal_solution(timeinc,timeinc_old,load
PetscErrorCode :: ierr
SNESConvergedReason :: reason
external :: &
VecMin, &
VecMax, &
SNESSolve
spectral_thermal_solution%converged =.false.
!--------------------------------------------------------------------------------------------------
View File
@ -146,8 +146,6 @@ module spectral_utilities
FIELD_DAMAGE_ID
private :: &
utilities_getFreqDerivative
external :: &
PETScErrorF ! is called in the CHKERRQ macro
contains
@ -209,8 +207,6 @@ subroutine utilities_init()
scalarSize = 1_C_INTPTR_T, &
vecSize = 3_C_INTPTR_T, &
tensorSize = 9_C_INTPTR_T
external :: &
PetscOptionsInsertString
write(6,'(/,a)') ' <<<+- spectral_utilities init -+>>>'
write(6,'(/,a)') ' Eisenlohr et al., International Journal of Plasticity, 46:3753, 2013'