Merge remote-tracking branch 'origin/spectralSolver-cutbackfix' into 3-adding-plastic-constitutive-law-with-kinematic-hardening

This commit is contained in:
Zhuowen Zhao 2018-01-18 13:59:42 -05:00
commit 7c755a0241
50 changed files with 1780 additions and 2893 deletions

View File

@ -1 +1 @@
v2.0.1-976-g035215e
v2.0.1-1004-g2c4df2f

View File

@ -1 +1,23 @@
# Save by abaqususer on Thu May 12 10:22:10 2011
# Save by m.diehl on 2017_12_06-18.38.26; build 2017 2016_09_27-23.54.59 126836
from abaqus import *
upgradeMdb(
'/nethome/storage/raid4/m.diehl/DAMASK/examples/AbaqusStandard/SX_PX_compression-6.9-1.cae'
,
'/nethome/storage/raid4/m.diehl/DAMASK/examples/AbaqusStandard/SX_PX_compression.cae')
# Save by m.diehl on 2017_12_06-18.38.26; build 2017 2016_09_27-23.54.59 126836
from part import *
from material import *
from section import *
from assembly import *
from step import *
from interaction import *
from load import *
from mesh import *
from optimization import *
from job import *
from sketch import *
from visualization import *
from connectorBehavior import *
mdb.jobs['Job_sx-px'].setValues(description='compression', userSubroutine=
'$HOME/DAMASK/src/DAMASK_abaqus_std.f')
# Save by m.diehl on 2017_12_06-18.39.44; build 2017 2016_09_27-23.54.59 126836

View File

@ -49,7 +49,7 @@ maxVolDiscrepancy_RGC 1.0e-5 # maximum allowable relative volume discr
volDiscrepancyMod_RGC 1.0e+12
discrepancyPower_RGC 5.0
fixed_seed 0 # put any number larger than zero, integer, if you want to have a pseudo random distribution
random_seed 0 # any integer larger than zero seeds the random generator, otherwise random seeding
## spectral parameters ##
err_div_tolAbs 1.0e-3 # absolute tolerance for fulfillment of stress equilibrium

File diff suppressed because it is too large

View File

@ -16,3 +16,10 @@ patch -p1 < installation/patch/nameOfPatch
* **PETSc-3.8** adjusts all includes and calls to PETSc to the 3.8.x API
This allows using the current version of PETSc
## Create patch
Commit your changes, then export them as a patch:
```bash
git format-patch PATH_TO_COMPARE --stdout >
```

View File

@ -1,198 +0,0 @@
<?xml version="1.0"?>
<h5ds>
<!--Top level attributes-->
<history>
<type>attr</type>
<h5path>/</h5path>
<category></category>
<description>store cmd history</description>
</history>
<inc>
<type>attr</type>
<h5path>/</h5path>
<category></category>
<description></description>
</inc>
<!--Geometry section-->
<Vx>
<type>Scalar</type>
<h5path>/Geometry/Vx</h5path>
<category>Geometry</category>
<description>Vector along x defines the spectral mesh</description>
</Vx>
<Vy>
<type>Scalar</type>
<h5path>/Geometry/Vy</h5path>
<category>Geometry</category>
<description>Vector along y defines the spectral mesh</description>
</Vy>
<Vz>
<type>Scalar</type>
<h5path>/Geometry/Vz</h5path>
<category>Geometry</category>
<description>Vector along z defines the spectral mesh</description>
</Vz>
<ip>
<type>Scalar</type>
<h5path>/Geometry/ip</h5path>
<category>Geometry</category>
<description></description>
</ip>
<node>
<type>Scalar</type>
<h5path>/Geometry/node</h5path>
<category>Geometry</category>
<description></description>
</node>
<grain>
<type>Scalar</type>
<h5path>/Geometry/grain</h5path>
<category>Geometry</category>
<description></description>
</grain>
<pos>
<type>Vector</type>
<h5path>/Geometry/pos</h5path>
<category>Geometry</category>
<description></description>
</pos>
<elem>
<type>Scalar</type>
<h5path>/Geometry/elem</h5path>
<category>Geometry</category>
<description></description>
</elem>
<!--Crystallite section-->
<phase>
<type>Scalar</type>
<h5path>/Crystallite/phase</h5path>
<category>Crystallite</category>
<description></description>
</phase>
<texture>
<type>Scalar</type>
<h5path>/Crystallite/texture</h5path>
<category>Crystallite</category>
<description></description>
</texture>
<volume>
<type>Scalar</type>
<h5path>/Crystallite/volume</h5path>
<category>Crystallite</category>
<description></description>
</volume>
<orientation>
<type>Vector</type>
<h5path>/Crystallite/orientation</h5path>
<category>Crystallite</category>
<description></description>
</orientation>
<eulerangles>
<type>Vector</type>
<h5path>/Crystallite/eulerangles</h5path>
<category>Crystallite</category>
<description>Bunge Euler angles in degrees</description>
</eulerangles>
<grainrotation>
<type>Vector</type>
<h5path>/Crystallite/grainrotation</h5path>
<category>Crystallite</category>
<description></description>
</grainrotation>
<f>
<type>Tensor</type>
<h5path>/Crystallite/f</h5path>
<category>Crystallite</category>
<description>deformation gradient (F)</description>
</f>
<p>
<type>Tensor</type>
<h5path>/Crystallite/p</h5path>
<category>Crystallite</category>
<description>first Piola-Kirchhoff stress</description>
</p>
<Cauchy>
<type>Tensor</type>
<h5path>/Crystallite/Cauchy</h5path>
<category>Crystallite</category>
<description>Cauchy stress tensor</description>
</Cauchy>
<lnV>
<type>Tensor</type>
<h5path>/Crystallite/lnV</h5path>
<category>Crystallite</category>
<description></description>
</lnV>
<MisesCauchy>
<type>Scalar</type>
<h5path>/Crystallite/MisesCauchy</h5path>
<category>Crystallite</category>
<description>von Mises equivalent of Cauchy stress</description>
</MisesCauchy>
<MiseslnV>
<type>Scalar</type>
<h5path>/Crystallite/MiseslnV</h5path>
<category>Crystallite</category>
<description>von Mises equivalent of logarithmic strain (lnV)</description>
</MiseslnV>
<!--Constitutive section-->
<resistance_slip>
<type>Vector</type>
<h5path>/Constitutive/resistance_slip</h5path>
<category>Constitutive</category>
<description></description>
</resistance_slip>
<shearrate_slip>
<type>Vector</type>
<h5path>/Constitutive/shearrate_slip</h5path>
<category>Constitutive</category>
<description></description>
</shearrate_slip>
<resolvedstress_slip>
<type>Vector</type>
<h5path>/Constitutive/resolvedstress_slip</h5path>
<category>Constitutive</category>
<description></description>
</resolvedstress_slip>
<totalshear>
<type>Scalar</type>
<h5path>/Constitutive/totalshear</h5path>
<category>Constitutive</category>
<description></description>
</totalshear>
<accumulatedshear_slip>
<type>Matrix</type>
<h5path>/Constitutive/accumulatedshear_slip</h5path>
<category>Constitutive</category>
<description>vector containing accumulated shear per slip system</description>
</accumulatedshear_slip>
<!--Derived section-->
</h5ds>

View File

@ -1,31 +1,13 @@
# -*- coding: UTF-8 no BOM -*-
"""Main aggregator"""
import os,sys,time
h5py_flag = os.path.join(os.path.dirname(__file__),'../../.noH5py')
h5py_grace = 7200 # only complain once every 7200 sec (2 hours)
h5py_msg = "h5py module not found."
now = time.time()
import os
with open(os.path.join(os.path.dirname(__file__),'../../VERSION')) as f:
version = f.readline()[:-1]
from .environment import Environment # noqa
from .asciitable import ASCIItable # noqa
try:
from .h5table import H5Table # noqa
if os.path.exists(h5py_flag): os.remove(h5py_flag) # delete flagging file on success
except ImportError:
if os.path.exists(h5py_flag):
if now - os.path.getmtime(h5py_flag) > h5py_grace: # complain (again) every so-and-so often
sys.stderr.write(h5py_msg+'\n')
with open(h5py_flag, 'a'):
os.utime(h5py_flag,(now,now)) # update flag modification time to "now"
else:
open(h5py_flag, 'a').close() # create flagging file
sys.stderr.write(h5py_msg+'\n') # complain for the first time
from .config import Material # noqa
from .colormaps import Colormap, Color # noqa

View File

@ -1,146 +0,0 @@
# -*- coding: UTF-8 no BOM -*-
# ----------------------------------------------------------- #
# Ideally h5py should be enough to serve as the data #
# interface for future DAMASK, but since we are still not #
# sure when this major shift will happen, it seems to be a #
# good idea to provide an interface class that helps users #
# ease into using HDF5 as the new daily storage driver. #
# ----------------------------------------------------------- #
import os
import h5py
import numpy as np
import xml.etree.cElementTree as ET
# ---------------------------------------------------------------- #
# Python 3 has no separate unicode type; this ensures the code #
# works on both Python 2 and 3 #
# ---------------------------------------------------------------- #
try:
test = isinstance('test', unicode)
except(NameError):
unicode = str
def lables_to_path(label, dsXMLPath=None):
"""Read the XML definition file and return the path."""
if dsXMLPath is None:
# use the default storage layout in DS_HDF5.xml
if "h5table.pyc" in __file__:
dsXMLPath = os.path.abspath(__file__).replace("h5table.pyc",
"DS_HDF5.xml")
else:
dsXMLPath = os.path.abspath(__file__).replace("h5table.py",
"DS_HDF5.xml")
# This current implementation requires that all variables
# stay under the root node; the nesting is defined through the
# h5path.
# Allow new derived data to be put under the root
tree = ET.parse(dsXMLPath)
try:
dataType = tree.find('{}/type'.format(label)).text
h5path = tree.find('{}/h5path'.format(label)).text
except:
dataType = "Scalar"
h5path = "/{}".format(label) # just put it under root
return (dataType, h5path)
class H5Table(object):
"""
Lightweight interface class for h5py
DESCRIPTION
-----------
Interface/wrapper class for manipulating data in HDF5 with DAMASK
specialized data structure.
--> try to maintain a minimal API design.
PARAMETERS
----------
h5f_path: str
Absolute path of the HDF5 file
METHOD
------
del_entry() -- Force delete attributes/group/datasets (dangerous)
get_attr() -- Return attributes if possible
add_attr() -- Add NEW attributes to dataset/group (no force overwrite)
get_data() -- Retrieve data in numpy.ndarray
add_data() -- Add dataset to H5 file
get_cmdlog() -- Return the command used to generate the data if possible
NOTE
----
1. As an interface class, it uses the lazy evaluation design
that reads the data only when it is absolutely necessary.
2. The command line used to generate each new feature is stored with
each dataset as dataset attribute.
"""
def __init__(self, h5f_path, new_file=False, dsXMLFile=None):
self.h5f_path = h5f_path
self.dsXMLFile = dsXMLFile
msg = 'Created by H5Table from DAMASK'
mode = 'w' if new_file else 'a'
with h5py.File(self.h5f_path, mode) as h5f:
h5f['/'].attrs['description'] = msg
def del_entry(self, feature_name):
"""Delete entry in HDF5 table"""
dataType, h5f_path = lables_to_path(feature_name,
dsXMLPath=self.dsXMLFile)
with h5py.File(self.h5f_path, 'a') as h5f:
del h5f[h5f_path]
def get_attr(self, attr_name):
dataType, h5f_path = lables_to_path(attr_name,
dsXMLPath=self.dsXMLFile)
with h5py.File(self.h5f_path, 'a') as h5f:
rst_attr = h5f[h5f_path].attrs[attr_name]
return rst_attr
def add_attr(self, attr_name, attr_data):
dataType, h5f_path = lables_to_path(attr_name,
dsXMLPath=self.dsXMLFile)
with h5py.File(self.h5f_path, 'a') as h5f:
h5f[h5f_path].attrs[attr_name] = attr_data
h5f.flush()
def get_data(self, feature_name=None):
"""Extract dataset from HDF5 table and return it in a numpy array"""
dataType, h5f_path = lables_to_path(feature_name,
dsXMLPath=self.dsXMLFile)
with h5py.File(self.h5f_path, 'a') as h5f:
h5f_dst = h5f[h5f_path] # get the handle for target dataset(table)
rst_data = np.zeros(h5f_dst.shape)
h5f_dst.read_direct(rst_data)
return rst_data
def add_data(self, feature_name, dataset, cmd_log=None):
"""Adding new feature into existing HDF5 file"""
dataType, h5f_path = lables_to_path(feature_name,
dsXMLPath=self.dsXMLFile)
with h5py.File(self.h5f_path, 'a') as h5f:
# NOTE:
# --> If the dataset exists, delete the old one so a new
# one can be written; for a brand new dataset, record
# its state as fresh in the cmd log.
try:
del h5f[h5f_path]
print("***deleting old {} from {}".format(feature_name,self.h5f_path))
except:
# if no cmd log was given, None will be used
cmd_log = str(cmd_log) + " [FRESH]"
h5f.create_dataset(h5f_path, data=dataset)
# store the cmd log if possible
if cmd_log is not None:
h5f[h5f_path].attrs['log'] = str(cmd_log)
h5f.flush()
def get_cmdlog(self, feature_name):
"""Get cmd history used to generate the feature"""
dataType, h5f_path = lables_to_path(feature_name,
dsXMLPath=self.dsXMLFile)
with h5py.File(self.h5f_path, 'a') as h5f:
cmd_logs = h5f[h5f_path].attrs['log']
return cmd_logs
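The docstring above summarizes the API; the following is a minimal, hedged usage sketch (not part of the commit). It assumes the class is importable as `damask.h5table.H5Table` with `DS_HDF5.xml` next to it, as was the case before this removal, and uses a hypothetical file name `results.h5`.

```python
# Hedged sketch: exercises the H5Table API documented above.
from damask.h5table import H5Table   # assumes the module shown above is still importable
import numpy as np

h5f = H5Table('results.h5', new_file=True)            # hypothetical file, created fresh
h5f.add_data('totalshear', np.random.rand(10),         # stored at /Constitutive/totalshear per DS_HDF5.xml
             cmd_log='example command line')
print(h5f.get_data('totalshear').shape)                # data read back on demand as a numpy array
print(h5f.get_cmdlog('totalshear'))                     # command history stored as a dataset attribute
```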

121
processing/post/addDerivative.py Executable file
View File

@ -0,0 +1,121 @@
#!/usr/bin/env python2.7
# -*- coding: UTF-8 no BOM -*-
import os,sys
import numpy as np
from optparse import OptionParser
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
def derivative(coordinates,what):
result = np.empty_like(what)
# use differentiation by interpolation
# as described in http://www2.math.umd.edu/~dlevy/classes/amsc466/lecture-notes/differentiation-chap.pdf
result[1:-1,:] = + what[1:-1,:] * (2.*coordinates[1:-1]-coordinates[:-2]-coordinates[2:]) / \
((coordinates[1:-1]-coordinates[:-2])*(coordinates[1:-1]-coordinates[2:])) \
+ what[2:,:] * (coordinates[1:-1]-coordinates[:-2]) / \
((coordinates[2:]-coordinates[1:-1])*(coordinates[2:]-coordinates[:-2])) \
+ what[:-2,:] * (coordinates[1:-1]-coordinates[2:]) / \
((coordinates[:-2]-coordinates[1:-1])*(coordinates[:-2]-coordinates[2:]))
result[0,:] = (what[0,:] - what[1,:]) / \
(coordinates[0] - coordinates[1])
result[-1,:] = (what[-1,:] - what[-2,:]) / \
(coordinates[-1] - coordinates[-2])
return result
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
Add column(s) containing numerical derivative of requested column(s) with respect to given coordinates.
""", version = scriptID)
parser.add_option('-c','--coordinates',
dest = 'coordinates',
type = 'string', metavar='string',
help = 'heading of coordinate column')
parser.add_option('-l','--label',
dest = 'label',
action = 'extend', metavar = '<string LIST>',
help = 'heading of column(s) to differentiate')
(options,filenames) = parser.parse_args()
if options.coordinates is None:
parser.error('no coordinate column specified.')
if options.label is None:
parser.error('no data column specified.')
# --- loop over input files -------------------------------------------------------------------------
if filenames == []: filenames = [None]
for name in filenames:
try: table = damask.ASCIItable(name = name,
buffered = False)
except: continue
damask.util.report(scriptName,name)
# ------------------------------------------ read header ------------------------------------------
table.head_read()
# ------------------------------------------ sanity checks ----------------------------------------
errors = []
remarks = []
columns = []
dims = []
if table.label_dimension(options.coordinates) != 1:
errors.append('coordinate column {} is not scalar.'.format(options.coordinates))
for what in options.label:
dim = table.label_dimension(what)
if dim < 0: remarks.append('column {} not found...'.format(what))
else:
dims.append(dim)
columns.append(table.label_index(what))
table.labels_append('d({})/d({})'.format(what,options.coordinates) if dim == 1 else
['{}_d({})/d({})'.format(i+1,what,options.coordinates) for i in range(dim)] ) # extend ASCII header with new labels
if remarks != []: damask.util.croak(remarks)
if errors != []:
damask.util.croak(errors)
table.close(dismiss = True)
continue
# ------------------------------------------ assemble header --------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.head_write()
# ------------------------------------------ process data ------------------------------------------
table.data_readArray()
mask = []
for col,dim in zip(columns,dims): mask += range(col,col+dim) # isolate data columns to differentiate
differentiated = derivative(table.data[:,table.label_index(options.coordinates)].reshape((len(table.data),1)),
table.data[:,mask]) # calculate numerical derivative
table.data = np.hstack((table.data,differentiated))
# ------------------------------------------ output result -----------------------------------------
table.data_writeArray()
# ------------------------------------------ output finalization -----------------------------------
table.close() # close ASCII tables
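The `derivative()` function above implements differentiation by quadratic interpolation on a possibly non-uniform grid. A minimal sanity-check sketch (assuming the function has been imported or copied into a session): for f(x) = x², the interior formula is exact, while the endpoints fall back to first-order one-sided differences.

```python
# Hedged sketch: verify derivative() on f(x) = x**2, whose derivative is 2x.
import numpy as np

x = np.array([0.0, 0.5, 1.5, 3.0, 4.0]).reshape(-1, 1)   # non-uniform coordinates, column vector as in the script
y = x**2
d = derivative(x, y)
print(d.flatten())            # interior entries equal 2*x exactly; endpoints are one-sided estimates
assert np.allclose(d[1:-1, 0], 2.0*x[1:-1, 0])
```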

View File

@ -1,72 +0,0 @@
#!/usr/bin/env python2.7
# -*- coding: UTF-8 no BOM -*-
import os
# import re
# import sys
import collections
# import math
import damask
# import numpy as np
from optparse import OptionParser
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName, damask.version])
# ----- Helper functions ----- #
def listify(x):
return x if isinstance(x, collections.Iterable) else [x]
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
usageEx = """
usage_in_details:
Column labels are tagged by '#label#' in formulas.
Use ';' for ',' in functions. Numpy is available as 'np'.
Special variables: #_row_# -- row index
Examples:
(1) magnitude of vector -- "np.linalg.norm(#vec#)"
(2) rounded root of row number -- "round(math.sqrt(#_row_#);3)"
"""
desp = "Add or alter column(s) with derived values according to "
desp += "user-defined arithmetic operation between column(s)."
parser = OptionParser(option_class=damask.extendableOption,
usage='%prog options [file[s]]' + usageEx,
description=desp,
version=scriptID)
parser.add_option('-l', '--label',
dest='labels',
action='extend', metavar='<string LIST>',
help='(list of) new column labels')
parser.add_option('-f', '--formula',
dest='formulas',
action='extend', metavar='<string LIST>',
help='(list of) formulas corresponding to labels')
parser.add_option('-c', '--condition',
dest='condition', metavar='string',
help='condition to filter rows')
parser.set_defaults(condition=None)
(options, filenames) = parser.parse_args()
# ----- parse formulas ----- #
for i in range(len(options.formulas)):
options.formulas[i] = options.formulas[i].replace(';', ',')
# ----- loop over input files ----- #
for name in filenames:
try:
h5f = damask.H5Table(name, new_file=False)
except:
print("!!!Cannot process {}".format(name))
continue
damask.util.report(scriptName, name)
# Note:
# --> not immediately needed, come back later

View File

@ -1,61 +0,0 @@
#!/usr/bin/env python2.7
# -*- coding: UTF-8 no BOM -*-
import os
import damask
import numpy as np
from optparse import OptionParser
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName, damask.version])
def getCauchy(f, p):
"""Return Cauchy stress for given f and p"""
# [Cauchy] = (1/det(F)) * [P].[F_transpose]
f = f.reshape((3, 3))
p = p.reshape((3, 3))
return 1.0/np.linalg.det(f)*np.dot(p, f.T).reshape(9)
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
desp = "Add column(s) containing Cauchy stress based on given column(s)"
desp += "of deformation gradient and first Piola--Kirchhoff stress."
parser = OptionParser(option_class=damask.extendableOption,
usage='%prog options [file[s]]',
description=desp,
version=scriptID)
parser.add_option('-f', '--defgrad',
dest='defgrad',
type='string', metavar='string',
help='heading for deformation gradient [%default]')
parser.add_option('-p', '--stress',
dest='stress',
type='string', metavar='string',
help='heading for first Piola--Kirchhoff stress [%default]')
parser.set_defaults(defgrad='f',
stress='p')
(options, filenames) = parser.parse_args()
# ----- loop over input H5 files ----- #
for name in filenames:
try:
h5f = damask.H5Table(name, new_file=False)
except:
continue
damask.util.report(scriptName, name)
# ----- read in data ----- #
f = h5f.get_data("f")
p = h5f.get_data("p")
# ----- calculate Cauchy stress ----- #
cauchy = [getCauchy(f_i, p_i) for f_i, p_i in zip(f, p)]
# ----- write to HDF5 file ----- #
cmd_log = " ".join([scriptID, name])
h5f.add_data('Cauchy', np.array(cauchy), cmd_log=cmd_log)
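A hedged numerical check of the relation used in `getCauchy()` above, sigma = (1/det F) * P . F^T (assuming the function is in scope; the numbers are arbitrary):

```python
# Hedged sketch: compare getCauchy() against a direct numpy evaluation.
import numpy as np

np.random.seed(0)
F = np.eye(3) + 0.05*np.random.rand(3, 3)        # invertible deformation gradient (arbitrary numbers)
P = np.random.rand(3, 3)                          # arbitrary first Piola-Kirchhoff stress

sigma = getCauchy(F.reshape(9), P.reshape(9)).reshape(3, 3)
assert np.allclose(sigma, np.dot(P, F.T)/np.linalg.det(F))
```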

View File

@ -1,145 +0,0 @@
#!/usr/bin/env python2.7
# -*- coding: UTF-8 no BOM -*-
import os
import sys
import math
import damask
import numpy as np
from optparse import OptionParser
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName, damask.version])
# TODO
# This implementation will have to iterate through the array one
# element at a time, maybe there are some other ways to make this
# faster.
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
desp = "Add RGB color value corresponding to TSL-OIM scheme for IPF."
parser = OptionParser(option_class=damask.extendableOption,
usage='%prog options [file[s]]',
description=desp,
version=scriptID)
parser.add_option('-p', '--pole',
dest='pole',
type='float', nargs=3, metavar='float float float',
help='lab frame direction for IPF [%default]')
msg = ', '.join(damask.Symmetry.lattices[1:])
parser.add_option('-s', '--symmetry',
dest='symmetry',
type='choice', choices=damask.Symmetry.lattices[1:],
metavar='string',
help='crystal symmetry [%default] {{{}}} '.format(msg))
parser.add_option('-e', '--eulers',
dest='eulers',
type='string', metavar='string',
help='Euler angles label')
parser.add_option('-d', '--degrees',
dest='degrees',
action='store_true',
help='Euler angles are given in degrees [%default]')
parser.add_option('-m', '--matrix',
dest='matrix',
type='string', metavar='string',
help='orientation matrix label')
parser.add_option('-a',
dest='a',
type='string', metavar='string',
help='crystal frame a vector label')
parser.add_option('-b',
dest='b',
type='string', metavar='string',
help='crystal frame b vector label')
parser.add_option('-c',
dest='c',
type='string', metavar='string',
help='crystal frame c vector label')
parser.add_option('-q', '--quaternion',
dest='quaternion',
type='string', metavar='string',
help='quaternion label')
parser.set_defaults(pole=(0.0, 0.0, 1.0),
symmetry=damask.Symmetry.lattices[-1],
degrees=False)
(options, filenames) = parser.parse_args()
# safeguard: allow only one orientation representation
# use dynamic typing to group a,b,c into frame
options.frame = [options.a, options.b, options.c]
input = [options.eulers is not None,
all(options.frame),
options.matrix is not None,
options.quaternion is not None]
if np.sum(input) != 1:
parser.error('needs exactly one input format.')
# select input label that was requested (active)
label_active = np.where(input)[0][0]
(label, dim, inputtype) = [(options.eulers, 3, 'eulers'),
(options.frame, [3, 3, 3], 'frame'),
(options.matrix, 9, 'matrix'),
(options.quaternion, 4, 'quaternion')][label_active]
# rescale degrees to radians
toRadians = math.pi/180.0 if options.degrees else 1.0
# only use normalized pole
pole = np.array(options.pole)
pole /= np.linalg.norm(pole)
# ----- Loop over input files ----- #
for name in filenames:
try:
h5f = damask.H5Table(name, new_file=False)
except:
continue
damask.util.report(scriptName, name)
# extract data from HDF5 file
if inputtype == 'eulers':
orieData = h5f.get_data(label)
elif inputtype == 'matrix':
orieData = h5f.get_data(label)
orieData = orieData.reshape(orieData.shape[0], 3, 3)
elif inputtype == 'frame':
vctr_a = h5f.get_data(label[0])
vctr_b = h5f.get_data(label[1])
vctr_c = h5f.get_data(label[2])
frame = np.column_stack((vctr_a, vctr_b, vctr_c))
orieData = frame.reshape(frame.shape[0], 3, 3)
elif inputtype == 'quaternion':
orieData = h5f.get_data(label)
# calculate the IPF color
rgbArrays = np.zeros((orieData.shape[0], 3))
for ci in range(rgbArrays.shape[0]):
if inputtype == 'eulers':
o = damask.Orientation(Eulers=np.array(orieData[ci, :])*toRadians,
symmetry=options.symmetry).reduced()
elif inputtype == 'matrix':
o = damask.Orientation(matrix=orieData[ci, :, :].transpose(),
symmetry=options.symmetry).reduced()
elif inputtype == 'frame':
o = damask.Orientation(matrix=orieData[ci, :, :],
symmetry=options.symmetry).reduced()
elif inputtype == 'quaternion':
o = damask.Orientation(quaternion=orieData[ci, :],
symmetry=options.symmetry).reduced()
rgbArrays[ci, :] = o.IPFcolor(pole)
# compose labels/headers for IPF color (RGB)
labelIPF = 'IPF_{:g}{:g}{:g}_{sym}'.format(*options.pole,
sym=options.symmetry.lower())
# compose cmd history (go with dataset)
cmd_log = scriptID + '\t' + ' '.join(sys.argv[1:])
# write data to HDF5 file
h5f.add_data(labelIPF, rgbArrays, cmd_log=cmd_log)

View File

@ -1,85 +0,0 @@
#!/usr/bin/env python2.7
# -*- coding: UTF-8 no BOM -*-
import os
import sys
import math
import damask
import numpy as np
from optparse import OptionParser
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName, damask.version])
# ----- Helper functions ----- #
def calcMises(what, tensor):
"""Calculate von Mises equivalent"""
dev = tensor - np.trace(tensor)/3.0*np.eye(3)
symdev = 0.5*(dev+dev.T)
return math.sqrt(np.sum(symdev*symdev.T) *
{
'stress': 3.0/2.0,
'strain': 2.0/3.0,
}[what.lower()])
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
desp = "Add von Mises equivalent values for symmetric part of requested"
parser = OptionParser(option_class=damask.extendableOption,
usage='%prog options [file[s]]',
description=desp,
version=scriptID)
parser.add_option('-e', '--strain',
dest='strain',
metavar='string',
help='name of dataset containing strain tensors')
parser.add_option('-s', '--stress',
dest='stress',
metavar='string',
help='name of dataset containing stress tensors')
parser.set_defaults(strain=None, stress=None)
(options, filenames) = parser.parse_args()
# ----- Loop over input files ----- #
for name in filenames:
try:
h5f = damask.H5Table(name, new_file=False)
except:
continue
damask.util.report(scriptName, name)
# TODO:
# Could use some refactoring here
if options.stress is not None:
# extract stress tensor from HDF5
tnsr = h5f.get_data(options.stress)
# calculate von Mises equivalent row by row
vmStress = np.zeros(tnsr.shape[0])
for ri in range(tnsr.shape[0]):
stressTnsr = tnsr[ri, :].reshape(3, 3)
vmStress[ri] = calcMises('stress', stressTnsr)
# compose label
label = "Mises{}".format(options.stress)
# prepare log info
cmd_log = scriptID + '\t' + ' '.join(sys.argv[1:])
# write data to HDF5 file
h5f.add_data(label, vmStress, cmd_log=cmd_log)
if options.strain is not None:
tnsr = h5f.get_data(options.strain)
vmStrain = np.zeros(tnsr.shape[0])
for ri in range(tnsr.shape[0]):
strainTnsr = tnsr[ri, :].reshape(3, 3)
vmStrain[ri] = calcMises('strain', strainTnsr)
label = "Mises{}".format(options.strain)
cmd_log = scriptID + '\t' + ' '.join(sys.argv[1:])
h5f.add_data(label, vmStrain, cmd_log=cmd_log)
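A hedged check of `calcMises()` above (assuming the helper is in scope): a uniaxial stress state diag(s, 0, 0) has equivalent stress s, and a pure shear strain with off-diagonal components g/2 has equivalent strain g/sqrt(3); both follow from the deviator and the 3/2 or 2/3 factors.

```python
# Hedged sketch: sanity-check calcMises() for two classic cases.
import numpy as np

s = 100.0
uniaxial_stress = np.diag([s, 0.0, 0.0])
assert abs(calcMises('stress', uniaxial_stress) - s) < 1e-9             # equivalent stress equals s

g = 0.02
shear_strain = np.array([[0., g/2, 0.], [g/2, 0., 0.], [0., 0., 0.]])
assert abs(calcMises('strain', shear_strain) - g/np.sqrt(3.)) < 1e-12   # equivalent strain g/sqrt(3)
```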

View File

@ -1,156 +0,0 @@
#!/usr/bin/env python2.7
# -*- coding: UTF-8 no BOM -*-
import os
import sys
import damask
import numpy as np
from optparse import OptionParser
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName, damask.version])
# ----- Helper functions ----- #
def operator(stretch, strain, eigenvalues):
# Albrecht Bertram: Elasticity and Plasticity of Large Deformations
# An Introduction (3rd Edition, 2012), p. 102
return {'V#ln': np.log(eigenvalues),
'U#ln': np.log(eigenvalues),
'V#Biot': (np.ones(3, 'd') - 1.0/eigenvalues),
'U#Biot': (eigenvalues - np.ones(3, 'd')),
'V#Green': (np.ones(3, 'd') - 1.0/eigenvalues/eigenvalues)*0.5,
'U#Green': (eigenvalues*eigenvalues - np.ones(3, 'd'))*0.5,
}[stretch+'#'+strain]
def calcEPS(defgrads, stretchType, strainType):
"""Calculate specific type of strain tensor"""
eps = np.zeros(defgrads.shape) # initialize container
# TODO:
# this loop can use some performance boost
# (multi-threading?)
for ri in range(defgrads.shape[0]):
f = defgrads[ri, :].reshape(3, 3)
U, S, Vh = np.linalg.svd(f)
R = np.dot(U, Vh) # rotation of polar decomposition
if stretchType == 'U':
stretch = np.dot(np.linalg.inv(R), f) # F = RU
elif stretchType == 'V':
stretch = np.dot(f, np.linalg.inv(R)) # F = VR
# kill nasty noisy data
stretch = np.where(abs(stretch) < 1e-12, 0, stretch)
(D, V) = np.linalg.eig(stretch)
# flip principal components with negative eigenvalues
neg = np.where(D < 0.0)
D[neg] *= -1.
V[:, neg] *= -1.
# check each vector for orthogonality
# --> brute-force enforcement of an orthogonal basis
# and re-normalize
for i, eigval in enumerate(D):
if np.dot(V[:, i], V[:, (i+1) % 3]) != 0.0:
V[:, (i+1) % 3] = np.cross(V[:, (i+2) % 3], V[:, i])
V[:, (i+1) % 3] /= np.sqrt(np.dot(V[:, (i+1) % 3],
V[:, (i+1) % 3].conj()))
# calculate requested version of strain tensor
d = operator(stretchType, strainType, D)
eps[ri] = (np.dot(V, np.dot(np.diag(d), V.T)).real).reshape(9)
return eps
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
desp = "Add column(s) containing given strains based on given stretches"
parser = OptionParser(option_class=damask.extendableOption,
usage='%prog options [file[s]]',
description=desp,
version=scriptID)
msg = 'material strains based on right Cauchy-Green deformation, i.e., C and U'
parser.add_option('-u', '--right',
dest='right',
action='store_true',
help=msg)
msg = 'spatial strains based on left Cauchy--Green deformation, i.e., B and V'
parser.add_option('-v', '--left',
dest='left',
action='store_true',
help=msg)
parser.add_option('-0', '--logarithmic',
dest='logarithmic',
action='store_true',
help='calculate logarithmic strain tensor')
parser.add_option('-1', '--biot',
dest='biot',
action='store_true',
help='calculate biot strain tensor')
parser.add_option('-2', '--green',
dest='green',
action='store_true',
help='calculate green strain tensor')
# NOTE:
# It might be easier to just calculate one type of deformation gradient
# at a time.
msg = 'heading(s) of columns containing deformation tensor values'
parser.add_option('-f', '--defgrad',
dest='defgrad',
action='extend',
metavar='<string LIST>',
help=msg)
parser.set_defaults(right=False, left=False,
logarithmic=False, biot=False, green=False,
defgrad='f')
(options, filenames) = parser.parse_args()
stretches = []
strains = []
if options.right:
stretches.append('U')
if options.left:
stretches.append('V')
if options.logarithmic:
strains.append('ln')
if options.biot:
strains.append('Biot')
if options.green:
strains.append('Green')
if options.defgrad is None:
parser.error('no data column specified.')
# ----- Loop over input files ----- #
for name in filenames:
try:
h5f = damask.H5Table(name, new_file=False)
except:
continue
damask.util.report(scriptName, name)
# extract defgrads from HDF5 storage
F = h5f.get_data(options.defgrad)
# allow calculating multiple types of strain within the
# same cmd call
for stretchType in stretches:
for strainType in strains:
# calculate strain tensor for this type
eps = calcEPS(F, stretchType, strainType)
# compose labels/headers for this strain tensor
labelsStrain = strainType + stretchType
# prepare log info
cmd_log = scriptID + '\t' + ' '.join(sys.argv[1:])
# write data to HDF5 file
h5f.add_data(labelsStrain, eps, cmd_log=cmd_log)
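A hedged check of `calcEPS()` above (assuming `operator()` and `calcEPS()` are in scope): for a pure diagonal stretch the polar rotation is the identity, so U = V = F and the logarithmic strain is simply the log of the principal stretches.

```python
# Hedged sketch: logarithmic strain of a pure diagonal stretch.
import numpy as np

F = np.diag([1.1, 0.9, 1.0]).reshape(1, 9)       # one "row" of deformation gradients, as stored in the table
eps = calcEPS(F, 'V', 'ln').reshape(3, 3)         # spatial logarithmic (Hencky) strain
assert np.allclose(np.diag(eps), np.log([1.1, 0.9, 1.0]))
```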

View File

@ -1,130 +0,0 @@
#!/usr/bin/env python2.7
# -*- coding: UTF-8 no BOM -*-
# ------------------------------------------------------------------- #
# NOTE: #
# 1. Current Xdmf rendering in ParaView has a memory issue where a #
# large number of polyvertices causes a segmentation fault. By #
# default, ParaView outputs a cell-based Xdmf description, which #
# works for small and medium meshes (<10,000 points). Hence a #
# rectangular mesh is used as the de facto geometry description #
# here. #
# 2. Due to the unstable state of Xdmf, it is safer to port data #
# to VTR rather than using Xdmf as an interpretive layer for #
# data visualization. #
# ------------------------------------------------------------------- #
import os
import damask
import h5py
import xml.etree.cElementTree as ET
from optparse import OptionParser
from xml.dom import minidom
from damask.h5table import lables_to_path
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
# ----- HELPER FUNCTIONS -----#
def addTopLvlCmt(xmlstr, topLevelCmt):
"""Add top level comment to string from ET"""
# a quick hack to add the top level comment to XML file
# --> somehow Elementtree does not provide this functionality
# --> by default
strList = xmlstr.split("\n")
strList[0] += "\n"+topLevelCmt
return "\n".join(strList)
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
msg = 'Generate Xdmf wrapper for HDF5 file.'
parser = OptionParser(option_class=damask.extendableOption,
usage='%prog options [file[s]]',
description = msg,
version = scriptID)
(options, filenames) = parser.parse_args()
h5f = filenames[0]
h5f_base = h5f.split("/")[-1]
# ----- parse HDF5 file ----- #
h5f_dataDim = {}
h5f_dataPath = {}
h5f_dataType = {}
with h5py.File(h5f, 'a') as f:
labels = f.keys()
labels += f['/Constitutive'].keys()
labels += f['/Crystallite'].keys()
labels += ['Vx', 'Vy', "Vz"]
# remove group names as they do not contain real data
# TODO: use h5py/H5table API to detect dataset name to
# avoid the need for namespace pruning.
labels.remove('Constitutive')
labels.remove('Crystallite')
labels.remove('Geometry')
# loop through remaining labels
for label in labels:
dataType, h5Path = lables_to_path(label)
h5f_dataType[label] = dataType
h5f_dataDim[label] = " ".join(map(str,f[h5Path].shape))
h5f_dataPath[label] = h5Path
# ----- constructing xdmf elements ----- #
root = ET.Element("Xdmf", version='3.3')
root.set('xmlns:xi', "http://www.w3.org/2001/XInclude")
root.append(ET.Comment('Generated Xdmf wrapper for DAMASK H5 output'))
# usually there should only be ONE domain
domain = ET.SubElement(root, 'Domain',
Name=h5f_base.split(".")[0])
# use global topology through reference
grid = ET.SubElement(domain, 'Grid', GridType="Uniform")
# geometry section
geometry = ET.SubElement(grid, 'Geometry', GeometryType="VXVYVZ")
for vector in ["Vz", "Vy", "Vx"]:
dataitem = ET.SubElement(geometry, "DataItem",
DataType="Float",
Dimensions=h5f_dataDim[vector],
Name=vector,
Format="HDF")
dataitem.text = h5f_base.split("/")[-1] + ":{}".format(h5f_dataPath[vector])
# topology section
# TODO: support for other format based on given option
meshDim = [h5f_dataDim["Vz"], h5f_dataDim["Vy"], h5f_dataDim["Vx"]]
topology = ET.SubElement(grid, 'Topology',
TopologyType="3DRectMesh",
Dimensions=" ".join(map(str, meshDim)))
# attributes section
# Question: how to properly handle data mapping for multiphase situations
labelsProcessed = ['Vx', 'Vy', 'Vz']
# walk through each attribute
for label in labels:
if label in labelsProcessed: continue
print("adding {}...".format(label))
attr = ET.SubElement(grid, 'Attribute',
Name=label,
Type="None",
Center="Cell")
dataitem = ET.SubElement(attr, 'DataItem',
Name=label,
Format='HDF',
Dimensions=h5f_dataDim[label])
dataitem.text = h5f_base + ":" + h5f_dataPath[label]
# update progress list
labelsProcessed.append(label)
# pretty print the xdmf(xml) file content
xmlstr = minidom.parseString(ET.tostring(root)).toprettyxml(indent="\t")
xmlstr = addTopLvlCmt(xmlstr, '<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd" []>')
# write str to file through native python API
with open(h5f.replace(".h5", ".xmf"), 'w') as f:
f.write(xmlstr)
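A hedged illustration of the `addTopLvlCmt()` hack above (assuming the helper is in scope): the DOCTYPE line is spliced in directly after ElementTree's XML declaration, which ElementTree cannot emit by itself.

```python
# Hedged sketch: splice a DOCTYPE line after the XML declaration.
xmlstr = '<?xml version="1.0" ?>\n<Xdmf Version="3.3"/>'
print(addTopLvlCmt(xmlstr, '<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd" []>'))
# <?xml version="1.0" ?>
# <!DOCTYPE Xdmf SYSTEM "Xdmf.dtd" []>
# <Xdmf Version="3.3"/>
```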

View File

@ -1,191 +0,0 @@
#!/usr/bin/env python2.7
# -*- coding: UTF-8 no BOM -*-
import os
import vtk
import damask
from vtk.util import numpy_support
from optparse import OptionParser
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName, damask.version])
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
msg = "Add scalars, vectors, and/or an RGB tuple from"
msg += "an HDF5 to existing VTK rectilinear grid (.vtr/.vtk)."
parser = OptionParser(option_class=damask.extendableOption,
usage='%prog options [file[s]]',
description=msg,
version=scriptID)
parser.add_option('--vtk',
dest='vtk',
type='string', metavar='string',
help='VTK file name')
parser.add_option('--inplace',
dest='inplace',
action='store_true',
help='modify VTK file in-place')
parser.add_option('-r', '--render',
dest='render',
action='store_true',
help='open output in VTK render window')
parser.add_option('-d', '--data',
dest='data',
action='extend', metavar='<string LIST>',
help='scalar/vector value(s) label(s)')
parser.add_option('-t', '--tensor',
dest='tensor',
action='extend', metavar='<string LIST>',
help='tensor (3x3) value label(s)')
parser.add_option('-c', '--color',
dest='color',
action='extend', metavar='<string LIST>',
help='RGB color tuple label')
parser.add_option('-m',
'--mode',
dest='mode',
metavar='string',
type='choice', choices=['cell', 'point'],
help='cell-centered or point-centered coordinates')
parser.set_defaults(data=[],
tensor=[],
color=[],
mode='cell',
inplace=False,
render=False)
(options, filenames) = parser.parse_args()
# ----- Legacy VTK format support ----- #
if os.path.splitext(options.vtk)[1] == '.vtr':
reader = vtk.vtkXMLRectilinearGridReader()
reader.SetFileName(options.vtk)
reader.Update()
rGrid = reader.GetOutput()
elif os.path.splitext(options.vtk)[1] == '.vtk':
reader = vtk.vtkGenericDataObjectReader()
reader.SetFileName(options.vtk)
reader.Update()
rGrid = reader.GetRectilinearGridOutput()
else:
parser.error('Unsupported VTK file type extension.')
Npoints = rGrid.GetNumberOfPoints()
Ncells = rGrid.GetNumberOfCells()
# ----- Summary output (Sanity Check) ----- #
msg = '{}: {} points and {} cells...'.format(options.vtk,
Npoints,
Ncells)
damask.util.croak(msg)
# ----- Read HDF5 file ----- #
# NOTE:
# --> It is possible in the future we are trying to add data
# from different increment into the same VTK file, but
# this feature is not supported for the moment.
# --> Let it fail if the HDF5 file is invalid; the Python
# interpreter should be able to catch this error.
h5f = damask.H5Table(filenames[0], new_file=False)
# ----- Process data ----- #
featureToAdd = {'data': options.data,
'tensor': options.tensor,
'color': options.color}
VTKarray = {} # store all vtkData in dict, then ship them to file
for dataType in featureToAdd.keys():
featureNames = featureToAdd[dataType]
for featureName in featureNames:
VTKtype = vtk.VTK_DOUBLE
VTKdata = h5f.get_data(featureName)
if dataType == 'color':
VTKtype = vtk.VTK_UNSIGNED_CHAR
VTKdata = (VTKdata*255).astype(int)
elif dataType == 'tensor':
# force symmetry for tensor-type data
VTKdata[:, 1] = VTKdata[:, 3] = 0.5*(VTKdata[:, 1]+VTKdata[:, 3])
VTKdata[:, 2] = VTKdata[:, 6] = 0.5*(VTKdata[:, 2]+VTKdata[:, 6])
VTKdata[:, 5] = VTKdata[:, 7] = 0.5*(VTKdata[:, 5]+VTKdata[:, 7])
# use vtk built-in numpy support to add data (much faster)
# NOTE:
# --> deep copy is necessary here, otherwise memory leak could occur
VTKarray[featureName] = numpy_support.numpy_to_vtk(num_array=VTKdata,
deep=True,
array_type=VTKtype)
VTKarray[featureName].SetName(featureName)
# ----- ship data to vtkGrid ----- #
mode = options.mode
damask.util.croak('{} mode...'.format(mode))
# NOTE:
# --> For an unknown reason, ParaView only recognizes one
# tensor attribute per cell, so it is safer to add
# only one attribute as a tensor.
for dataType in featureToAdd.keys():
featureNames = featureToAdd[dataType]
for featureName in featureNames:
if dataType == 'color':
if mode == 'cell':
rGrid.GetCellData().SetScalars(VTKarray[featureName])
elif mode == 'point':
rGrid.GetPointData().SetScalars(VTKarray[featureName])
elif dataType == 'tensor':
if mode == 'cell':
rGrid.GetCellData().SetTensors(VTKarray[featureName])
elif mode == 'point':
rGrid.GetPointData().SetTensors(VTKarray[featureName])
else:
if mode == 'cell':
rGrid.GetCellData().AddArray(VTKarray[featureName])
elif mode == 'point':
rGrid.GetPointData().AddArray(VTKarray[featureName])
rGrid.Modified()
if vtk.VTK_MAJOR_VERSION <= 5:
rGrid.Update()
# ----- write Grid to VTK file ----- #
writer = vtk.vtkXMLRectilinearGridWriter()
writer.SetDataModeToBinary()
writer.SetCompressorTypeToZLib()
vtkFileN = os.path.splitext(options.vtk)[0]
vtkExtsn = '.vtr' if options.inplace else '_added.vtr'
writer.SetFileName(vtkFileN+vtkExtsn)
if vtk.VTK_MAJOR_VERSION <= 5:
writer.SetInput(rGrid)
else:
writer.SetInputData(rGrid)
writer.Write()
# ----- render results from script ----- #
if options.render:
mapper = vtk.vtkDataSetMapper()
mapper.SetInputData(rGrid)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# Create the graphics structure. The renderer renders into the
# render window. The render window interactor captures mouse events
# and will perform appropriate camera or actor manipulation
# depending on the nature of the events.
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
ren.AddActor(actor)
ren.SetBackground(1, 1, 1)
renWin.SetSize(200, 200)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
iren.Initialize()
renWin.Render()
iren.Start()

View File

@ -1,135 +0,0 @@
#!/usr/bin/env python2.7
# -*- coding: UTF-8 no BOM -*-
# ------------------------------------------------------------------ #
# NOTE: #
# 1. It might be a good idea to separate IO and calculation. #
# 2. Some of the calculations could be useful in other situations, #
# so why not build a math_util or math_sup module that contains #
# all the useful functions? #
# ------------------------------------------------------------------ #
import os
import vtk
import numpy as np
import damask
from optparse import OptionParser
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName, damask.version])
# ----- HELPER FUNCTION ----- #
def getMeshFromXYZ(xyzArray, mode):
"""Calc Vx,Vy,Vz vectors for vtk rectangular mesh"""
# NOTE:
# --> np.unique will automatically sort the list
# --> although not exactly O(1), the mesh dimensions should be
# small anyway, so this is still lightweight.
dim = xyzArray.shape[1] # 2D:2, 3D:3
coords = [np.unique(xyzArray[:, i]) for i in range(dim)]
if mode == 'cell':
# since x, y, z might not have the same number of elements,
# we have to deal with them individually
for ri in range(dim):
vctr_pt = coords[ri]
vctr_cell = np.empty(len(vctr_pt)+1)
# calculate first and last end point
vctr_cell[0] = vctr_pt[0] - 0.5*abs(vctr_pt[1] - vctr_pt[0])
vctr_cell[-1] = vctr_pt[-1] + 0.5*abs(vctr_pt[-2] - vctr_pt[-1])
for cj in range(1, len(vctr_cell)-1):
vctr_cell[cj] = 0.5*(vctr_pt[cj-1] + vctr_pt[cj])
# update the coords
coords[ri] = vctr_cell
if dim < 3:
coords.append([0]) # expand to a 3D with 0 for z
# auxiliary description
grid = np.array(map(len, coords), 'i')
N = grid.prod() if mode == 'point' else (grid-1).prod()
return coords, grid, N
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
msg = "Create regular voxel grid from points in an ASCIItable."
parser = OptionParser(option_class=damask.extendableOption,
usage='%prog options [file[s]]',
description=msg,
version=scriptID)
parser.add_option('-m',
'--mode',
dest='mode',
metavar='string',
type='choice', choices=['cell', 'point'],
help='cell-centered or point-centered coordinates')
parser.add_option('-p',
'--pos', '--position',
dest='pos',
type='string', metavar='string',
help='label of coordinates [%default]')
parser.set_defaults(mode='cell',
pos='pos')
(options, filenames) = parser.parse_args()
# ----- loop over input files ----- #
for name in filenames:
try:
h5f = damask.H5Table(name, new_file=False)
except:
continue
damask.util.report(scriptName, name)
# ----- read xyzArray from HDF5 file ----- #
xyzArray = h5f.get_data(options.pos)
# ----- figure out size and grid ----- #
coords, grid, N = getMeshFromXYZ(xyzArray, options.mode)
# ----- process data ----- #
rGrid = vtk.vtkRectilinearGrid()
# WARNING: list expansion does not work here as these are
# just pointers for a vtk instance. Simply put,
# DON'T USE
# [<VTK_CONSTRUCTOR>] * <NUM_OF_ELEMENTS>
coordArray = [vtk.vtkDoubleArray(),
vtk.vtkDoubleArray(),
vtk.vtkDoubleArray()]
rGrid.SetDimensions(*grid)
for i, points in enumerate(coords):
for point in points:
coordArray[i].InsertNextValue(point)
rGrid.SetXCoordinates(coordArray[0])
rGrid.SetYCoordinates(coordArray[1])
rGrid.SetZCoordinates(coordArray[2])
# ----- output result ----- #
dirPath = os.path.split(name)[0]
if name:
writer = vtk.vtkXMLRectilinearGridWriter()
writer.SetCompressorTypeToZLib()
writer.SetDataModeToBinary()
# getting the name is a little bit tricky
vtkFileName = os.path.splitext(os.path.split(name)[1])[0]
vtkFileName += '_{}({})'.format(options.pos, options.mode)
vtkFileName += '.' + writer.GetDefaultFileExtension()
writer.SetFileName(os.path.join(dirPath, vtkFileName))
else:
writer = vtk.vtkDataSetWriter()
writer.SetHeader('# powered by '+scriptID)
writer.WriteToOutputStringOn()
if vtk.VTK_MAJOR_VERSION <= 5:
writer.SetInput(rGrid)
else:
writer.SetInputData(rGrid)
writer.Write()
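A hedged illustration of the `cell` mode in `getMeshFromXYZ()` above (assuming the helper is in scope and Python 2 semantics, where `map()` returns a list): point coordinates become cell-boundary coordinates via midpoints between neighbours plus half a spacing beyond each end.

```python
# Hedged sketch: 3x2x2 grid of points -> cell boundaries per axis and cell count.
import numpy as np

pts = np.array([[x, y, z] for z in (0., 1.) for y in (0., 1.) for x in (0., 1., 2.)])
coords, grid, N = getMeshFromXYZ(pts, 'cell')
print(coords[0])    # [-0.5  0.5  1.5  2.5]  boundaries bracketing points at 0, 1, 2
print(grid, N)      # [4 3 3] 12             boundary counts per axis, number of cells
```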

167
processing/post/marc_to_vtk.py Executable file
View File

@ -0,0 +1,167 @@
#!/usr/bin/env python2.7
# -*- coding: UTF-8 no BOM -*-
import os,re
import argparse
import damask
import vtk, numpy as np
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName, damask.version])
parser = argparse.ArgumentParser(description='Convert from Marc input file format to VTK', version = scriptID)
parser.add_argument('filename', type=str, nargs='+', help='files to convert')
args = parser.parse_args()
files = args.filename
if type(files) is str:
files = [files]
for f in files:
with open(f, 'r') as marcfile:
marctext = marcfile.read()
# Extract connectivity chunk from file...
connectivity_text = re.findall(r'connectivity[\n\r]+(.*?)[\n\r]+[a-zA-Z]', marctext, flags=(re.MULTILINE | re.DOTALL))[0]
connectivity_lines = re.split(r'[\n\r]+', connectivity_text, flags=(re.MULTILINE | re.DOTALL))
connectivity_header = connectivity_lines[0]
connectivity_lines = connectivity_lines[1:]
# Construct element map
elements = dict(map(lambda line:
(
int(line[0:10]), # index
{
'type': int(line[10:20]),
'verts': list(map(int, re.split(r' +', line[20:].strip())))
}
), connectivity_lines))
# Extract coordinate chunk from file
coordinates_text = re.findall(r'coordinates[\n\r]+(.*?)[\n\r]+[a-zA-Z]', marctext, flags=(re.MULTILINE | re.DOTALL))[0]
coordinates_lines = re.split(r'[\n\r]+', coordinates_text, flags=(re.MULTILINE | re.DOTALL))
coordinates_header = coordinates_lines[0]
coordinates_lines = coordinates_lines[1:]
# Marc input files do not use "e" in scientific notation; this adds it and converts to float
fl_format = lambda string: float(re.sub(r'(\d)([\+\-])', r'\1e\2', string))
# Construct coordinate map
coordinates = dict(map(lambda line:
(
int(line[0:10]),
np.array([
fl_format(line[10:30]),
fl_format(line[30:50]),
fl_format(line[50:70])
])
), coordinates_lines))
# Subdivide volumes
grid = vtk.vtkUnstructuredGrid()
vertex_count = len(coordinates)
edge_to_vert = dict() # when edges are subdivided, a new vertex in the middle is produced and placed in here
ordered_pair = lambda a, b: (a, b) if a < b else (b, a) # edges are bidirectional
def subdivide_edge(vert1, vert2):
edge = ordered_pair(vert1, vert2)
if edge in edge_to_vert:
return edge_to_vert[edge]
newvert = len(coordinates) + 1
coordinates[newvert] = 0.5 * (coordinates[vert1] + coordinates[vert2]) # Average
edge_to_vert[edge] = newvert
return newvert
for el_id in range(1, len(elements) + 1):
el = elements[el_id]
if el['type'] == 7:
# Hexahedron, subdivided
# There may be a better way to iterate over these, but this is consistent
# with the ordering scheme provided at https://damask.mpie.de/pub/Documentation/ElementType
subverts = np.zeros((3,3,3), dtype=int)
# Get corners
subverts[0, 0, 0] = el['verts'][0]
subverts[2, 0, 0] = el['verts'][1]
subverts[2, 2, 0] = el['verts'][2]
subverts[0, 2, 0] = el['verts'][3]
subverts[0, 0, 2] = el['verts'][4]
subverts[2, 0, 2] = el['verts'][5]
subverts[2, 2, 2] = el['verts'][6]
subverts[0, 2, 2] = el['verts'][7]
# lower edges
subverts[1, 0, 0] = subdivide_edge(subverts[0, 0, 0], subverts[2, 0, 0])
subverts[2, 1, 0] = subdivide_edge(subverts[2, 0, 0], subverts[2, 2, 0])
subverts[1, 2, 0] = subdivide_edge(subverts[2, 2, 0], subverts[0, 2, 0])
subverts[0, 1, 0] = subdivide_edge(subverts[0, 2, 0], subverts[0, 0, 0])
# middle edges
subverts[0, 0, 1] = subdivide_edge(subverts[0, 0, 0], subverts[0, 0, 2])
subverts[2, 0, 1] = subdivide_edge(subverts[2, 0, 0], subverts[2, 0, 2])
subverts[2, 2, 1] = subdivide_edge(subverts[2, 2, 0], subverts[2, 2, 2])
subverts[0, 2, 1] = subdivide_edge(subverts[0, 2, 0], subverts[0, 2, 2])
# top edges
subverts[1, 0, 2] = subdivide_edge(subverts[0, 0, 2], subverts[2, 0, 2])
subverts[2, 1, 2] = subdivide_edge(subverts[2, 0, 2], subverts[2, 2, 2])
subverts[1, 2, 2] = subdivide_edge(subverts[2, 2, 2], subverts[0, 2, 2])
subverts[0, 1, 2] = subdivide_edge(subverts[0, 2, 2], subverts[0, 0, 2])
# then faces... The edge_to_vert addition is due to there being two ways
# to calculate a face, depending which opposite vertices are used to subdivide
subverts[1, 1, 0] = subdivide_edge(subverts[1, 0, 0], subverts[1, 2, 0])
edge_to_vert[ordered_pair(subverts[0, 1, 0], subverts[2, 1, 0])] = subverts[1, 1, 0]
subverts[1, 0, 1] = subdivide_edge(subverts[1, 0, 0], subverts[1, 0, 2])
edge_to_vert[ordered_pair(subverts[0, 0, 1], subverts[2, 0, 1])] = subverts[1, 0, 1]
subverts[2, 1, 1] = subdivide_edge(subverts[2, 1, 0], subverts[2, 1, 2])
edge_to_vert[ordered_pair(subverts[2, 0, 1], subverts[2, 2, 1])] = subverts[2, 1, 1]
subverts[1, 2, 1] = subdivide_edge(subverts[1, 2, 0], subverts[1, 2, 2])
edge_to_vert[ordered_pair(subverts[0, 2, 1], subverts[2, 2, 1])] = subverts[1, 2, 1]
subverts[0, 1, 1] = subdivide_edge(subverts[0, 1, 0], subverts[0, 1, 2])
edge_to_vert[ordered_pair(subverts[0, 0, 1], subverts[0, 2, 1])] = subverts[0, 1, 1]
subverts[1, 1, 2] = subdivide_edge(subverts[1, 0, 2], subverts[1, 2, 2])
edge_to_vert[ordered_pair(subverts[0, 1, 2], subverts[2, 1, 2])] = subverts[1, 1, 2]
# and finally the center. There are three ways to calculate, but elements should
# not intersect, so the edge_to_vert part isn't needed here.
subverts[1, 1, 1] = subdivide_edge(subverts[1, 1, 0], subverts[1, 1, 2])
# Now make the hexahedron subelements
# order in which vtk expects vertices for a hexahedron
order = np.array([(0,0,0),(1,0,0),(1,1,0),(0,1,0),(0,0,1),(1,0,1),(1,1,1),(0,1,1)])
for z in range(2):
for y in range(2):
for x in range(2):
hex_ = vtk.vtkHexahedron()
for vert_id in range(8):
coord = order[vert_id] + (x, y, z)
hex_.GetPointIds().SetId(vert_id, subverts[coord[0], coord[1], coord[2]] - 1) # minus one, since vtk starts at zero but marc starts at one
grid.InsertNextCell(hex_.GetCellType(), hex_.GetPointIds())
else:
damask.util.croak('Unsupported Marc element type: {} (skipping)'.format(el['type']))
# Load all points
points = vtk.vtkPoints()
for point in range(1, len(coordinates) + 1): # marc indices start at 1
points.InsertNextPoint(coordinates[point].tolist())
grid.SetPoints(points)
# grid now contains the elements from the given marc file
writer = vtk.vtkXMLUnstructuredGridWriter()
writer.SetFileName(re.sub(r'\..+', ".vtu", f)) # *.vtk extension does not work in paraview
#writer.SetCompressorTypeToZLib()
if vtk.VTK_MAJOR_VERSION <= 5: writer.SetInput(grid)
else: writer.SetInputData(grid)
writer.Write()
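To clarify the bookkeeping above, a self-contained, hedged sketch of the edge-midpoint memoization: each undirected edge yields exactly one new midpoint vertex regardless of traversal direction, which keeps the eight sub-hexahedra of a subdivided Marc type-7 element conforming.

```python
# Hedged sketch: edge-midpoint memoization as used in subdivide_edge() above.
import numpy as np

coordinates = {1: np.array([0., 0., 0.]), 2: np.array([1., 0., 0.])}   # two existing vertices
edge_to_vert = {}
ordered_pair = lambda a, b: (a, b) if a < b else (b, a)                 # edges are undirected

def subdivide_edge(vert1, vert2):
    edge = ordered_pair(vert1, vert2)
    if edge in edge_to_vert:                        # midpoint already created for this edge
        return edge_to_vert[edge]
    newvert = len(coordinates) + 1
    coordinates[newvert] = 0.5*(coordinates[vert1] + coordinates[vert2])
    edge_to_vert[edge] = newvert
    return newvert

assert subdivide_edge(1, 2) == subdivide_edge(2, 1) == 3                # created once, reused after
```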

View File

@ -0,0 +1,206 @@
#!/usr/bin/env python2.7
# -*- coding: UTF-8 no BOM -*-
import os,vtk
import damask
from vtk.util import numpy_support
from collections import defaultdict
from optparse import OptionParser
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
msg = "Add scalars, vectors, and/or an RGB tuple from"
msg += "an ASCIItable to existing VTK grid (.vtr/.vtk/.vtu)."
parser = OptionParser(option_class=damask.extendableOption,
usage='%prog options [file[s]]',
description = msg,
version = scriptID)
parser.add_option( '--vtk',
dest = 'vtk',
type = 'string', metavar = 'string',
help = 'VTK file name')
parser.add_option( '--inplace',
dest = 'inplace',
action = 'store_true',
help = 'modify VTK file in-place')
parser.add_option('-r', '--render',
dest = 'render',
action = 'store_true',
help = 'open output in VTK render window')
parser.add_option('-d', '--data',
dest = 'data',
action = 'extend', metavar = '<string LIST>',
help = 'scalar/vector value(s) label(s)')
parser.add_option('-t', '--tensor',
dest = 'tensor',
action = 'extend', metavar = '<string LIST>',
help = 'tensor (3x3) value label(s)')
parser.add_option('-c', '--color',
dest = 'color',
action = 'extend', metavar = '<string LIST>',
help = 'RGB color tuple label')
parser.set_defaults(data = [],
tensor = [],
color = [],
inplace = False,
render = False,
)
(options, filenames) = parser.parse_args()
if not options.vtk: parser.error('No VTK file specified.')
if not os.path.exists(options.vtk): parser.error('VTK file does not exist.')
if os.path.splitext(options.vtk)[1] == '.vtr':
reader = vtk.vtkXMLRectilinearGridReader()
reader.SetFileName(options.vtk)
reader.Update()
rGrid = reader.GetOutput()
writer = vtk.vtkXMLRectilinearGridWriter()
writer.SetFileName(os.path.splitext(options.vtk)[0]+('.vtr' if options.inplace else '_added.vtr'))
elif os.path.splitext(options.vtk)[1] == '.vtk':
reader = vtk.vtkGenericDataObjectReader()
reader.SetFileName(options.vtk)
reader.Update()
rGrid = reader.GetRectilinearGridOutput()
writer = vtk.vtkXMLRectilinearGridWriter()
writer.SetFileName(os.path.splitext(options.vtk)[0]+('.vtr' if options.inplace else '_added.vtr'))
elif os.path.splitext(options.vtk)[1] == '.vtu':
reader = vtk.vtkXMLUnstructuredGridReader()
reader.SetFileName(options.vtk)
reader.Update()
rGrid = reader.GetOutput()
writer = vtk.vtkXMLUnstructuredGridWriter()
writer.SetFileName(os.path.splitext(options.vtk)[0]+('.vtu' if options.inplace else '_added.vtu'))
else:
parser.error('Unsupported VTK file type extension.')
Npoints = rGrid.GetNumberOfPoints()
Ncells = rGrid.GetNumberOfCells()
damask.util.croak('{}: {} points and {} cells...'.format(options.vtk,Npoints,Ncells))
# --- loop over input files -------------------------------------------------------------------------
if filenames == []: filenames = [None]
for name in filenames:
try: table = damask.ASCIItable(name = name,
buffered = False,
readonly = True)
except: continue
damask.util.report(scriptName, name)
# --- interpret header ----------------------------------------------------------------------------
table.head_read()
remarks = []
errors = []
VTKarray = {}
active = defaultdict(list)
for datatype,dimension,label in [['data',99,options.data],
['tensor',9,options.tensor],
['color' ,3,options.color],
]:
for i,dim in enumerate(table.label_dimension(label)):
me = label[i]
if dim == -1: remarks.append('{} "{}" not found...'.format(datatype,me))
elif dim > dimension: remarks.append('"{}" not of dimension {}...'.format(me,dimension))
else:
remarks.append('adding {} "{}"...'.format(datatype,me))
active[datatype].append(me)
if remarks != []: damask.util.croak(remarks)
if errors != []:
damask.util.croak(errors)
table.close(dismiss = True)
continue
# ------------------------------------------ process data ---------------------------------------
table.data_readArray([item for sublist in active.values() for item in sublist]) # read all requested data
for datatype,labels in active.items(): # loop over scalar,color
for me in labels: # loop over all requested items
VTKtype = vtk.VTK_DOUBLE
VTKdata = table.data[:, table.label_indexrange(me)].copy() # copy to force contiguous layout
if datatype == 'color':
VTKtype = vtk.VTK_UNSIGNED_CHAR
VTKdata = (VTKdata*255).astype(int) # translate to 0..255 UCHAR
elif datatype == 'tensor':
VTKdata[:,1] = VTKdata[:,3] = 0.5*(VTKdata[:,1]+VTKdata[:,3])
VTKdata[:,2] = VTKdata[:,6] = 0.5*(VTKdata[:,2]+VTKdata[:,6])
VTKdata[:,5] = VTKdata[:,7] = 0.5*(VTKdata[:,5]+VTKdata[:,7])
VTKarray[me] = numpy_support.numpy_to_vtk(num_array=VTKdata,deep=True,array_type=VTKtype)
VTKarray[me].SetName(me)
table.close() # close input ASCII table
# ------------------------------------------ add data ---------------------------------------
if len(table.data) == Npoints: mode = 'point'
elif len(table.data) == Ncells: mode = 'cell'
else:
damask.util.croak('Data count is incompatible with grid...')
continue
damask.util.croak('{} mode...'.format(mode))
for datatype,labels in active.items(): # loop over scalar,color
if datatype == 'color':
if mode == 'cell': rGrid.GetCellData().SetScalars(VTKarray[active['color'][0]])
elif mode == 'point': rGrid.GetPointData().SetScalars(VTKarray[active['color'][0]])
for me in labels: # loop over all requested items
if mode == 'cell': rGrid.GetCellData().AddArray(VTKarray[me])
elif mode == 'point': rGrid.GetPointData().AddArray(VTKarray[me])
rGrid.Modified()
if vtk.VTK_MAJOR_VERSION <= 5: rGrid.Update()
# ------------------------------------------ output result ---------------------------------------
writer.SetDataModeToBinary()
writer.SetCompressorTypeToZLib()
if vtk.VTK_MAJOR_VERSION <= 5: writer.SetInput(rGrid)
else: writer.SetInputData(rGrid)
writer.Write()
# ------------------------------------------ render result ---------------------------------------
if options.render:
mapper = vtk.vtkDataSetMapper()
mapper.SetInputData(rGrid)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# Create the graphics structure. The renderer renders into the
# render window. The render window interactor captures mouse events
# and will perform appropriate camera or actor manipulation
# depending on the nature of the events.
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
ren.AddActor(actor)
ren.SetBackground(1, 1, 1)
renWin.SetSize(200, 200)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
iren.Initialize()
renWin.Render()
iren.Start()
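
As a standalone illustration of the two data conversions performed in the processing loop above (off-diagonal symmetrization of 3x3 tensors stored as nine columns, and scaling of color data to the unsigned-char range), here is a minimal numpy sketch with made-up sample data; it does not touch VTK at all:

```python
import numpy as np

# hypothetical 3x3 tensor stored row-major as nine columns, as read from the ASCII table
tensor = np.array([[1.0, 0.2, 0.4,
                    0.6, 2.0, 0.8,
                    1.0, 1.2, 3.0]])

# average the off-diagonal pairs (columns 1/3, 2/6, 5/7), as done for datatype 'tensor'
tensor[:, 1] = tensor[:, 3] = 0.5*(tensor[:, 1] + tensor[:, 3])
tensor[:, 2] = tensor[:, 6] = 0.5*(tensor[:, 2] + tensor[:, 6])
tensor[:, 5] = tensor[:, 7] = 0.5*(tensor[:, 5] + tensor[:, 7])

# colors given in [0,1] are rescaled to the 0..255 range expected by VTK_UNSIGNED_CHAR
color = (np.array([[0.0, 0.5, 1.0]])*255).astype(int)

print(tensor)   # off-diagonal pairs are now equal
print(color)    # [[  0 127 255]]
```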

processing/pre/3DRVEfrom2Dang.py Normal file → Executable file
View File

View File

@ -162,6 +162,7 @@ subroutine CPFEM_init
write(6,'(/,a)') ' <<<+- CPFEM init -+>>>'
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
#include "compilation_info.f90"
flush(6)
endif mainProcess
! initialize stress and jacobian to zero
@ -242,8 +243,8 @@ subroutine CPFEM_init
write(6,'(a32,1x,6(i8,1x))') 'CPFEM_dcsdE: ', shape(CPFEM_dcsdE)
write(6,'(a32,1x,6(i8,1x),/)') 'CPFEM_dcsdE_knownGood: ', shape(CPFEM_dcsdE_knownGood)
write(6,'(a32,l1)') 'symmetricSolver: ', symmetricSolver
flush(6)
endif
flush(6)
end subroutine CPFEM_init

View File

@ -9,7 +9,7 @@ module CPFEM2
private
public :: &
CPFEM_general, &
CPFEM_age, &
CPFEM_initAll
contains
@ -127,6 +127,7 @@ subroutine CPFEM_init
write(6,'(/,a)') ' <<<+- CPFEM init -+>>>'
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
#include "compilation_info.f90"
flush(6)
endif mainProcess
! *** restore the last converged values of each essential variable from the binary file
@ -194,7 +195,6 @@ subroutine CPFEM_init
restartRead = .false.
endif
flush(6)
end subroutine CPFEM_init
@ -202,7 +202,7 @@ end subroutine CPFEM_init
!--------------------------------------------------------------------------------------------------
!> @brief perform initialization at first call, update variables and call the actual material model
!--------------------------------------------------------------------------------------------------
subroutine CPFEM_general(age, dt)
subroutine CPFEM_age()
use prec, only: &
pReal, &
pInt
@ -215,7 +215,6 @@ subroutine CPFEM_general(age, dt)
debug_levelExtensive, &
debug_levelSelective
use FEsolving, only: &
terminallyIll, &
restartWrite
use math, only: &
math_identity2nd, &
@ -254,114 +253,99 @@ subroutine CPFEM_general(age, dt)
crystallite_dPdF, &
crystallite_Tstar0_v, &
crystallite_Tstar_v
use homogenization, only: &
materialpoint_stressAndItsTangent, &
materialpoint_postResults
use IO, only: &
IO_write_jobRealFile, &
IO_warning
use DAMASK_interface
implicit none
real(pReal), intent(in) :: dt !< time increment
logical, intent(in) :: age !< age results
integer(pInt) :: i, k, l, m, ph, homog, mySource
character(len=1024) :: rankStr
character(len=32) :: rankStr
!*** age results and write restart data if requested
if (age) then
crystallite_F0 = crystallite_partionedF ! crystallite deformation (_subF is perturbed...)
crystallite_Fp0 = crystallite_Fp ! crystallite plastic deformation
crystallite_Lp0 = crystallite_Lp ! crystallite plastic velocity
crystallite_Fi0 = crystallite_Fi ! crystallite intermediate deformation
crystallite_Li0 = crystallite_Li ! crystallite intermediate velocity
crystallite_dPdF0 = crystallite_dPdF ! crystallite stiffness
crystallite_Tstar0_v = crystallite_Tstar_v ! crystallite 2nd Piola Kirchhoff stress
if (iand(debug_level(debug_CPFEM), debug_levelBasic) /= 0_pInt) &
write(6,'(a)') '<< CPFEM >> aging states'
forall ( i = 1:size(plasticState )) plasticState(i)%state0 = plasticState(i)%state ! copy state in this lenghty way because: A component cannot be an array if the encompassing structure is an array
do i = 1, size(sourceState)
do mySource = 1,phase_Nsources(i)
sourceState(i)%p(mySource)%state0 = sourceState(i)%p(mySource)%state ! copy state in this lenghty way because: A component cannot be an array if the encompassing structure is an array
enddo; enddo
if (iand(debug_level(debug_CPFEM), debug_levelBasic) /= 0_pInt) &
write(6,'(a)') '<< CPFEM >> aging states'
crystallite_F0 = crystallite_partionedF ! crystallite deformation (_subF is perturbed...)
crystallite_Fp0 = crystallite_Fp ! crystallite plastic deformation
crystallite_Lp0 = crystallite_Lp ! crystallite plastic velocity
crystallite_Fi0 = crystallite_Fi ! crystallite intermediate deformation
crystallite_Li0 = crystallite_Li ! crystallite intermediate velocity
crystallite_dPdF0 = crystallite_dPdF ! crystallite stiffness
crystallite_Tstar0_v = crystallite_Tstar_v ! crystallite 2nd Piola Kirchhoff stress
do homog = 1_pInt, material_Nhomogenization
homogState (homog)%state0 = homogState (homog)%state
thermalState (homog)%state0 = thermalState (homog)%state
damageState (homog)%state0 = damageState (homog)%state
vacancyfluxState (homog)%state0 = vacancyfluxState (homog)%state
hydrogenfluxState(homog)%state0 = hydrogenfluxState(homog)%state
enddo
forall (i = 1:size(plasticState)) plasticState(i)%state0 = plasticState(i)%state ! copy state in this lengthy way because: A component cannot be an array if the encompassing structure is an array
do i = 1, size(sourceState)
do mySource = 1,phase_Nsources(i)
sourceState(i)%p(mySource)%state0 = sourceState(i)%p(mySource)%state ! copy state in this lengthy way because: A component cannot be an array if the encompassing structure is an array
enddo; enddo
if (restartWrite) then
if (iand(debug_level(debug_CPFEM), debug_levelBasic) /= 0_pInt) &
write(6,'(a)') '<< CPFEM >> writing state variables of last converged step to binary files'
write(rankStr,'(a1,i0)')'_',worldrank
do homog = 1_pInt, material_Nhomogenization
homogState (homog)%state0 = homogState (homog)%state
thermalState (homog)%state0 = thermalState (homog)%state
damageState (homog)%state0 = damageState (homog)%state
vacancyfluxState (homog)%state0 = vacancyfluxState (homog)%state
hydrogenfluxState(homog)%state0 = hydrogenfluxState(homog)%state
enddo
call IO_write_jobRealFile(777,'recordedPhase'//trim(rankStr),size(material_phase))
write (777,rec=1) material_phase
close (777)
if (restartWrite) then
if (iand(debug_level(debug_CPFEM), debug_levelBasic) /= 0_pInt) &
write(6,'(a)') '<< CPFEM >> writing state variables of last converged step to binary files'
call IO_write_jobRealFile(777,'convergedF'//trim(rankStr),size(crystallite_F0))
write (777,rec=1) crystallite_F0
close (777)
write(rankStr,'(a1,i0)')'_',worldrank
call IO_write_jobRealFile(777,'convergedFp'//trim(rankStr),size(crystallite_Fp0))
write (777,rec=1) crystallite_Fp0
close (777)
call IO_write_jobRealFile(777,'recordedPhase'//trim(rankStr),size(material_phase))
write (777,rec=1) material_phase; close (777)
call IO_write_jobRealFile(777,'convergedFi'//trim(rankStr),size(crystallite_Fi0))
write (777,rec=1) crystallite_Fi0
close (777)
call IO_write_jobRealFile(777,'convergedF'//trim(rankStr),size(crystallite_F0))
write (777,rec=1) crystallite_F0; close (777)
call IO_write_jobRealFile(777,'convergedLp'//trim(rankStr),size(crystallite_Lp0))
write (777,rec=1) crystallite_Lp0
close (777)
call IO_write_jobRealFile(777,'convergedFp'//trim(rankStr),size(crystallite_Fp0))
write (777,rec=1) crystallite_Fp0; close (777)
call IO_write_jobRealFile(777,'convergedLi'//trim(rankStr),size(crystallite_Li0))
write (777,rec=1) crystallite_Li0
close (777)
call IO_write_jobRealFile(777,'convergedFi'//trim(rankStr),size(crystallite_Fi0))
write (777,rec=1) crystallite_Fi0; close (777)
call IO_write_jobRealFile(777,'convergeddPdF'//trim(rankStr),size(crystallite_dPdF0))
write (777,rec=1) crystallite_dPdF0
close (777)
call IO_write_jobRealFile(777,'convergedLp'//trim(rankStr),size(crystallite_Lp0))
write (777,rec=1) crystallite_Lp0; close (777)
call IO_write_jobRealFile(777,'convergedTstar'//trim(rankStr),size(crystallite_Tstar0_v))
write (777,rec=1) crystallite_Tstar0_v
close (777)
call IO_write_jobRealFile(777,'convergedLi'//trim(rankStr),size(crystallite_Li0))
write (777,rec=1) crystallite_Li0; close (777)
call IO_write_jobRealFile(777,'convergedStateConst'//trim(rankStr))
m = 0_pInt
writePlasticityInstances: do ph = 1_pInt, size(phase_plasticity)
do k = 1_pInt, plasticState(ph)%sizeState
do l = 1, size(plasticState(ph)%state0(1,:))
m = m+1_pInt
write(777,rec=m) plasticState(ph)%state0(k,l)
enddo; enddo
enddo writePlasticityInstances
close (777)
call IO_write_jobRealFile(777,'convergeddPdF'//trim(rankStr),size(crystallite_dPdF0))
write (777,rec=1) crystallite_dPdF0; close (777)
call IO_write_jobRealFile(777,'convergedStateHomog'//trim(rankStr))
m = 0_pInt
writeHomogInstances: do homog = 1_pInt, material_Nhomogenization
do k = 1_pInt, homogState(homog)%sizeState
do l = 1, size(homogState(homog)%state0(1,:))
m = m+1_pInt
write(777,rec=m) homogState(homog)%state0(k,l)
enddo; enddo
enddo writeHomogInstances
close (777)
call IO_write_jobRealFile(777,'convergedTstar'//trim(rankStr),size(crystallite_Tstar0_v))
write (777,rec=1) crystallite_Tstar0_v; close (777)
endif
endif
call IO_write_jobRealFile(777,'convergedStateConst'//trim(rankStr))
m = 0_pInt
writePlasticityInstances: do ph = 1_pInt, size(phase_plasticity)
do k = 1_pInt, plasticState(ph)%sizeState
do l = 1, size(plasticState(ph)%state0(1,:))
m = m+1_pInt
write(777,rec=m) plasticState(ph)%state0(k,l)
enddo; enddo
enddo writePlasticityInstances
close (777)
if (.not. terminallyIll) &
call materialpoint_stressAndItsTangent(.True., dt)
call IO_write_jobRealFile(777,'convergedStateHomog'//trim(rankStr))
m = 0_pInt
writeHomogInstances: do homog = 1_pInt, material_Nhomogenization
do k = 1_pInt, homogState(homog)%sizeState
do l = 1, size(homogState(homog)%state0(1,:))
m = m+1_pInt
write(777,rec=m) homogState(homog)%state0(k,l)
enddo; enddo
enddo writeHomogInstances
close (777)
end subroutine CPFEM_general
endif
if (iand(debug_level(debug_CPFEM), debug_levelBasic) /= 0_pInt) &
write(6,'(a)') '<< CPFEM >> done aging states'
end subroutine CPFEM_age
end module CPFEM2

View File

@ -456,21 +456,21 @@ program DAMASK_spectral
fileOffset = fileOffset + sum(outputSize) ! forward to current file position
endif
!--------------------------------------------------------------------------------------------------
! loopping over loadcases
! looping over loadcases
loadCaseLooping: do currentLoadCase = 1_pInt, size(loadCases)
time0 = time ! currentLoadCase start time
guess = loadCases(currentLoadCase)%followFormerTrajectory ! change of load case? homogeneous guess for the first inc
!--------------------------------------------------------------------------------------------------
! loop oper incs defined in input file for current currentLoadCase
! loop over incs defined in input file for current currentLoadCase
incLooping: do inc = 1_pInt, loadCases(currentLoadCase)%incs
totalIncsCounter = totalIncsCounter + 1_pInt
!--------------------------------------------------------------------------------------------------
! forwarding time
timeIncOld = timeinc
timeIncOld = timeinc ! last timeinc that brought former inc to an end
if (loadCases(currentLoadCase)%logscale == 0_pInt) then ! linear scale
timeinc = loadCases(currentLoadCase)%time/real(loadCases(currentLoadCase)%incs,pReal) ! only valid for given linear time scale. will be overwritten later in case loglinear scale is used
timeinc = loadCases(currentLoadCase)%time/real(loadCases(currentLoadCase)%incs,pReal)
else
if (currentLoadCase == 1_pInt) then ! 1st currentLoadCase of logarithmic scale
if (inc == 1_pInt) then ! 1st inc of 1st currentLoadCase of logarithmic scale
@ -486,8 +486,13 @@ program DAMASK_spectral
real(loadCases(currentLoadCase)%incs ,pReal)))
endif
endif
<<<<<<< HEAD
timeinc = timeinc / 2.0_pReal**real(cutBackLevel,pReal) ! depending on cut back level, decrease time step
! QUESTION: what happens to inc-counter when cutbacklevel is not zero? not clear where half an inc gets incremented..?
=======
timeinc = timeinc / real(subStepFactor,pReal)**real(cutBackLevel,pReal) ! depending on cut back level, decrease time step
>>>>>>> spectralSolver-cutbackfix
skipping: if (totalIncsCounter < restartInc) then ! not yet at restart inc?
time = time + timeinc ! just advance time, skip already performed calculation
guess = .true. ! QUESTION:why forced guessing instead of inheriting loadcase preference
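
The merged hunk replaces the hard-coded halving by the configurable subStepFactor when cutting back the time step. A small sketch of the resulting step size, using hypothetical load-case numbers (the function name is made up for illustration):

```python
def scaled_timeinc(loadcase_time, incs, sub_step_factor, cut_back_level):
    """Nominal increment on a linear time scale, reduced by subStepFactor per cut-back level."""
    timeinc = loadcase_time / incs
    return timeinc / sub_step_factor**cut_back_level

# hypothetical load case: 10 s in 5 increments, subStepFactor 2
print(scaled_timeinc(10.0, 5, 2, 0))   # 2.0  full increment
print(scaled_timeinc(10.0, 5, 2, 2))   # 0.5  after two cut-backs
```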
@ -512,11 +517,11 @@ program DAMASK_spectral
's: Increment ', inc, '/', loadCases(currentLoadCase)%incs,&
'-', stepFraction, '/', subStepFactor**cutBackLevel,&
' of load case ', currentLoadCase,'/',size(loadCases)
flush(6)
write(incInfo,'(a,'//IO_intOut(totalIncsCounter)//',a,'//IO_intOut(sum(loadCases%incs))//&
',a,'//IO_intOut(stepFraction)//',a,'//IO_intOut(subStepFactor**cutBackLevel)//')') &
'Increment ',totalIncsCounter,'/',sum(loadCases%incs),&
'-',stepFraction, '/', subStepFactor**cutBackLevel
flush(6)
!--------------------------------------------------------------------------------------------------
! forward fields
@ -545,7 +550,7 @@ program DAMASK_spectral
end select
case(FIELD_THERMAL_ID); call spectral_thermal_forward()
case(FIELD_DAMAGE_ID); call spectral_damage_forward()
case(FIELD_DAMAGE_ID); call spectral_damage_forward()
end select
enddo
@ -592,6 +597,7 @@ program DAMASK_spectral
stagIter = stagIter + 1_pInt
stagIterate = stagIter < stagItMax &
.and. all(solres(:)%converged) &
<<<<<<< HEAD
.and. .not. all(solres(:)%stagConverged)
enddo
@ -622,12 +628,41 @@ program DAMASK_spectral
endif
if (.not. cutBack) then
=======
.and. .not. all(solres(:)%stagConverged) ! stationary with respect to staggered iteration
enddo
!--------------------------------------------------------------------------------------------------
! check solution for either advance or retry
if ( (continueCalculation .or. all(solres(:)%converged .and. solres(:)%stagConverged)) & ! don't care or did converge
.and. .not. solres(1)%termIll) then ! and acceptable solution found
timeIncOld = timeinc
cutBack = .false.
guess = .true. ! start guessing after first converged (sub)inc
>>>>>>> spectralSolver-cutbackfix
if (worldrank == 0) then
write(statUnit,*) totalIncsCounter, time, cutBackLevel, &
solres%converged, solres%iterationsNeeded ! write statistics about accepted solution
solres%converged, solres%iterationsNeeded
flush(statUnit)
endif
elseif (cutBackLevel < maxCutBack) then ! further cutbacking tolerated?
cutBack = .true.
stepFraction = (stepFraction - 1_pInt) * subStepFactor ! adjust to new denominator
cutBackLevel = cutBackLevel + 1_pInt
time = time - timeinc ! rewind time
timeinc = timeinc/real(subStepFactor,pReal) ! cut timestep
write(6,'(/,a)') ' cutting back '
else ! no more options to continue
call IO_warning(850_pInt)
call MPI_file_close(resUnit,ierr)
close(statUnit)
call quit(-1_pInt*(lastRestartWritten+1_pInt)) ! quit and provide information about last restart inc written
endif
<<<<<<< HEAD
=======
>>>>>>> spectralSolver-cutbackfix
enddo subStepLooping
cutBackLevel = max(0_pInt, cutBackLevel - 1_pInt) ! try half number of subincs next inc
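
The accept/cut-back branch above rewinds time, refines the step, and re-expresses the sub-increment counter in the new denominator. A schematic Python model of that bookkeeping (variable names mirror the Fortran; the convergence flag is a stand-in for the real solver result):

```python
def react(converged, time, timeinc, step_fraction, cut_back_level,
          sub_step_factor=2, max_cut_back=3):
    """Update (time, timeinc, step_fraction, cut_back_level) after one sub-increment."""
    if converged:                                              # accept and carry on
        return time, timeinc, step_fraction, cut_back_level
    if cut_back_level < max_cut_back:                          # retry with a finer step
        step_fraction = (step_fraction - 1) * sub_step_factor  # same point, new denominator
        cut_back_level += 1
        time -= timeinc                                        # rewind to the start of the failed step
        timeinc /= sub_step_factor                             # cut the time step
        return time, timeinc, step_fraction, cut_back_level
    raise RuntimeError('no cut-back level left')               # corresponds to IO_warning(850) and quit

# hypothetical failed sub-increment at t = 2.0 with timeinc = 2.0
print(react(False, 2.0, 2.0, 1, 0))   # (0.0, 1.0, 0, 1): rewound, step refined
```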
@ -645,9 +680,14 @@ program DAMASK_spectral
if (mod(inc,loadCases(currentLoadCase)%outputFrequency) == 0_pInt) then ! at output frequency
if (worldrank == 0) &
write(6,'(1/,a)') ' ... writing results to file ......................................'
flush(6)
call materialpoint_postResults()
call MPI_file_seek (resUnit,fileOffset,MPI_SEEK_SET,ierr)
<<<<<<< HEAD
if (ierr /=0_pInt) call IO_error(894_pInt, ext_msg='MPI_file_seek')
=======
if (ierr /= 0_pInt) call IO_error(894_pInt, ext_msg='MPI_file_seek')
>>>>>>> spectralSolver-cutbackfix
do i=1, size(materialpoint_results,3)/(maxByteOut/(materialpoint_sizeResults*pReal))+1 ! slice the output of my process in chunks not exceeding the limit for one output
outputIndex=int([(i-1_pInt)*((maxRealOut)/materialpoint_sizeResults)+1_pInt, &
min(i*((maxRealOut)/materialpoint_sizeResults),size(materialpoint_results,3))],pLongInt)
@ -677,6 +717,7 @@ program DAMASK_spectral
real(convergedCounter, pReal)/&
real(notConvergedCounter + convergedCounter,pReal)*100.0_pReal, &
' %) increments converged!'
flush(6)
call MPI_file_close(resUnit,ierr)
close(statUnit)

File diff suppressed because it is too large Load Diff

View File

@ -190,11 +190,11 @@ subroutine constitutive_init()
if (any(phase_kinematics == KINEMATICS_hydrogen_strain_ID)) call kinematics_hydrogen_strain_init(FILEUNIT)
close(FILEUNIT)
mainProcess: if (worldrank == 0) then
write(6,'(/,a)') ' <<<+- constitutive init -+>>>'
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
write(6,'(/,a)') ' <<<+- constitutive init -+>>>'
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
#include "compilation_info.f90"
mainProcess: if (worldrank == 0) then
!--------------------------------------------------------------------------------------------------
! write description file for constitutive output
call IO_write_jobFile(FILEUNIT,'outputConstitutive')

View File

@ -72,8 +72,6 @@ subroutine damage_local_init(fileUnit)
damage, &
damage_initialPhi, &
material_partHomogenization
use numerics,only: &
worldrank
implicit none
integer(pInt), intent(in) :: fileUnit
@ -86,11 +84,9 @@ subroutine damage_local_init(fileUnit)
tag = '', &
line = ''
mainProcess: if (worldrank == 0) then
write(6,'(/,a)') ' <<<+- damage_'//DAMAGE_local_label//' init -+>>>'
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
write(6,'(/,a)') ' <<<+- damage_'//DAMAGE_local_label//' init -+>>>'
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
#include "compilation_info.f90"
endif mainProcess
maxNinstance = int(count(damage_type == DAMAGE_local_ID),pInt)
if (maxNinstance == 0_pInt) return

View File

@ -26,19 +26,15 @@ subroutine damage_none_init()
use IO, only: &
IO_timeStamp
use material
use numerics, only: &
worldrank
implicit none
integer(pInt) :: &
homog, &
NofMyHomog
mainProcess: if (worldrank == 0) then
write(6,'(/,a)') ' <<<+- damage_'//DAMAGE_none_label//' init -+>>>'
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
write(6,'(/,a)') ' <<<+- damage_'//DAMAGE_none_label//' init -+>>>'
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
#include "compilation_info.f90"
endif mainProcess
initializeInstances: do homog = 1_pInt, material_Nhomogenization

View File

@ -77,8 +77,6 @@ subroutine damage_nonlocal_init(fileUnit)
damage, &
damage_initialPhi, &
material_partHomogenization
use numerics,only: &
worldrank
implicit none
integer(pInt), intent(in) :: fileUnit
@ -91,11 +89,9 @@ subroutine damage_nonlocal_init(fileUnit)
tag = '', &
line = ''
mainProcess: if (worldrank == 0) then
write(6,'(/,a)') ' <<<+- damage_'//DAMAGE_nonlocal_label//' init -+>>>'
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
write(6,'(/,a)') ' <<<+- damage_'//DAMAGE_nonlocal_label//' init -+>>>'
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
#include "compilation_info.f90"
endif mainProcess
maxNinstance = int(count(damage_type == DAMAGE_nonlocal_ID),pInt)
if (maxNinstance == 0_pInt) return

View File

@ -16,7 +16,7 @@ module homogenization
! General variables for the homogenization at a material point
implicit none
private
real(pReal), dimension(:,:,:,:), allocatable, public :: &
real(pReal), dimension(:,:,:,:), allocatable, public :: &
materialpoint_F0, & !< def grad of IP at start of FE increment
materialpoint_F, & !< def grad of IP to be reached at end of FE increment
materialpoint_P !< first P--K stress of IP
@ -128,7 +128,7 @@ subroutine homogenization_init
integer(pInt), dimension(:) , pointer :: thisNoutput
character(len=64), dimension(:,:), pointer :: thisOutput
character(len=32) :: outputName !< name of output, intermediate fix until HDF5 output is ready
logical :: knownHomogenization, knownThermal, knownDamage, knownVacancyflux, knownPorosity, knownHydrogenflux
logical :: valid
!--------------------------------------------------------------------------------------------------
@ -199,7 +199,7 @@ subroutine homogenization_init
do p = 1,material_Nhomogenization
if (any(material_homog == p)) then
i = homogenization_typeInstance(p) ! which instance of this homogenization type
knownHomogenization = .true. ! assume valid
valid = .true. ! assume valid
select case(homogenization_type(p)) ! split per homogenization type
case (HOMOGENIZATION_NONE_ID)
outputName = HOMOGENIZATION_NONE_label
@ -217,10 +217,10 @@ subroutine homogenization_init
thisOutput => homogenization_RGC_output
thisSize => homogenization_RGC_sizePostResult
case default
knownHomogenization = .false.
valid = .false.
end select
write(FILEUNIT,'(/,a,/)') '['//trim(homogenization_name(p))//']'
if (knownHomogenization) then
if (valid) then
write(FILEUNIT,'(a)') '(type)'//char(9)//trim(outputName)
write(FILEUNIT,'(a,i4)') '(ngrains)'//char(9),homogenization_Ngrains(p)
if (homogenization_type(p) /= HOMOGENIZATION_NONE_ID) then
@ -230,7 +230,7 @@ subroutine homogenization_init
endif
endif
i = thermal_typeInstance(p) ! which instance of this thermal type
knownThermal = .true. ! assume valid
valid = .true. ! assume valid
select case(thermal_type(p)) ! split per thermal type
case (THERMAL_isothermal_ID)
outputName = THERMAL_isothermal_label
@ -248,9 +248,9 @@ subroutine homogenization_init
thisOutput => thermal_conduction_output
thisSize => thermal_conduction_sizePostResult
case default
knownThermal = .false.
valid = .false.
end select
if (knownThermal) then
if (valid) then
write(FILEUNIT,'(a)') '(thermal)'//char(9)//trim(outputName)
if (thermal_type(p) /= THERMAL_isothermal_ID) then
do e = 1,thisNoutput(i)
@ -259,7 +259,7 @@ subroutine homogenization_init
endif
endif
i = damage_typeInstance(p) ! which instance of this damage type
knownDamage = .true. ! assume valid
valid = .true. ! assume valid
select case(damage_type(p)) ! split per damage type
case (DAMAGE_none_ID)
outputName = DAMAGE_none_label
@ -277,9 +277,9 @@ subroutine homogenization_init
thisOutput => damage_nonlocal_output
thisSize => damage_nonlocal_sizePostResult
case default
knownDamage = .false.
valid = .false.
end select
if (knownDamage) then
if (valid) then
write(FILEUNIT,'(a)') '(damage)'//char(9)//trim(outputName)
if (damage_type(p) /= DAMAGE_none_ID) then
do e = 1,thisNoutput(i)
@ -288,7 +288,7 @@ subroutine homogenization_init
endif
endif
i = vacancyflux_typeInstance(p) ! which instance of this vacancy flux type
knownVacancyflux = .true. ! assume valid
valid = .true. ! assume valid
select case(vacancyflux_type(p)) ! split per vacancy flux type
case (VACANCYFLUX_isoconc_ID)
outputName = VACANCYFLUX_isoconc_label
@ -306,9 +306,9 @@ subroutine homogenization_init
thisOutput => vacancyflux_cahnhilliard_output
thisSize => vacancyflux_cahnhilliard_sizePostResult
case default
knownVacancyflux = .false.
valid = .false.
end select
if (knownVacancyflux) then
if (valid) then
write(FILEUNIT,'(a)') '(vacancyflux)'//char(9)//trim(outputName)
if (vacancyflux_type(p) /= VACANCYFLUX_isoconc_ID) then
do e = 1,thisNoutput(i)
@ -317,7 +317,7 @@ subroutine homogenization_init
endif
endif
i = porosity_typeInstance(p) ! which instance of this porosity type
knownPorosity = .true. ! assume valid
valid = .true. ! assume valid
select case(porosity_type(p)) ! split per porosity type
case (POROSITY_none_ID)
outputName = POROSITY_none_label
@ -330,9 +330,9 @@ subroutine homogenization_init
thisOutput => porosity_phasefield_output
thisSize => porosity_phasefield_sizePostResult
case default
knownPorosity = .false.
valid = .false.
end select
if (knownPorosity) then
if (valid) then
write(FILEUNIT,'(a)') '(porosity)'//char(9)//trim(outputName)
if (porosity_type(p) /= POROSITY_none_ID) then
do e = 1,thisNoutput(i)
@ -341,7 +341,7 @@ subroutine homogenization_init
endif
endif
i = hydrogenflux_typeInstance(p) ! which instance of this hydrogen flux type
knownHydrogenflux = .true. ! assume valid
valid = .true. ! assume valid
select case(hydrogenflux_type(p)) ! split per hydrogen flux type
case (HYDROGENFLUX_isoconc_ID)
outputName = HYDROGENFLUX_isoconc_label
@ -354,9 +354,9 @@ subroutine homogenization_init
thisOutput => hydrogenflux_cahnhilliard_output
thisSize => hydrogenflux_cahnhilliard_sizePostResult
case default
knownHydrogenflux = .false.
valid = .false.
end select
if (knownHydrogenflux) then
if (valid) then
write(FILEUNIT,'(a)') '(hydrogenflux)'//char(9)//trim(outputName)
if (hydrogenflux_type(p) /= HYDROGENFLUX_isoconc_ID) then
do e = 1,thisNoutput(i)

View File

@ -100,8 +100,6 @@ subroutine homogenization_RGC_init(fileUnit)
FE_geomtype
use IO
use material
use numerics, only: &
worldrank
implicit none
integer(pInt), intent(in) :: fileUnit !< file pointer to material configuration
@ -117,11 +115,9 @@ subroutine homogenization_RGC_init(fileUnit)
tag = '', &
line = ''
mainProcess: if (worldrank == 0) then
write(6,'(/,a)') ' <<<+- homogenization_'//HOMOGENIZATION_RGC_label//' init -+>>>'
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
write(6,'(/,a)') ' <<<+- homogenization_'//HOMOGENIZATION_RGC_label//' init -+>>>'
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
#include "compilation_info.f90"
endif mainProcess
maxNinstance = int(count(homogenization_type == HOMOGENIZATION_RGC_ID),pInt)
if (maxNinstance == 0_pInt) return

View File

@ -62,8 +62,6 @@ subroutine homogenization_isostrain_init(fileUnit)
debug_levelBasic
use IO
use material
use numerics, only: &
worldrank
implicit none
integer(pInt), intent(in) :: fileUnit
@ -80,11 +78,9 @@ subroutine homogenization_isostrain_init(fileUnit)
tag = '', &
line = ''
mainProcess: if (worldrank == 0) then
write(6,'(/,a)') ' <<<+- homogenization_'//HOMOGENIZATION_ISOSTRAIN_label//' init -+>>>'
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
write(6,'(/,a)') ' <<<+- homogenization_'//HOMOGENIZATION_ISOSTRAIN_label//' init -+>>>'
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
#include "compilation_info.f90"
endif mainProcess
maxNinstance = count(homogenization_type == HOMOGENIZATION_ISOSTRAIN_ID)
if (maxNinstance == 0) return

View File

@ -29,21 +29,17 @@ subroutine homogenization_none_init()
use IO, only: &
IO_timeStamp
use material
use numerics, only: &
worldrank
implicit none
integer(pInt) :: &
homog, &
NofMyHomog
mainProcess: if (worldrank == 0) then
write(6,'(/,a)') ' <<<+- homogenization_'//HOMOGENIZATION_NONE_label//' init -+>>>'
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
write(6,'(/,a)') ' <<<+- homogenization_'//HOMOGENIZATION_NONE_label//' init -+>>>'
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
#include "compilation_info.f90"
endif mainProcess
initializeInstances: do homog = 1_pInt, material_Nhomogenization
initializeInstances: do homog = 1_pInt, material_Nhomogenization
myhomog: if (homogenization_type(homog) == HOMOGENIZATION_none_ID) then
NofMyHomog = count(material_homog == homog)

View File

@ -84,8 +84,6 @@ subroutine hydrogenflux_cahnhilliard_init(fileUnit)
hydrogenflux_initialCh, &
material_partHomogenization, &
material_partPhase
use numerics,only: &
worldrank
implicit none
integer(pInt), intent(in) :: fileUnit
@ -98,11 +96,9 @@ subroutine hydrogenflux_cahnhilliard_init(fileUnit)
tag = '', &
line = ''
mainProcess: if (worldrank == 0) then
write(6,'(/,a)') ' <<<+- hydrogenflux_'//HYDROGENFLUX_cahnhilliard_label//' init -+>>>'
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
write(6,'(/,a)') ' <<<+- hydrogenflux_'//HYDROGENFLUX_cahnhilliard_label//' init -+>>>'
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
#include "compilation_info.f90"
endif mainProcess
maxNinstance = int(count(hydrogenflux_type == HYDROGENFLUX_cahnhilliard_ID),pInt)
if (maxNinstance == 0_pInt) return

View File

@ -27,21 +27,17 @@ subroutine hydrogenflux_isoconc_init()
use IO, only: &
IO_timeStamp
use material
use numerics, only: &
worldrank
implicit none
integer(pInt) :: &
homog, &
NofMyHomog
mainProcess: if (worldrank == 0) then
write(6,'(/,a)') ' <<<+- hydrogenflux_'//HYDROGENFLUX_isoconc_label//' init -+>>>'
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
write(6,'(/,a)') ' <<<+- hydrogenflux_'//HYDROGENFLUX_isoconc_label//' init -+>>>'
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
#include "compilation_info.f90"
endif mainProcess
initializeInstances: do homog = 1_pInt, material_Nhomogenization
initializeInstances: do homog = 1_pInt, material_Nhomogenization
myhomog: if (hydrogenflux_type(homog) == HYDROGENFLUX_isoconc_ID) then
NofMyHomog = count(material_homog == homog)

View File

@ -81,8 +81,6 @@ subroutine kinematics_cleavage_opening_init(fileUnit)
KINEMATICS_cleavage_opening_ID, &
material_Nphase, &
MATERIAL_partPhase
use numerics,only: &
worldrank
use lattice, only: &
lattice_maxNcleavageFamily, &
lattice_NcleavageSystem
@ -97,11 +95,9 @@ subroutine kinematics_cleavage_opening_init(fileUnit)
tag = '', &
line = ''
mainProcess: if (worldrank == 0) then
write(6,'(/,a)') ' <<<+- kinematics_'//KINEMATICS_cleavage_opening_LABEL//' init -+>>>'
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
write(6,'(/,a)') ' <<<+- kinematics_'//KINEMATICS_cleavage_opening_LABEL//' init -+>>>'
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
#include "compilation_info.f90"
endif mainProcess
maxNinstance = int(count(phase_kinematics == KINEMATICS_cleavage_opening_ID),pInt)
if (maxNinstance == 0_pInt) return

View File

@ -81,8 +81,6 @@ subroutine kinematics_slipplane_opening_init(fileUnit)
KINEMATICS_slipplane_opening_ID, &
material_Nphase, &
MATERIAL_partPhase
use numerics,only: &
worldrank
use lattice, only: &
lattice_maxNslipFamily, &
lattice_NslipSystem
@ -97,11 +95,9 @@ subroutine kinematics_slipplane_opening_init(fileUnit)
tag = '', &
line = ''
mainProcess: if (worldrank == 0) then
write(6,'(/,a)') ' <<<+- kinematics_'//KINEMATICS_slipplane_opening_LABEL//' init -+>>>'
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
write(6,'(/,a)') ' <<<+- kinematics_'//KINEMATICS_slipplane_opening_LABEL//' init -+>>>'
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
#include "compilation_info.f90"
endif mainProcess
maxNinstance = int(count(phase_kinematics == KINEMATICS_slipplane_opening_ID),pInt)
if (maxNinstance == 0_pInt) return

View File

@ -71,8 +71,6 @@ subroutine kinematics_thermal_expansion_init(fileUnit)
KINEMATICS_thermal_expansion_ID, &
material_Nphase, &
MATERIAL_partPhase
use numerics,only: &
worldrank
implicit none
integer(pInt), intent(in) :: fileUnit
@ -83,11 +81,9 @@ subroutine kinematics_thermal_expansion_init(fileUnit)
tag = '', &
line = ''
mainProcess: if (worldrank == 0) then
write(6,'(/,a)') ' <<<+- kinematics_'//KINEMATICS_thermal_expansion_LABEL//' init -+>>>'
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
write(6,'(/,a)') ' <<<+- kinematics_'//KINEMATICS_thermal_expansion_LABEL//' init -+>>>'
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
#include "compilation_info.f90"
endif mainProcess
maxNinstance = int(count(phase_kinematics == KINEMATICS_thermal_expansion_ID),pInt)
if (maxNinstance == 0_pInt) return

View File

@ -71,8 +71,6 @@ subroutine kinematics_vacancy_strain_init(fileUnit)
KINEMATICS_vacancy_strain_ID, &
material_Nphase, &
MATERIAL_partPhase
use numerics,only: &
worldrank
implicit none
integer(pInt), intent(in) :: fileUnit
@ -83,11 +81,9 @@ subroutine kinematics_vacancy_strain_init(fileUnit)
tag = '', &
line = ''
mainProcess: if (worldrank == 0) then
write(6,'(/,a)') ' <<<+- kinematics_'//KINEMATICS_vacancy_strain_LABEL//' init -+>>>'
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
write(6,'(/,a)') ' <<<+- kinematics_'//KINEMATICS_vacancy_strain_LABEL//' init -+>>>'
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
#include "compilation_info.f90"
endif mainProcess
maxNinstance = int(count(phase_kinematics == KINEMATICS_vacancy_strain_ID),pInt)
if (maxNinstance == 0_pInt) return

View File

@ -96,19 +96,19 @@ module lattice
real(pReal), dimension(3+3,LATTICE_fcc_Nslip), parameter, private :: &
LATTICE_fcc_systemSlip = reshape(real([&
! Slip direction Plane normal
0, 1,-1, 1, 1, 1, &
-1, 0, 1, 1, 1, 1, &
1,-1, 0, 1, 1, 1, &
0,-1,-1, -1,-1, 1, &
1, 0, 1, -1,-1, 1, &
-1, 1, 0, -1,-1, 1, &
0,-1, 1, 1,-1,-1, &
-1, 0,-1, 1,-1,-1, &
1, 1, 0, 1,-1,-1, &
0, 1, 1, -1, 1,-1, &
1, 0,-1, -1, 1,-1, &
-1,-1, 0, -1, 1,-1 &
! Slip direction Plane normal ! SCHMID-BOAS notation
0, 1,-1, 1, 1, 1, & ! B2
-1, 0, 1, 1, 1, 1, & ! B4
1,-1, 0, 1, 1, 1, & ! B5
0,-1,-1, -1,-1, 1, & ! C1
1, 0, 1, -1,-1, 1, & ! C3
-1, 1, 0, -1,-1, 1, & ! C5
0,-1, 1, 1,-1,-1, & ! A2
-1, 0,-1, 1,-1,-1, & ! A3
1, 1, 0, 1,-1,-1, & ! A6
0, 1, 1, -1, 1,-1, & ! D1
1, 0,-1, -1, 1,-1, & ! D4
-1,-1, 0, -1, 1,-1 & ! D6
],pReal),[ 3_pInt + 3_pInt,LATTICE_fcc_Nslip]) !< Slip system <110>{111} directions. Sorted according to Eisenlohr & Hantcherli
real(pReal), dimension(3+3,LATTICE_fcc_Ntwin), parameter, private :: &
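
The slip-system table itself is unchanged by the hunk above; only the Schmid-Boas labels are added as comments. A short sketch that stores the same <110>{111} systems with their labels and checks that every slip direction lies in its slip plane (data layout assumed for illustration only):

```python
import numpy as np

# FCC <110>{111} slip systems: (slip direction, plane normal, Schmid-Boas label)
fcc_slip = [
    (( 0, 1,-1), ( 1, 1, 1), 'B2'), ((-1, 0, 1), ( 1, 1, 1), 'B4'), (( 1,-1, 0), ( 1, 1, 1), 'B5'),
    (( 0,-1,-1), (-1,-1, 1), 'C1'), (( 1, 0, 1), (-1,-1, 1), 'C3'), ((-1, 1, 0), (-1,-1, 1), 'C5'),
    (( 0,-1, 1), ( 1,-1,-1), 'A2'), ((-1, 0,-1), ( 1,-1,-1), 'A3'), (( 1, 1, 0), ( 1,-1,-1), 'A6'),
    (( 0, 1, 1), (-1, 1,-1), 'D1'), (( 1, 0,-1), (-1, 1,-1), 'D4'), ((-1,-1, 0), (-1, 1,-1), 'D6'),
]

for d, n, label in fcc_slip:
    assert np.dot(d, n) == 0, label   # slip direction must be perpendicular to the plane normal
print('all', len(fcc_slip), 'systems consistent')
```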

View File

@ -178,7 +178,7 @@ subroutine math_init
compiler_version, &
compiler_options
#endif
use numerics, only: fixedSeed
use numerics, only: randomSeed
use IO, only: IO_timeStamp
implicit none
@ -195,8 +195,8 @@ subroutine math_init
call random_seed(size=randSize)
if (allocated(randInit)) deallocate(randInit)
allocate(randInit(randSize))
if (fixedSeed > 0_pInt) then
randInit(1:randSize) = int(fixedSeed) ! fixedSeed is of type pInt, randInit not
if (randomSeed > 0_pInt) then
randInit(1:randSize) = int(randomSeed) ! randomSeed is of type pInt, randInit not
call random_seed(put=randInit)
else
call random_seed()
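
The rename from fixed_seed to random_seed keeps the behaviour: a positive value seeds the generator reproducibly, anything else falls back to random seeding. A sketch of the same decision in Python, with the random module standing in for the Fortran intrinsic:

```python
import random

def init_rng(random_seed=0):
    """Seed reproducibly when random_seed > 0, otherwise let the library pick a seed."""
    if random_seed > 0:
        random.seed(random_seed)     # deterministic: same seed gives the same sequence
    else:
        random.seed()                # seeded from system entropy
    return [random.random() for _ in range(3)]

assert init_rng(42) == init_rng(42)   # reproducible with a fixed seed
print(init_rng(0))                    # different on every run
```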

File diff suppressed because it is too large Load Diff

View File

@ -25,7 +25,7 @@ module numerics
nState = 10_pInt, & !< state loop limit
nStress = 40_pInt, & !< stress loop limit
pert_method = 1_pInt, & !< method used in perturbation technique for tangent
fixedSeed = 0_pInt, & !< fixed seeding for pseudo-random number generator, Default 0: use random seed
randomSeed = 0_pInt, & !< fixed seeding for pseudo-random number generator, Default 0: use random seed
worldrank = 0_pInt, & !< MPI worldrank (/=0 for MPI simulations only)
worldsize = 0_pInt !< MPI worldsize (/=0 for MPI simulations only)
integer(4), protected, public :: &
@ -120,9 +120,9 @@ module numerics
petsc_options = ''
integer(pInt), protected, public :: &
fftw_planner_flag = 32_pInt, & !< conversion of fftw_plan_mode to integer, basically what is usually done in the include file of fftw
continueCalculation = 0_pInt, & !< 0: exit if BVP solver does not converge, 1: continue calculation if BVP solver does not converge
divergence_correction = 2_pInt !< correct divergence calculation in fourier space 0: no correction, 1: size scaled to 1, 2: size scaled to Npoints
logical, protected, public :: &
continueCalculation = .false., & !< false:exit if BVP solver does not converge, true: continue calculation despite BVP solver not converging
memory_efficient = .true., & !< for fast execution (pre calculation of gamma_hat), Default .true.: do not precalculate
update_gamma = .false. !< update gamma operator with current stiffness, Default .false.: use initial stiffness
#endif
@ -359,8 +359,8 @@ subroutine numerics_init
!--------------------------------------------------------------------------------------------------
! random seeding parameter
case ('fixed_seed')
fixedSeed = IO_intValue(line,chunkPos,2_pInt)
case ('random_seed','fixed_seed')
randomSeed = IO_intValue(line,chunkPos,2_pInt)
!--------------------------------------------------------------------------------------------------
! gradient parameter
@ -424,9 +424,9 @@ subroutine numerics_init
case ('err_stress_tolabs')
err_stress_tolabs = IO_floatValue(line,chunkPos,2_pInt)
case ('continuecalculation')
continueCalculation = IO_intValue(line,chunkPos,2_pInt)
continueCalculation = IO_intValue(line,chunkPos,2_pInt) > 0_pInt
case ('memory_efficient')
memory_efficient = IO_intValue(line,chunkPos,2_pInt) > 0_pInt
memory_efficient = IO_intValue(line,chunkPos,2_pInt) > 0_pInt
case ('fftw_timelimit')
fftw_timelimit = IO_floatValue(line,chunkPos,2_pInt)
case ('fftw_plan_mode')
@ -436,7 +436,7 @@ subroutine numerics_init
case ('divergence_correction')
divergence_correction = IO_intValue(line,chunkPos,2_pInt)
case ('update_gamma')
update_gamma = IO_intValue(line,chunkPos,2_pInt) > 0_pInt
update_gamma = IO_intValue(line,chunkPos,2_pInt) > 0_pInt
case ('petsc_options')
petsc_options = trim(line(chunkPos(4):))
case ('spectralsolver','myspectralsolver')
@ -560,9 +560,9 @@ subroutine numerics_init
!--------------------------------------------------------------------------------------------------
! Random seeding parameter
write(6,'(a24,1x,i16,/)') ' fixed_seed: ',fixedSeed
if (fixedSeed <= 0_pInt) &
write(6,'(a,/)') ' No fixed Seed: Random is random!'
write(6,'(a24,1x,i16,/)') ' random_seed: ',randomSeed
if (randomSeed <= 0_pInt) &
write(6,'(a,/)') ' random seed will be generated!'
!--------------------------------------------------------------------------------------------------
! gradient parameter
@ -599,7 +599,7 @@ subroutine numerics_init
!--------------------------------------------------------------------------------------------------
! spectral parameters
#ifdef Spectral
write(6,'(a24,1x,i8)') ' continueCalculation: ',continueCalculation
write(6,'(a24,1x,L8)') ' continueCalculation: ',continueCalculation
write(6,'(a24,1x,L8)') ' memory_efficient: ',memory_efficient
write(6,'(a24,1x,i8)') ' divergence_correction: ',divergence_correction
write(6,'(a24,1x,a)') ' spectral_derivative: ',trim(spectral_derivative)
@ -698,8 +698,6 @@ subroutine numerics_init
if (err_hydrogenflux_tolabs <= 0.0_pReal) call IO_error(301_pInt,ext_msg='err_hydrogenflux_tolabs')
if (err_hydrogenflux_tolrel <= 0.0_pReal) call IO_error(301_pInt,ext_msg='err_hydrogenflux_tolrel')
#ifdef Spectral
if (continueCalculation /= 0_pInt .and. &
continueCalculation /= 1_pInt) call IO_error(301_pInt,ext_msg='continueCalculation')
if (divergence_correction < 0_pInt .or. &
divergence_correction > 2_pInt) call IO_error(301_pInt,ext_msg='divergence_correction')
if (update_gamma .and. &
@ -713,7 +711,7 @@ subroutine numerics_init
if (polarAlpha <= 0.0_pReal .or. &
polarAlpha > 2.0_pReal) call IO_error(301_pInt,ext_msg='polarAlpha')
if (polarBeta < 0.0_pReal .or. &
polarBeta > 2.0_pReal) call IO_error(301_pInt,ext_msg='polarBeta')
polarBeta > 2.0_pReal) call IO_error(301_pInt,ext_msg='polarBeta')
#endif
end subroutine numerics_init

View File

@ -1178,7 +1178,7 @@ end subroutine plastic_disloUCLA_dotState
function plastic_disloUCLA_postResults(Tstar_v,Temperature,ipc,ip,el)
use prec, only: &
tol_math_check, &
dEq
dEq, dNeq0
use math, only: &
pi
use material, only: &
@ -1445,9 +1445,13 @@ function plastic_disloUCLA_postResults(Tstar_v,Temperature,ipc,ip,el)
index_myFamily = sum(lattice_NslipSystem(1:f-1_pInt,ph)) ! at which index starts my family
slipSystems2: do i = 1_pInt,plastic_disloUCLA_Nslip(f,instance)
j = j + 1_pInt
if (dNeq0(abs(dot_product(Tstar_v,lattice_Sslip_v(:,1,index_myFamily+i,ph))))) then
plastic_disloUCLA_postResults(c+j) = &
(3.0_pReal*lattice_mu(ph)*plastic_disloUCLA_burgersPerSlipSystem(j,instance))/&
(16.0_pReal*pi*abs(dot_product(Tstar_v,lattice_Sslip_v(:,1,index_myFamily+i,ph))))
else
plastic_disloUCLA_postResults(c+j) = huge(1.0_pReal)
endif
plastic_disloUCLA_postResults(c+j)=min(plastic_disloUCLA_postResults(c+j),&
state(instance)%mfp_slip(j,of))
enddo slipSystems2; enddo slipFamilies2

View File

@ -1029,7 +1029,7 @@ subroutine plastic_dislotwin_init(fileUnit)
do p = 1_pInt,3_pInt; do q = 1_pInt,3_pInt; do r = 1_pInt,3_pInt; do s = 1_pInt,3_pInt
plastic_dislotwin_Ctwin3333(l,m,n,o,index_myFamily+j,instance) = &
plastic_dislotwin_Ctwin3333(l,m,n,o,index_myFamily+j,instance) + &
lattice_C3333(p,q,r,s,instance) * &
lattice_C3333(p,q,r,s,phase) * &
lattice_Qtwin(l,p,index_otherFamily+j,phase) * &
lattice_Qtwin(m,q,index_otherFamily+j,phase) * &
lattice_Qtwin(n,r,index_otherFamily+j,phase) * &
@ -1087,7 +1087,7 @@ subroutine plastic_dislotwin_init(fileUnit)
do p = 1_pInt,3_pInt; do q = 1_pInt,3_pInt; do r = 1_pInt,3_pInt; do s = 1_pInt,3_pInt
plastic_dislotwin_Ctrans3333(l,m,n,o,index_myFamily+j,instance) = &
plastic_dislotwin_Ctrans3333(l,m,n,o,index_myFamily+j,instance) + &
lattice_trans_C3333(p,q,r,s,instance) * &
lattice_trans_C3333(p,q,r,s,phase) * &
lattice_Qtrans(l,p,index_otherFamily+j,phase) * &
lattice_Qtrans(m,q,index_otherFamily+j,phase) * &
lattice_Qtrans(n,r,index_otherFamily+j,phase) * &

View File

@ -137,6 +137,7 @@ end subroutine prec_init
!> @brief equality comparison for float with double precision
! replaces "==" but for certain (relative) tolerance. Counterpart to dNeq
! https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
! AlmostEqualRelative
!--------------------------------------------------------------------------------------------------
logical elemental pure function dEq(a,b,tol)
@ -153,6 +154,7 @@ end function dEq
!> @brief inequality comparison for float with double precision
! replaces "!=" but for certain (relative) tolerance. Counterpart to dEq
! https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
! AlmostEqualRelative NOT
!--------------------------------------------------------------------------------------------------
logical elemental pure function dNeq(a,b,tol)
@ -167,33 +169,35 @@ end function dNeq
!--------------------------------------------------------------------------------------------------
!> @brief equality to 0 comparison for float with double precision
! replaces "==0" but for certain (absolute) tolerance. Counterpart to dNeq0
! https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
! replaces "==0" but everything not representable as a normal number is treated as 0. Counterpart to dNeq0
! https://de.mathworks.com/help/matlab/ref/realmin.html
! https://docs.oracle.com/cd/E19957-01/806-3568/ncg_math.html
!--------------------------------------------------------------------------------------------------
logical elemental pure function dEq0(a,tol)
implicit none
real(pReal), intent(in) :: a
real(pReal), intent(in), optional :: tol
real(pReal), parameter :: eps = 2.220446049250313E-16 ! DBL_EPSILON in C
real(pReal), parameter :: eps = 2.2250738585072014E-308 ! smallest non-denormalized number
dEq0 = merge(.True., .False.,abs(a) <= merge(tol,eps,present(tol))*10.0_pReal)
dEq0 = merge(.True., .False.,abs(a) <= merge(tol,eps,present(tol)))
end function dEq0
!--------------------------------------------------------------------------------------------------
!> @brief inequality to 0 comparison for float with double precision
! replaces "!=0" but for certain (absolute) tolerance. Counterpart to dEq0
! https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
! replaces "!=0" but everything not representable as a normal number is treated as 0. Counterpart to dEq0
! https://de.mathworks.com/help/matlab/ref/realmin.html
! https://docs.oracle.com/cd/E19957-01/806-3568/ncg_math.html
!--------------------------------------------------------------------------------------------------
logical elemental pure function dNeq0(a,tol)
implicit none
real(pReal), intent(in) :: a
real(pReal), intent(in), optional :: tol
real(pReal), parameter :: eps = 2.220446049250313E-16 ! DBL_EPSILON in C
real(pReal), parameter :: eps = 2.2250738585072014E-308 ! smallest non-denormalized number
dNeq0 = merge(.False., .True.,abs(a) <= merge(tol,eps,present(tol))*10.0_pReal)
dNeq0 = merge(.False., .True.,abs(a) <= merge(tol,eps,present(tol)))
end function dNeq0
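
With the change above, the default absolute tolerance of dEq0/dNeq0 drops from ten times DBL_EPSILON to the smallest normalized double, so by default only values that are not representable as normal numbers count as zero. A Python sketch of that semantics (not the Fortran implementation itself):

```python
import sys

def d_eq0(a, tol=sys.float_info.min):   # sys.float_info.min == 2.2250738585072014e-308
    """True if a is indistinguishable from zero, i.e. not representable as a normal number."""
    return abs(a) <= tol

def d_neq0(a, tol=sys.float_info.min):
    return not d_eq0(a, tol)

print(d_eq0(0.0))                 # True
print(d_eq0(1e-310))              # True  -> denormalized, treated as zero
print(d_eq0(1e-300))              # False -> small but normal values are kept
print(d_neq0(1e-16, tol=1e-12))   # False with an explicit engineering tolerance
```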

View File

@ -213,8 +213,9 @@ subroutine AL_init
endif restart
call Utilities_updateIPcoords(reshape(F,shape(F_lastInc)))
call Utilities_constitutiveResponse(F_lastInc, reshape(F,shape(F_lastInc)), &
0.0_pReal,P,C_volAvg,C_minMaxAvg,temp33_Real,.false.,math_I3)
call Utilities_constitutiveResponse(P,temp33_Real,C_volAvg,C_minMaxAvg, &
reshape(F,shape(F_lastInc)), 0.0_pReal, math_I3)
nullify(F)
nullify(F_lambda)
call DMDAVecRestoreArrayF90(da,solution_vec,xx_psc,ierr); CHKERRQ(ierr) ! write data back to PETSc
@ -364,12 +365,10 @@ subroutine AL_formResidual(in,x_scal,f_scal,dummy,ierr)
DMDALocalInfo, dimension(&
DMDA_LOCAL_INFO_SIZE) :: &
in
PetscScalar, target, dimension(3,3,2, &
XG_RANGE,YG_RANGE,ZG_RANGE), intent(in) :: &
x_scal
PetscScalar, target, dimension(3,3,2, &
X_RANGE,Y_RANGE,Z_RANGE), intent(out) :: &
f_scal
PetscScalar, &
target, dimension(3,3,2, XG_RANGE,YG_RANGE,ZG_RANGE), intent(in) :: x_scal
PetscScalar, &
target, dimension(3,3,2, X_RANGE, Y_RANGE, Z_RANGE), intent(out) :: f_scal
PetscScalar, pointer, dimension(:,:,:,:,:) :: &
F, &
F_lambda, &
@ -441,8 +440,9 @@ subroutine AL_formResidual(in,x_scal,f_scal,dummy,ierr)
!--------------------------------------------------------------------------------------------------
! evaluate constitutive response
P_avLastEval = P_av
call Utilities_constitutiveResponse(F_lastInc,F - residual_F_lambda/polarBeta,params%timeinc, &
residual_F,C_volAvg,C_minMaxAvg,P_av,ForwardData,params%rotation_BC)
call Utilities_constitutiveResponse(residual_F,P_av,C_volAvg,C_minMaxAvg, &
F - residual_F_lambda/polarBeta,params%timeinc, params%rotation_BC)
call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr)
ForwardData = .False.
@ -655,10 +655,12 @@ subroutine AL_forward(guess,timeinc,timeinc_old,loadCaseTime,deformation_BC,stre
!--------------------------------------------------------------------------------------------------
! update coordinates and rate and forward last inc
call utilities_updateIPcoords(F)
Fdot = Utilities_calculateRate(math_rotate_backward33(f_aimDot,rotation_BC), &
timeinc_old,guess,F_lastInc,reshape(F,[3,3,grid(1),grid(2),grid3]))
F_lambdaDot = Utilities_calculateRate(math_rotate_backward33(f_aimDot,rotation_BC), &
timeinc_old,guess,F_lambda_lastInc,reshape(F_lambda,[3,3,grid(1),grid(2),grid3]))
Fdot = Utilities_calculateRate(guess, &
F_lastInc, reshape(F, [3,3,grid(1),grid(2),grid3]), timeinc_old, &
math_rotate_backward33(f_aimDot,rotation_BC))
F_lambdaDot = Utilities_calculateRate(guess, &
F_lambda_lastInc,reshape(F_lambda,[3,3,grid(1),grid(2),grid3]), timeinc_old, &
math_rotate_backward33(f_aimDot,rotation_BC))
F_lastInc = reshape(F, [3,3,grid(1),grid(2),grid3])
F_lambda_lastInc = reshape(F_lambda,[3,3,grid(1),grid(2),grid3])
endif

View File

@ -39,16 +39,16 @@ module spectral_mech_basic
! stress, stiffness and compliance average etc.
real(pReal), private, dimension(3,3) :: &
F_aim = math_I3, &
F_aim_lastIter = math_I3, &
F_aim_lastInc = math_I3, &
P_av = 0.0_pReal, &
F_aimDot=0.0_pReal
F_aimDot = 0.0_pReal
character(len=1024), private :: incInfo
real(pReal), private, dimension(3,3,3,3) :: &
C_volAvg = 0.0_pReal, & !< current volume average stiffness
C_volAvgLastInc = 0.0_pReal, & !< previous volume average stiffness
C_minMaxAvg = 0.0_pReal, & !< current (min+max)/2 stiffness
S = 0.0_pReal !< current compliance (filled up with zeros)
C_minMaxAvgLastInc = 0.0_pReal, & !< previous (min+max)/2 stiffness
S = 0.0_pReal !< current compliance (filled up with zeros)
real(pReal), private :: err_stress, err_div
logical, private :: ForwardData
integer(pInt), private :: &
@ -69,7 +69,7 @@ module spectral_mech_basic
contains
!--------------------------------------------------------------------------------------------------
!> @brief allocates all neccessary fields and fills them with data, potentially from restart info
!> @brief allocates all necessary fields and fills them with data, potentially from restart info
!--------------------------------------------------------------------------------------------------
subroutine basicPETSc_init
#ifdef __GFORTRAN__
@ -90,6 +90,8 @@ subroutine basicPETSc_init
use numerics, only: &
worldrank, &
worldsize
use homogenization, only: &
materialpoint_F0
use DAMASK_interface, only: &
getSolverJobName
use spectral_utilities, only: &
@ -172,14 +174,11 @@ subroutine basicPETSc_init
flush(6)
write(rankStr,'(a1,i0)')'_',worldrank
call IO_read_realFile(777,'F'//trim(rankStr),trim(getSolverJobName()),size(F))
read (777,rec=1) F
close (777)
read (777,rec=1) F; close (777)
call IO_read_realFile(777,'F_lastInc'//trim(rankStr),trim(getSolverJobName()),size(F_lastInc))
read (777,rec=1) F_lastInc
close (777)
read (777,rec=1) F_lastInc; close (777)
call IO_read_realFile(777,'F_aimDot',trim(getSolverJobName()),size(f_aimDot))
read (777,rec=1) f_aimDot
close (777)
read (777,rec=1) f_aimDot; close (777)
F_aim = reshape(sum(sum(sum(F,dim=4),dim=3),dim=2) * wgt, [3,3]) ! average of F
F_aim_lastInc = sum(sum(sum(F_lastInc,dim=5),dim=4),dim=3) * wgt ! average of F_lastInc
elseif (restartInc == 1_pInt) then restart
@ -187,34 +186,29 @@ subroutine basicPETSc_init
F = reshape(F_lastInc,[9,grid(1),grid(2),grid3])
endif restart
materialpoint_F0 = reshape(F_lastInc, [3,3,1,product(grid(1:2))*grid3]) ! set starting condition for materialpoint_stressAndItsTangent
call Utilities_updateIPcoords(reshape(F,shape(F_lastInc)))
call Utilities_constitutiveResponse(F_lastInc, reshape(F,shape(F_lastInc)), &
0.0_pReal, &
P, &
C_volAvg,C_minMaxAvg, & ! global average of stiffness and (min+max)/2
temp33_Real, &
.false., &
math_I3)
call Utilities_constitutiveResponse(P, temp33_Real, C_volAvg,C_minMaxAvg, & ! stress field, stress avg, global average of stiffness and (min+max)/2
reshape(F,shape(F_lastInc)), & ! target F
0.0_pReal, & ! time increment
math_I3) ! no rotation of boundary condition
call DMDAVecRestoreArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr) ! write data back to PETSc
! QUESTION: why not writing back right after reading (l.189)?
restartRead: if (restartInc > 1_pInt) then ! QUESTION: are those values not calc'ed by constitutiveResponse? why reading from file?
if (iand(debug_level(debug_spectral),debug_spectralRestart)/= 0 .and. worldrank == 0_pInt) &
write(6,'(/,a,'//IO_intOut(restartInc-1_pInt)//',a)') &
'reading more values of increment', restartInc - 1_pInt, 'from file'
'reading more values of increment', restartInc-1_pInt, 'from file'
flush(6)
call IO_read_realFile(777,'C_volAvg',trim(getSolverJobName()),size(C_volAvg))
read (777,rec=1) C_volAvg
close (777)
read (777,rec=1) C_volAvg; close (777)
call IO_read_realFile(777,'C_volAvgLastInc',trim(getSolverJobName()),size(C_volAvgLastInc))
read (777,rec=1) C_volAvgLastInc
close (777)
read (777,rec=1) C_volAvgLastInc; close (777)
call IO_read_realFile(777,'C_ref',trim(getSolverJobName()),size(C_minMaxAvg))
read (777,rec=1) C_minMaxAvg
close (777)
read (777,rec=1) C_minMaxAvg; close (777)
endif restartRead
call Utilities_updateGamma(C_minmaxAvg,.True.)
call Utilities_updateGamma(C_minmaxAvg,.true.)
end subroutine basicPETSc_init
@ -238,13 +232,13 @@ type(tSolutionState) function basicPETSc_solution(incInfoIn,timeinc,timeinc_old,
!--------------------------------------------------------------------------------------------------
! input data for solution
real(pReal), intent(in) :: &
timeinc, & !< increment in time for current solution
timeinc_old !< increment in time of last increment
type(tBoundaryCondition), intent(in) :: &
stress_BC
character(len=*), intent(in) :: &
incInfoIn
real(pReal), intent(in) :: &
timeinc, & !< increment time for current solution
timeinc_old !< increment time of last successful increment
type(tBoundaryCondition), intent(in) :: &
stress_BC
real(pReal), dimension(3,3), intent(in) :: rotation_BC
!--------------------------------------------------------------------------------------------------
@ -279,13 +273,19 @@ type(tSolutionState) function basicPETSc_solution(incInfoIn,timeinc,timeinc_old,
!--------------------------------------------------------------------------------------------------
! check convergence
call SNESGetConvergedReason(snes,reason,ierr)
CHKERRQ(ierr)
call SNESGetConvergedReason(snes,reason,ierr); CHKERRQ(ierr)
BasicPETSc_solution%converged = reason > 0
basicPETSC_solution%iterationsNeeded = totalIter
basicPETSc_solution%termIll = terminallyIll
terminallyIll = .false.
<<<<<<< HEAD
if (reason == -4) call IO_error(893_pInt)
BasicPETSc_solution%converged = reason > 0
basicPETSC_solution%iterationsNeeded = totalIter
=======
if (reason == -4) call IO_error(893_pInt) ! MPI error
>>>>>>> spectralSolver-cutbackfix
end function BasicPETSc_solution
@ -321,19 +321,18 @@ subroutine BasicPETSC_formResidual(in,x_scal,f_scal,dummy,ierr)
terminallyIll
implicit none
DMDALocalInfo, dimension(DMDA_LOCAL_INFO_SIZE) :: &
in
PetscScalar, dimension(3,3, &
XG_RANGE,YG_RANGE,ZG_RANGE), intent(in) :: &
x_scal
PetscScalar, dimension(3,3, &
X_RANGE,Y_RANGE,Z_RANGE), intent(out) :: &
f_scal
DMDALocalInfo, dimension(DMDA_LOCAL_INFO_SIZE) :: in
PetscScalar, &
dimension(3,3, XG_RANGE,YG_RANGE,ZG_RANGE), intent(in) :: x_scal !< what is this?
PetscScalar, &
dimension(3,3, X_RANGE,Y_RANGE,Z_RANGE), intent(out) :: f_scal !< what is this?
PetscInt :: &
PETScIter, &
nfuncs
PetscObject :: dummy
PetscErrorCode :: ierr
real(pReal), dimension(3,3) :: &
deltaF_aim
external :: &
SNESGetNumberFunctionEvals, &
@ -343,45 +342,48 @@ subroutine BasicPETSC_formResidual(in,x_scal,f_scal,dummy,ierr)
call SNESGetIterationNumber(snes,PETScIter,ierr); CHKERRQ(ierr)
if (nfuncs == 0 .and. PETScIter == 0) totalIter = -1_pInt ! new increment
<<<<<<< HEAD
newIteration: if (totalIter <= PETScIter) then
=======
>>>>>>> spectralSolver-cutbackfix
!--------------------------------------------------------------------------------------------------
! report begin of new iteration
! begin of new iteration
newIteration: if (totalIter <= PETScIter) then
totalIter = totalIter + 1_pInt
write(6,'(1x,a,3(a,'//IO_intOut(itmax)//'))') trim(incInfo), &
' @ Iteration ', itmin, '≤',totalIter, '≤', itmax
write(6,'(1x,a,3(a,'//IO_intOut(itmax)//'))') &
trim(incInfo), ' @ Iteration ', itmin, '≤',totalIter, '≤', itmax
if (iand(debug_level(debug_spectral),debug_spectralRotation) /= 0) &
write(6,'(/,a,/,3(3(f12.7,1x)/))',advance='no') ' deformation gradient aim (lab) =', &
math_transpose33(math_rotate_backward33(F_aim,params%rotation_BC))
write(6,'(/,a,/,3(3(f12.7,1x)/))',advance='no') ' deformation gradient aim =', &
math_transpose33(F_aim)
write(6,'(/,a,/,3(3(f12.7,1x)/))',advance='no') &
' deformation gradient aim (lab) =', math_transpose33(math_rotate_backward33(F_aim,params%rotation_BC))
write(6,'(/,a,/,3(3(f12.7,1x)/))',advance='no') &
' deformation gradient aim =', math_transpose33(F_aim)
flush(6)
endif newIteration
!--------------------------------------------------------------------------------------------------
! evaluate constitutive response
call Utilities_constitutiveResponse(F_lastInc,x_scal,params%timeinc, &
f_scal,C_volAvg,C_minmaxAvg,P_av,ForwardData,params%rotation_BC)
call Utilities_constitutiveResponse(f_scal,P_av,C_volAvg,C_minmaxAvg, &
x_scal,params%timeinc, params%rotation_BC)
call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr)
ForwardData = .false.
!--------------------------------------------------------------------------------------------------
! stress BC handling
F_aim_lastIter = F_aim
F_aim = F_aim - math_mul3333xx33(S, ((P_av - params%stress_BC))) ! S = 0.0 for no bc
err_stress = maxval(abs(mask_stress * (P_av - params%stress_BC))) ! mask = 0.0 for no bc
deltaF_aim = math_mul3333xx33(S, P_av - params%stress_BC)
F_aim = F_aim - deltaF_aim
err_stress = maxval(abs(mask_stress * (P_av - params%stress_BC))) ! mask = 0.0 when no stress bc
!--------------------------------------------------------------------------------------------------
! updated deformation gradient using fix point algorithm of basic scheme
tensorField_real = 0.0_pReal
tensorField_real(1:3,1:3,1:grid(1),1:grid(2),1:grid3) = f_scal
call utilities_FFTtensorForward()
err_div = Utilities_divergenceRMS()
call utilities_fourierGammaConvolution(math_rotate_backward33(F_aim_lastIter-F_aim,params%rotation_BC))
call utilities_FFTtensorBackward()
call utilities_FFTtensorForward() ! FFT forward of global "tensorField_real"
err_div = Utilities_divergenceRMS() ! divRMS of tensorField_fourier
call utilities_fourierGammaConvolution(math_rotate_backward33(deltaF_aim,params%rotation_BC)) ! convolution of Gamma and tensorField_fourier, with arg
call utilities_FFTtensorBackward() ! FFT backward of global tensorField_fourier
!--------------------------------------------------------------------------------------------------
! constructing residual
f_scal = tensorField_real(1:3,1:3,1:grid(1),1:grid(2),1:grid3)
f_scal = tensorField_real(1:3,1:3,1:grid(1),1:grid(2),1:grid3) ! Gamma*P gives correction towards div(P) = 0, so needs to be zero, too
end subroutine BasicPETSc_formResidual
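
In the stress-BC handling above, deltaF_aim is the double contraction of the average compliance S with the stress mismatch P_av - stress_BC. A hedged numpy sketch of such a contraction (an analogue of math_mul3333xx33 with placeholder numbers, not the DAMASK routine):

```python
import numpy as np

def mul3333xx33(S, T):
    """Double contraction of a rank-4 with a rank-2 tensor: (S:T)_ij = S_ijkl T_kl."""
    return np.einsum('ijkl,kl->ij', S, T)

# hypothetical compliance and a stress mismatch P_av - P_BC (Pa)
S = np.zeros((3, 3, 3, 3))
for i in range(3):
    for j in range(3):
        S[i, j, i, j] = 1.0e-11                     # placeholder compliance entries
stress_mismatch = np.diag([2.0e8, 0.0, -1.0e8])

deltaF_aim = mul3333xx33(S, stress_mismatch)        # correction subtracted from F_aim
print(deltaF_aim)                                   # diag(2e-3, 0, -1e-3)
```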
@ -442,8 +444,11 @@ end subroutine BasicPETSc_converged
!--------------------------------------------------------------------------------------------------
!> @brief forwarding routine
!> @details find new boundary conditions and best F estimate for end of current timestep
!> possibly writing restart information, triggering of state increment in DAMASK, and updating of IPcoordinates
!--------------------------------------------------------------------------------------------------
subroutine BasicPETSc_forward(guess,timeinc,timeinc_old,loadCaseTime,deformation_BC,stress_BC,rotation_BC)
<<<<<<< HEAD
use math, only: &
math_mul33x33 ,&
math_rotate_backward33
@@ -542,6 +547,118 @@ subroutine BasicPETSc_forward(guess,timeinc,timeinc_old,loadCaseTime,deformation
math_rotate_backward33(F_aim,rotation_BC)),[9,grid(1),grid(2),grid3])
call DMDAVecRestoreArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr)
=======
use math, only: &
math_mul33x33 ,&
math_rotate_backward33
use numerics, only: &
worldrank
use homogenization, only: &
materialpoint_F0
use mesh, only: &
grid, &
grid3
use CPFEM2, only: &
CPFEM_age
use spectral_utilities, only: &
Utilities_calculateRate, &
Utilities_forwardField, &
Utilities_updateIPcoords, &
tBoundaryCondition, &
cutBack
use IO, only: &
IO_write_JobRealFile
use FEsolving, only: &
restartWrite
implicit none
logical, intent(in) :: &
guess
real(pReal), intent(in) :: &
timeinc_old, &
timeinc, &
loadCaseTime !< remaining time of current load case
type(tBoundaryCondition), intent(in) :: &
stress_BC, &
deformation_BC
real(pReal), dimension(3,3), intent(in) ::&
rotation_BC
PetscErrorCode :: ierr
PetscScalar, pointer :: F(:,:,:,:)
character(len=32) :: rankStr
call DMDAVecGetArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr)
if (cutBack) then
C_volAvg = C_volAvgLastInc ! QUESTION: where is this required?
C_minMaxAvg = C_minMaxAvgLastInc ! QUESTION: where is this required?
else
!--------------------------------------------------------------------------------------------------
! restart information for spectral solver
if (restartWrite) then ! QUESTION: where is this logical properly set?
write(6,'(/,a)') ' writing converged results for restart'
flush(6)
if (worldrank == 0_pInt) then
call IO_write_jobRealFile(777,'C_volAvg',size(C_volAvg))
write (777,rec=1) C_volAvg; close(777)
call IO_write_jobRealFile(777,'C_volAvgLastInc',size(C_volAvgLastInc))
write (777,rec=1) C_volAvgLastInc; close(777)
call IO_write_jobRealFile(777,'C_minMaxAvg',size(C_volAvg))
write (777,rec=1) C_minMaxAvg; close(777)
call IO_write_jobRealFile(777,'C_minMaxAvgLastInc',size(C_volAvgLastInc))
write (777,rec=1) C_minMaxAvgLastInc; close(777)
endif
write(rankStr,'(a1,i0)')'_',worldrank
call IO_write_jobRealFile(777,'F'//trim(rankStr),size(F)) ! writing deformation gradient field to file
write (777,rec=1) F; close (777)
call IO_write_jobRealFile(777,'F_lastInc'//trim(rankStr),size(F_lastInc)) ! writing F_lastInc field to file
write (777,rec=1) F_lastInc; close (777)
endif
call CPFEM_age() ! age state and kinematics
call utilities_updateIPcoords(F)
C_volAvgLastInc = C_volAvg
C_minMaxAvgLastInc = C_minMaxAvg
if (guess) then ! QUESTION: better with a = L ? x:y
F_aimDot = stress_BC%maskFloat * (F_aim - F_aim_lastInc)/timeinc_old ! initialize with correction based on last inc
else
F_aimDot = 0.0_pReal
endif
F_aim_lastInc = F_aim
!--------------------------------------------------------------------------------------------------
! calculate rate for aim
if (deformation_BC%myType=='l') then ! calculate f_aimDot from given L and current F
F_aimDot = &
F_aimDot + deformation_BC%maskFloat * math_mul33x33(deformation_BC%values, F_aim_lastInc)
elseif(deformation_BC%myType=='fdot') then ! f_aimDot is prescribed
F_aimDot = &
F_aimDot + deformation_BC%maskFloat * deformation_BC%values
elseif (deformation_BC%myType=='f') then ! aim at end of load case is prescribed
F_aimDot = &
F_aimDot + deformation_BC%maskFloat * (deformation_BC%values - F_aim_lastInc)/loadCaseTime
endif
Fdot = Utilities_calculateRate(guess, &
F_lastInc,reshape(F,[3,3,grid(1),grid(2),grid3]),timeinc_old, &
math_rotate_backward33(f_aimDot,rotation_BC))
F_lastInc = reshape(F, [3,3,grid(1),grid(2),grid3]) ! winding F forward
materialpoint_F0 = reshape(F_lastInc, [3,3,1,product(grid(1:2))*grid3]) ! set starting condition for materialpoint_stressAndItsTangent
endif
!--------------------------------------------------------------------------------------------------
! update average and local deformation gradients
F_aim = F_aim_lastInc + f_aimDot * timeinc
F = reshape(Utilities_forwardField(timeinc,F_lastInc,Fdot, & ! estimate of F at end of time+timeinc that matches rotated F_aim on average
math_rotate_backward33(F_aim,rotation_BC)),[9,grid(1),grid(2),grid3])
call DMDAVecRestoreArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr)
>>>>>>> spectralSolver-cutbackfix
end subroutine BasicPETSc_forward
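As a reading aid for the forwarding logic above, the following NumPy sketch shows how the rate of the average deformation gradient is built for the three load-case types ('l', 'fdot', 'f') and then integrated over the new increment. It is illustrative only, not DAMASK code: the separate stress and deformation masks are collapsed into a single `bc_mask`, and the rotation between load frame and lab frame is omitted.

```python
import numpy as np

def forward_F_aim(F_aim, F_aim_lastInc, timeinc, timeinc_old, loadCaseTime,
                  bc_type, bc_values, bc_mask, guess=True):
    """Sketch of the average-F forwarding in BasicPETSc_forward."""
    if guess:                                      # extrapolate the rate of the last increment
        F_aimDot = bc_mask * (F_aim - F_aim_lastInc) / timeinc_old
    else:
        F_aimDot = np.zeros((3, 3))
    F_aim_lastInc = F_aim.copy()                   # wind the average F forward

    if bc_type == 'l':                             # velocity gradient L prescribed: Fdot = L.F
        F_aimDot = F_aimDot + bc_mask * (bc_values @ F_aim_lastInc)
    elif bc_type == 'fdot':                        # rate of F prescribed directly
        F_aimDot = F_aimDot + bc_mask * bc_values
    elif bc_type == 'f':                           # F at end of load case prescribed
        F_aimDot = F_aimDot + bc_mask * (bc_values - F_aim_lastInc) / loadCaseTime

    return F_aim_lastInc + F_aimDot * timeinc      # estimate for the end of the new increment
```

The local field F is then forwarded with Utilities_forwardField so that its volume average matches the rotated F_aim, as noted in the comment above.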
!--------------------------------------------------------------------------------------------------

@@ -213,8 +213,8 @@ subroutine Polarisation_init
endif restart
call Utilities_updateIPcoords(reshape(F,shape(F_lastInc)))
call Utilities_constitutiveResponse(F_lastInc, reshape(F,shape(F_lastInc)), &
0.0_pReal,P,C_volAvg,C_minMaxAvg,temp33_Real,.false.,math_I3)
call Utilities_constitutiveResponse(P,temp33_Real,C_volAvg,C_minMaxAvg, &
reshape(F,shape(F_lastInc)),0.0_pReal,math_I3)
nullify(F)
nullify(F_tau)
call DMDAVecRestoreArrayF90(da,solution_vec,xx_psc,ierr); CHKERRQ(ierr) ! write data back to PETSc
@@ -364,12 +364,10 @@ subroutine Polarisation_formResidual(in,x_scal,f_scal,dummy,ierr)
DMDALocalInfo, dimension(&
DMDA_LOCAL_INFO_SIZE) :: &
in
PetscScalar, target, dimension(3,3,2, &
XG_RANGE,YG_RANGE,ZG_RANGE), intent(in) :: &
x_scal
PetscScalar, target, dimension(3,3,2, &
X_RANGE,Y_RANGE,Z_RANGE), intent(out) :: &
f_scal
PetscScalar, &
target, dimension(3,3,2, XG_RANGE,YG_RANGE,ZG_RANGE), intent(in) :: x_scal
PetscScalar, &
target, dimension(3,3,2, X_RANGE, Y_RANGE, Z_RANGE), intent(out) :: f_scal
PetscScalar, pointer, dimension(:,:,:,:,:) :: &
F, &
F_tau, &
@@ -440,8 +438,8 @@ subroutine Polarisation_formResidual(in,x_scal,f_scal,dummy,ierr)
!--------------------------------------------------------------------------------------------------
! evaluate constitutive response
P_avLastEval = P_av
call Utilities_constitutiveResponse(F_lastInc,F - residual_F_tau/polarBeta,params%timeinc, &
residual_F,C_volAvg,C_minMaxAvg,P_av,ForwardData,params%rotation_BC)
call Utilities_constitutiveResponse(residual_F,P_av,C_volAvg,C_minMaxAvg, &
F - residual_F_tau/polarBeta,params%timeinc,params%rotation_BC)
call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1,MPI_LOGICAL,MPI_LOR,PETSC_COMM_WORLD,ierr)
ForwardData = .False.
@@ -654,13 +652,13 @@ subroutine Polarisation_forward(guess,timeinc,timeinc_old,loadCaseTime,deformati
!--------------------------------------------------------------------------------------------------
! update coordinates and rate and forward last inc
call utilities_updateIPcoords(F)
Fdot = Utilities_calculateRate(math_rotate_backward33(f_aimDot,rotation_BC), &
timeinc_old,guess,F_lastInc, &
reshape(F,[3,3,grid(1),grid(2),grid3]))
F_tauDot = Utilities_calculateRate(math_rotate_backward33(2.0_pReal*f_aimDot,rotation_BC), &
timeinc_old,guess,F_tau_lastInc, &
reshape(F_tau,[3,3,grid(1),grid(2),grid3]))
F_lastInc = reshape(F, [3,3,grid(1),grid(2),grid3])
Fdot = Utilities_calculateRate(guess, &
F_lastInc, reshape(F, [3,3,grid(1),grid(2),grid3]), timeinc_old, &
math_rotate_backward33( f_aimDot,rotation_BC))
F_tauDot = Utilities_calculateRate(guess, &
F_tau_lastInc, reshape(F_tau,[3,3,grid(1),grid(2),grid3]), timeinc_old, &
math_rotate_backward33(2.0_pReal*f_aimDot,rotation_BC))
F_lastInc = reshape(F, [3,3,grid(1),grid(2),grid3])
F_tau_lastInc = reshape(F_tau,[3,3,grid(1),grid(2),grid3])
endif

@@ -16,7 +16,7 @@ module spectral_utilities
#include <petsc/finclude/petscsys.h>
include 'fftw3-mpi.f03'
logical, public :: cutBack =.false. !< cut back of BVP solver in case convergence is not achieved or a material point is terminally ill
logical, public :: cutBack = .false. !< cut back of BVP solver in case convergence is not achieved or a material point is terminally ill
integer(pInt), public, parameter :: maxPhaseFields = 2_pInt
integer(pInt), public :: nActiveFields = 0_pInt
@@ -799,7 +799,7 @@ function utilities_maskedCompliance(rot_BC,mask_stress,C)
call math_invert(size_reduced, c_reduced, s_reduced, errmatinv) ! invert reduced stiffness
if (any(IEEE_is_NaN(s_reduced))) errmatinv = .true.
if(errmatinv) call IO_error(error_ID=400_pInt,ext_msg='utilities_maskedCompliance')
if (errmatinv) call IO_error(error_ID=400_pInt,ext_msg='utilities_maskedCompliance')
temp99_Real = 0.0_pReal ! fill up compliance with zeros
k = 0_pInt
do n = 1_pInt,9_pInt
@@ -817,28 +817,41 @@ function utilities_maskedCompliance(rot_BC,mask_stress,C)
sTimesC = matmul(c_reduced,s_reduced)
do m=1_pInt, size_reduced
do n=1_pInt, size_reduced
if(m==n .and. abs(sTimesC(m,n)) > (1.0_pReal + 10.0e-12_pReal)) errmatinv = .true. ! diagonal elements of S*C should be 1
if(m/=n .and. abs(sTimesC(m,n)) > (0.0_pReal + 10.0e-12_pReal)) errmatinv = .true. ! off diagonal elements of S*C should be 0
errmatinv = errmatinv &
.or. (m==n .and. abs(sTimesC(m,n)-1.0_pReal) > 1.0e-12_pReal) & ! diagonal elements of S*C should be 1
.or. (m/=n .and. abs(sTimesC(m,n)) > 1.0e-12_pReal) ! off-diagonal elements of S*C should be 0
enddo
enddo
<<<<<<< HEAD
if(debugGeneral .or. errmatinv) then
=======
if (debugGeneral .or. errmatinv) then
>>>>>>> spectralSolver-cutbackfix
write(formatString, '(i2)') size_reduced
formatString = '(/,a,/,'//trim(formatString)//'('//trim(formatString)//'(2x,es9.2,1x)/))'
write(6,trim(formatString),advance='no') ' C * S (load) ', &
transpose(matmul(c_reduced,s_reduced))
write(6,trim(formatString),advance='no') ' S (load) ', transpose(s_reduced)
if(errmatinv) call IO_error(error_ID=400_pInt,ext_msg='utilities_maskedCompliance')
endif
if(errmatinv) call IO_error(error_ID=400_pInt,ext_msg='utilities_maskedCompliance')
deallocate(c_reduced)
deallocate(s_reduced)
deallocate(sTimesC)
else
temp99_real = 0.0_pReal
endif
<<<<<<< HEAD
if(debugGeneral) &
write(6,'(/,a,/,9(9(2x,f10.5,1x)/),/)',advance='no') ' Masked Compliance (load) / GPa =', &
transpose(temp99_Real*1.e9_pReal)
flush(6)
=======
if(debugGeneral) then
write(6,'(/,a,/,9(9(2x,f10.5,1x)/),/)',advance='no') &
' Masked Compliance (load) / GPa =', transpose(temp99_Real*1.e-9_pReal)
flush(6)
endif
>>>>>>> spectralSolver-cutbackfix
utilities_maskedCompliance = math_Plain99to3333(temp99_Real)
end function utilities_maskedCompliance
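The consistency check rewritten above (diagonal of S*C within 1e-12 of 1, off-diagonal within 1e-12 of 0) amounts to verifying that the reduced stiffness was actually inverted. A NumPy sketch of the whole masked-compliance construction, for illustration only: it uses a naive 3x3x3x3 to 9x9 reshape instead of DAMASK's math_Plain3333to99/math_Plain99to3333 ordering, and it checks S_red*C_red where the Fortran checks C_red*S_red (both must be the identity).

```python
import numpy as np

def masked_compliance(C, mask_stress, tol=1.0e-12):
    """Sketch of utilities_maskedCompliance: invert the stiffness restricted to the
    stress-controlled components and scatter the result back into a 3x3x3x3 tensor."""
    C99  = C.reshape(9, 9)                        # naive flattening (ordering differs from DAMASK)
    mask = mask_stress.reshape(9).astype(bool)    # True where a stress component is prescribed
    S99  = np.zeros((9, 9))

    if mask.any():
        C_red = C99[np.ix_(mask, mask)]           # reduced stiffness
        S_red = np.linalg.inv(C_red)              # reduced compliance
        if not np.allclose(S_red @ C_red, np.eye(mask.sum()), atol=tol):
            raise RuntimeError('inversion of reduced stiffness inaccurate')
        S99[np.ix_(mask, mask)] = S_red           # compliance stays zero for free components

    return S99.reshape(3, 3, 3, 3)
```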
@@ -924,10 +937,10 @@ end subroutine utilities_fourierTensorDivergence
!--------------------------------------------------------------------------------------------------
!> @brief calculates constitutive response
!> @brief calculate constitutive response from materialpoint_F0 to F during timeinc
!--------------------------------------------------------------------------------------------------
subroutine utilities_constitutiveResponse(F_lastInc,F,timeinc, &
P,C_volAvg,C_minmaxAvg,P_av,forwardData,rotation_BC)
subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,&
F,timeinc,rotation_BC)
use IO, only: &
IO_error
use debug, only: &
@@ -940,31 +953,22 @@ subroutine utilities_constitutiveResponse(F_lastInc,F,timeinc, &
use mesh, only: &
grid,&
grid3
use FEsolving, only: &
restartWrite
use CPFEM2, only: &
CPFEM_general
use homogenization, only: &
materialpoint_F0, &
materialpoint_F, &
materialpoint_P, &
materialpoint_dPdF
materialpoint_dPdF, &
materialpoint_stressAndItsTangent
implicit none
real(pReal), intent(in), dimension(3,3,grid(1),grid(2),grid3) :: &
F_lastInc, & !< target deformation gradient
F !< previous deformation gradient
real(pReal), intent(in) :: timeinc !< loading time
logical, intent(in) :: forwardData !< age results
real(pReal), intent(in), dimension(3,3) :: rotation_BC !< rotation of load frame
real(pReal),intent(out), dimension(3,3,3,3) :: C_volAvg, C_minmaxAvg !< average stiffness
real(pReal),intent(out), dimension(3,3) :: P_av !< average PK stress
real(pReal),intent(out), dimension(3,3,grid(1),grid(2),grid3) :: P !< PK stress
logical :: &
age
  real(pReal), intent(in), dimension(3,3,grid(1),grid(2),grid3) :: F !< deformation gradient target
real(pReal), intent(in) :: timeinc !< loading time
real(pReal), intent(in), dimension(3,3) :: rotation_BC !< rotation of load frame
integer(pInt) :: &
j,k,ierr
real(pReal), dimension(3,3,3,3) :: max_dPdF, min_dPdF
@@ -975,17 +979,9 @@ subroutine utilities_constitutiveResponse(F_lastInc,F,timeinc, &
write(6,'(/,a)') ' ... evaluating constitutive response ......................................'
flush(6)
age = .False.
if (forwardData) then ! aging results
age = .True.
materialpoint_F0 = reshape(F_lastInc, [3,3,1,product(grid(1:2))*grid3])
endif
if (cutBack) age = .False. ! restore saved variables
materialpoint_F = reshape(F,[3,3,1,product(grid(1:2))*grid3])
call debug_reset() ! this has no effect on rank >0
materialpoint_F = reshape(F,[3,3,1,product(grid(1:2))*grid3]) ! set materialpoint target F to estimated field
!--------------------------------------------------------------------------------------------------
! calculate bounds of det(F) and report
if(debugGeneral) then
@@ -1002,7 +998,19 @@ subroutine utilities_constitutiveResponse(F_lastInc,F,timeinc, &
flush(6)
endif
call CPFEM_general(age,timeinc)
call debug_reset() ! this has no effect on rank >0
call materialpoint_stressAndItsTangent(.true.,timeinc) ! calculate P field
P = reshape(materialpoint_P, [3,3,grid(1),grid(2),grid3])
P_av = sum(sum(sum(P,dim=5),dim=4),dim=3) * wgt ! average of P
call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
if (debugRotation) &
write(6,'(/,a,/,3(3(2x,f12.4,1x)/))',advance='no') ' Piola--Kirchhoff stress (lab) / MPa =',&
math_transpose33(P_av)*1.e-6_pReal
P_av = math_rotate_forward33(P_av,rotation_BC)
write(6,'(/,a,/,3(3(2x,f12.4,1x)/))',advance='no') ' Piola--Kirchhoff stress / MPa =',&
math_transpose33(P_av)*1.e-6_pReal
flush(6)
max_dPdF = 0.0_pReal
max_dPdF_norm = 0.0_pReal
@@ -1020,38 +1028,24 @@ subroutine utilities_constitutiveResponse(F_lastInc,F,timeinc, &
end do
call MPI_Allreduce(MPI_IN_PLACE,max_dPdF,81,MPI_DOUBLE,MPI_MAX,PETSC_COMM_WORLD,ierr)
if(ierr /=0_pInt) call IO_error(894_pInt, ext_msg='MPI_Allreduce max')
if (ierr /= 0_pInt) call IO_error(894_pInt, ext_msg='MPI_Allreduce max')
call MPI_Allreduce(MPI_IN_PLACE,min_dPdF,81,MPI_DOUBLE,MPI_MIN,PETSC_COMM_WORLD,ierr)
if(ierr /=0_pInt) call IO_error(894_pInt, ext_msg='MPI_Allreduce min')
if (ierr /= 0_pInt) call IO_error(894_pInt, ext_msg='MPI_Allreduce min')
C_minmaxAvg = 0.5_pReal*(max_dPdF + min_dPdF)
C_volAvg = sum(sum(materialpoint_dPdF,dim=6),dim=5) * wgt
C_volAvg = sum(sum(materialpoint_dPdF,dim=6),dim=5) * wgt
call MPI_Allreduce(MPI_IN_PLACE,C_volAvg,81,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
call debug_info() ! this has no effect on rank >0
restartWrite = .false. ! reset restartWrite status
cutBack = .false. ! reset cutBack status
P = reshape(materialpoint_P, [3,3,grid(1),grid(2),grid3])
P_av = sum(sum(sum(P,dim=5),dim=4),dim=3) * wgt ! average of P
call MPI_Allreduce(MPI_IN_PLACE,P_av,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
if (debugRotation) &
write(6,'(/,a,/,3(3(2x,f12.4,1x)/))',advance='no') ' Piola--Kirchhoff stress (lab) / MPa =',&
math_transpose33(P_av)*1.e-6_pReal
P_av = math_rotate_forward33(P_av,rotation_BC)
write(6,'(/,a,/,3(3(2x,f12.4,1x)/))',advance='no') ' Piola--Kirchhoff stress / MPa =',&
math_transpose33(P_av)*1.e-6_pReal
flush(6)
end subroutine utilities_constitutiveResponse
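After the reordering above, the end of utilities_constitutiveResponse averages the stress field, rotates the average into the load frame, and condenses the tangent field into a volume average plus a min/max average. A single-process NumPy sketch of that averaging, for orientation only (the MPI_Allreduce calls are dropped, and an R P R^T convention is assumed for the forward rotation):

```python
import numpy as np

def average_response(P, dPdF, rotation_BC=np.eye(3)):
    """Sketch of the averaging at the end of utilities_constitutiveResponse.

    P    : (3,3,nx,ny,nz) first Piola-Kirchhoff stress field
    dPdF : (3,3,3,3,nx,ny,nz) tangent field
    """
    wgt  = 1.0 / np.prod(P.shape[2:])              # 1 / number of grid points
    P_av = P.sum(axis=(2, 3, 4)) * wgt             # volume average of P
    P_av = rotation_BC @ P_av @ rotation_BC.T      # rotation into load frame (assumed convention)

    C_volAvg = dPdF.sum(axis=(4, 5, 6)) * wgt      # volume-averaged tangent

    flat  = dPdF.reshape(3, 3, 3, 3, -1)           # stiffest/softest point by Frobenius norm
    norms = np.sqrt(np.einsum('ijklp,ijklp->p', flat, flat))
    C_minmaxAvg = 0.5 * (flat[..., np.argmax(norms)] + flat[..., np.argmin(norms)])

    return P_av, C_volAvg, C_minmaxAvg
```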
!--------------------------------------------------------------------------------------------------
!> @brief calculates forward rate, either guessing or just add delta/timeinc
!--------------------------------------------------------------------------------------------------
pure function utilities_calculateRate(avRate,timeinc_old,guess,field_lastInc,field)
pure function utilities_calculateRate(heterogeneous,field0,field,dt,avRate)
use mesh, only: &
grid3, &
grid
@@ -1059,17 +1053,17 @@ pure function utilities_calculateRate(avRate,timeinc_old,guess,field_lastInc,fie
implicit none
real(pReal), intent(in), dimension(3,3) :: avRate !< homogeneous addon
real(pReal), intent(in) :: &
timeinc_old !< timeinc of last step
dt !< timeinc between field0 and field
logical, intent(in) :: &
guess !< guess along former trajectory
heterogeneous !< calculate field of rates
real(pReal), intent(in), dimension(3,3,grid(1),grid(2),grid3) :: &
field_lastInc, & !< data of previous step
field0, & !< data of previous step
field !< data of current step
real(pReal), dimension(3,3,grid(1),grid(2),grid3) :: &
utilities_calculateRate
if (guess) then
utilities_calculateRate = (field-field_lastInc) / timeinc_old
if (heterogeneous) then
utilities_calculateRate = (field-field0) / dt
else
utilities_calculateRate = spread(spread(spread(avRate,3,grid(1)),4,grid(2)),5,grid3)
endif
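With the renamed arguments, the intent of utilities_calculateRate reads directly from the code: either a heterogeneous, point-wise rate computed from two snapshots of the field, or one homogeneous rate spread over the whole grid. A minimal NumPy sketch, assuming a (3,3,nx,ny,nz) field layout:

```python
import numpy as np

def calculate_rate(heterogeneous, field0, field, dt, avRate):
    """Sketch of utilities_calculateRate (illustrative only).

    field0, field : (3,3,nx,ny,nz) fields at the previous and the current increment
    avRate        : (3,3) homogeneous rate used when no point-wise guess is wanted
    """
    if heterogeneous:
        return (field - field0) / dt               # point-wise rate
    return np.broadcast_to(avRate[:, :, None, None, None], field.shape).copy()  # same rate everywhere
```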