unfinished HDF5 should not be part of the next release
This commit is contained in:
parent 09a66d918d
commit 4dfb52c792
@@ -1,198 +0,0 @@
<?xml version="1.0"?>
<h5ds>
  <!--Top level attributes-->
  <history>
    <type>attr</type>
    <h5path>/</h5path>
    <category></category>
    <description>store cmd history</description>
  </history>

  <inc>
    <type>attr</type>
    <h5path>/</h5path>
    <category></category>
    <description></description>
  </inc>

  <!--Geometry section-->
  <Vx>
    <type>Scalar</type>
    <h5path>/Geometry/Vx</h5path>
    <category>Geometry</category>
    <description>Vector along x defines the spectral mesh</description>
  </Vx>

  <Vy>
    <type>Scalar</type>
    <h5path>/Geometry/Vy</h5path>
    <category>Geometry</category>
    <description>Vector along y defines the spectral mesh</description>
  </Vy>

  <Vz>
    <type>Scalar</type>
    <h5path>/Geometry/Vz</h5path>
    <category>Geometry</category>
    <description>Vector along z defines the spectral mesh</description>
  </Vz>

  <ip>
    <type>Scalar</type>
    <h5path>/Geometry/ip</h5path>
    <category>Geometry</category>
    <description></description>
  </ip>

  <node>
    <type>Scalar</type>
    <h5path>/Geometry/node</h5path>
    <category>Geometry</category>
    <description></description>
  </node>

  <grain>
    <type>Scalar</type>
    <h5path>/Geometry/grain</h5path>
    <category>Geometry</category>
    <description></description>
  </grain>

  <pos>
    <type>Vector</type>
    <h5path>/Geometry/pos</h5path>
    <category>Geometry</category>
    <description></description>
  </pos>

  <elem>
    <type>Scalar</type>
    <h5path>/Geometry/elem</h5path>
    <category>Geometry</category>
    <description></description>
  </elem>

  <!--Crystallite section-->
  <phase>
    <type>Scalar</type>
    <h5path>/Crystallite/phase</h5path>
    <category>Crystallite</category>
    <description></description>
  </phase>

  <texture>
    <type>Scalar</type>
    <h5path>/Crystallite/texture</h5path>
    <category>Crystallite</category>
    <description></description>
  </texture>

  <volume>
    <type>Scalar</type>
    <h5path>/Crystallite/volume</h5path>
    <category>Crystallite</category>
    <description></description>
  </volume>

  <orientation>
    <type>Vector</type>
    <h5path>/Crystallite/orientation</h5path>
    <category>Crystallite</category>
    <description></description>
  </orientation>

  <eulerangles>
    <type>Vector</type>
    <h5path>/Crystallite/eulerangles</h5path>
    <category>Crystallite</category>
    <description>Bunge Euler angles in degrees</description>
  </eulerangles>

  <grainrotation>
    <type>Vector</type>
    <h5path>/Crystallite/grainrotation</h5path>
    <category>Crystallite</category>
    <description></description>
  </grainrotation>

  <f>
    <type>Tensor</type>
    <h5path>/Crystallite/f</h5path>
    <category>Crystallite</category>
    <description>deformation gradient (F)</description>
  </f>

  <p>
    <type>Tensor</type>
    <h5path>/Crystallite/p</h5path>
    <category>Crystallite</category>
    <description>first Piola-Kirchhoff stress</description>
  </p>

  <Cauchy>
    <type>Tensor</type>
    <h5path>/Crystallite/Cauchy</h5path>
    <category>Crystallite</category>
    <description>Cauchy stress tensor</description>
  </Cauchy>

  <lnV>
    <type>Tensor</type>
    <h5path>/Crystallite/lnV</h5path>
    <category>Crystallite</category>
    <description></description>
  </lnV>

  <MisesCauchy>
    <type>Scalar</type>
    <h5path>/Crystallite/MisesCauchy</h5path>
    <category>Crystallite</category>
    <description>von Mises equivalent of Cauchy stress</description>
  </MisesCauchy>

  <MiseslnV>
    <type>Scalar</type>
    <h5path>/Crystallite/MiseslnV</h5path>
    <category>Crystallite</category>
    <description>von Mises equivalent of logarithmic left-stretch strain (lnV)</description>
  </MiseslnV>

  <!--Constitutive section-->
  <resistance_slip>
    <type>Vector</type>
    <h5path>/Constitutive/resistance_slip</h5path>
    <category>Constitutive</category>
    <description></description>
  </resistance_slip>

  <shearrate_slip>
    <type>Vector</type>
    <h5path>/Constitutive/shearrate_slip</h5path>
    <category>Constitutive</category>
    <description></description>
  </shearrate_slip>

  <resolvedstress_slip>
    <type>Vector</type>
    <h5path>/Constitutive/resolvedstress_slip</h5path>
    <category>Constitutive</category>
    <description></description>
  </resolvedstress_slip>

  <totalshear>
    <type>Scalar</type>
    <h5path>/Constitutive/totalshear</h5path>
    <category>Constitutive</category>
    <description></description>
  </totalshear>

  <accumulatedshear_slip>
    <type>Matrix</type>
    <h5path>/Constitutive/accumulatedshear_slip</h5path>
    <category>Constitutive</category>
    <description>vector containing accumulated shear per slip system</description>
  </accumulatedshear_slip>

  <!--Derived section-->

</h5ds>
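The layout above is only a label-to-location map: each top-level element names a dataset label, and its <h5path> child says where that label lives inside the HDF5 file. A minimal sketch of the lookup, mirroring the lables_to_path helper removed further down and assuming a DS_HDF5.xml like the one above sits on disk:

import xml.etree.ElementTree as ET

def resolve_label(label, xml_path="DS_HDF5.xml"):
    """Return (type, h5path) for a label defined in the layout XML.

    Falls back to a Scalar under the HDF5 root when the label is not
    declared, which is the same behaviour the removed h5table module used.
    """
    tree = ET.parse(xml_path)
    node = tree.find(label)
    if node is None:                       # label not declared in the layout
        return "Scalar", "/{}".format(label)
    return node.find("type").text, node.find("h5path").text

# e.g. resolve_label("Cauchy") -> ("Tensor", "/Crystallite/Cauchy")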
@@ -3,29 +3,11 @@
"""Main aggregator"""
import os,sys,time

h5py_flag = os.path.join(os.path.dirname(__file__),'../../.noH5py')
h5py_grace = 7200  # only complain once every 7200 sec (2 hours)
h5py_msg = "h5py module not found."

now = time.time()

with open(os.path.join(os.path.dirname(__file__),'../../VERSION')) as f:
    version = f.readline()[:-1]

from .environment import Environment  # noqa
from .asciitable import ASCIItable  # noqa
try:
    from .h5table import H5Table  # noqa
    if os.path.exists(h5py_flag): os.remove(h5py_flag)  # delete flagging file on success
except ImportError:
    if os.path.exists(h5py_flag):
        if now - os.path.getmtime(h5py_flag) > h5py_grace:  # complain (again) every so-and-so often
            sys.stderr.write(h5py_msg + '\n')
            with open(h5py_flag, 'a'):
                os.utime(h5py_flag, (now, now))  # update flag modification time to "now"
    else:
        open(h5py_flag, 'a').close()  # create flagging file
        sys.stderr.write(h5py_msg + '\n')  # complain for the first time

from .config import Material  # noqa
from .colormaps import Colormap, Color  # noqa
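The removed block above throttles the missing-h5py warning with a flag file: complain immediately the first time, then at most once per h5py_grace seconds, tracked via the flag file's modification time. A standalone sketch of that pattern (the file name and interval here are illustrative, not part of the package):

import os
import sys
import time

def warn_throttled(flag_path=".noH5py", grace=7200, msg="h5py module not found."):
    """Emit msg at most once per `grace` seconds, tracked via a flag file."""
    now = time.time()
    if os.path.exists(flag_path):
        if now - os.path.getmtime(flag_path) > grace:
            sys.stderr.write(msg + '\n')
            os.utime(flag_path, (now, now))   # restart the grace period
    else:
        open(flag_path, 'a').close()          # first failure: create the flag
        sys.stderr.write(msg + '\n')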
@@ -1,146 +0,0 @@
# -*- coding: UTF-8 no BOM -*-

# ----------------------------------------------------------- #
# Ideally h5py alone should be enough to serve as the data
# interface for future DAMASK, but since we are still not
# sure when this major shift will happen, it seems to be a
# good idea to provide an interface class that helps users
# ease into using HDF5 as the new daily storage driver.
# ----------------------------------------------------------- #

import os
import h5py
import numpy as np
import xml.etree.cElementTree as ET

# ---------------------------------------------------------------- #
# python 3 has no unicode object; this ensures that the code works
# on Python 2 & 3
# ---------------------------------------------------------------- #
try:
    test = isinstance('test', unicode)
except NameError:
    unicode = str


def lables_to_path(label, dsXMLPath=None):
    """Read the XML definition file and return the path."""
    if dsXMLPath is None:
        # use the default storage layout in DS_HDF5.xml
        if "h5table.pyc" in __file__:
            dsXMLPath = os.path.abspath(__file__).replace("h5table.pyc",
                                                          "DS_HDF5.xml")
        else:
            dsXMLPath = os.path.abspath(__file__).replace("h5table.py",
                                                          "DS_HDF5.xml")
    # The current implementation requires that all variables
    # stay under the root node; the nesting is defined through the
    # h5path.
    # Allow new derived data to be put under the root.
    tree = ET.parse(dsXMLPath)
    try:
        dataType = tree.find('{}/type'.format(label)).text
        h5path = tree.find('{}/h5path'.format(label)).text
    except:
        dataType = "Scalar"
        h5path = "/{}".format(label)  # just put it under root
    return (dataType, h5path)


class H5Table(object):
    """
    Lightweight interface class for h5py

    DESCRIPTION
    -----------
    Interface/wrapper class for manipulating data in HDF5 with the DAMASK
    specialized data structure.
    --> try to maintain a minimal API design.
    PARAMETERS
    ----------
    h5f_path: str
        Absolute path of the HDF5 file
    METHOD
    ------
    del_entry()  -- Force delete attributes/group/datasets (dangerous)
    get_attr()   -- Return attributes if possible
    add_attr()   -- Add NEW attributes to dataset/group (no force overwrite)
    get_data()   -- Retrieve data as numpy.ndarray
    add_data()   -- Add dataset to H5 file
    get_cmdlog() -- Return the command used to generate the data if possible
    NOTE
    ----
    1. As an interface class, it uses a lazy evaluation design
       that reads the data only when it is absolutely necessary.
    2. The command line used to generate each new feature is stored with
       each dataset as a dataset attribute.

    """

    def __init__(self, h5f_path, new_file=False, dsXMLFile=None):
        self.h5f_path = h5f_path
        self.dsXMLFile = dsXMLFile
        msg = 'Created by H5Table from DAMASK'
        mode = 'w' if new_file else 'a'
        with h5py.File(self.h5f_path, mode) as h5f:
            h5f['/'].attrs['description'] = msg

    def del_entry(self, feature_name):
        """Delete entry in HDF5 table"""
        dataType, h5f_path = lables_to_path(feature_name,
                                            dsXMLPath=self.dsXMLFile)
        with h5py.File(self.h5f_path, 'a') as h5f:
            del h5f[h5f_path]

    def get_attr(self, attr_name):
        dataType, h5f_path = lables_to_path(attr_name,
                                            dsXMLPath=self.dsXMLFile)
        with h5py.File(self.h5f_path, 'a') as h5f:
            rst_attr = h5f[h5f_path].attrs[attr_name]
        return rst_attr

    def add_attr(self, attr_name, attr_data):
        dataType, h5f_path = lables_to_path(attr_name,
                                            dsXMLPath=self.dsXMLFile)
        with h5py.File(self.h5f_path, 'a') as h5f:
            h5f[h5f_path].attrs[attr_name] = attr_data
            h5f.flush()

    def get_data(self, feature_name=None):
        """Extract dataset from HDF5 table and return it as a numpy array"""
        dataType, h5f_path = lables_to_path(feature_name,
                                            dsXMLPath=self.dsXMLFile)
        with h5py.File(self.h5f_path, 'a') as h5f:
            h5f_dst = h5f[h5f_path]  # get the handle for the target dataset (table)
            rst_data = np.zeros(h5f_dst.shape)
            h5f_dst.read_direct(rst_data)
        return rst_data

    def add_data(self, feature_name, dataset, cmd_log=None):
        """Add a new feature to an existing HDF5 file"""
        dataType, h5f_path = lables_to_path(feature_name,
                                            dsXMLPath=self.dsXMLFile)
        with h5py.File(self.h5f_path, 'a') as h5f:
            # NOTE:
            # --> If the dataset already exists, delete the old one so that
            #     a new one can be written. For a brand new dataset,
            #     record its state as fresh in the cmd log.
            try:
                del h5f[h5f_path]
                print("***deleting old {} from {}".format(feature_name, self.h5f_path))
            except:
                # if no cmd log was given, None will be used
                cmd_log = str(cmd_log) + " [FRESH]"
            h5f.create_dataset(h5f_path, data=dataset)
            # store the cmd in the log if possible
            if cmd_log is not None:
                h5f[h5f_path].attrs['log'] = str(cmd_log)
            h5f.flush()

    def get_cmdlog(self, feature_name):
        """Get cmd history used to generate the feature"""
        dataType, h5f_path = lables_to_path(feature_name,
                                            dsXMLPath=self.dsXMLFile)
        with h5py.File(self.h5f_path, 'a') as h5f:
            cmd_logs = h5f[h5f_path].attrs['log']
        return cmd_logs
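For reference, a typical round trip through the class above would look roughly like the sketch below. The file name and array shape are made up for illustration; the label 'Cauchy' resolves to /Crystallite/Cauchy via DS_HDF5.xml as defined earlier.

import numpy as np
from damask.h5table import H5Table

# open (or create) a result file; 'w' when new_file=True, append otherwise
h5t = H5Table('job_result.h5', new_file=True)

# store 10 flattened 3x3 tensors under the label 'Cauchy',
# recording the generating command alongside the dataset
h5t.add_data('Cauchy', np.zeros((10, 9)), cmd_log='addCauchy.py job_result.h5')

sigma = h5t.get_data('Cauchy')      # -> numpy array of shape (10, 9)
print(h5t.get_cmdlog('Cauchy'))     # -> command that produced the dataset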
@@ -1,72 +0,0 @@
#!/usr/bin/env python2.7
# -*- coding: UTF-8 no BOM -*-

import os
# import re
# import sys
import collections
# import math
import damask
# import numpy as np
from optparse import OptionParser

scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName, damask.version])


# ----- Helper functions ----- #
def listify(x):
    return x if isinstance(x, collections.Iterable) else [x]


# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
usageEx = """
usage_in_details:
  Column labels are tagged by '#label#' in formulas.
  Use ';' for ',' in functions. Numpy is available as 'np'.
  Special variables: #_row_# -- row index

  Examples:
  (1) magnitude of vector        -- "np.linalg.norm(#vec#)"
  (2) rounded root of row number -- "round(math.sqrt(#_row_#);3)"
"""
desp = "Add or alter column(s) with derived values according to "
desp += "user-defined arithmetic operation between column(s)."

parser = OptionParser(option_class=damask.extendableOption,
                      usage='%prog options [file[s]]' + usageEx,
                      description=desp,
                      version=scriptID)
parser.add_option('-l', '--label',
                  dest='labels',
                  action='extend', metavar='<string LIST>',
                  help='(list of) new column labels')
parser.add_option('-f', '--formula',
                  dest='formulas',
                  action='extend', metavar='<string LIST>',
                  help='(list of) formulas corresponding to labels')
parser.add_option('-c', '--condition',
                  dest='condition', metavar='string',
                  help='condition to filter rows')

parser.set_defaults(condition=None)

(options, filenames) = parser.parse_args()

# ----- parse formulas ----- #
for i in range(len(options.formulas)):
    options.formulas[i] = options.formulas[i].replace(';', ',')

# ----- loop over input files ----- #
for name in filenames:
    try:
        h5f = damask.H5Table(name, new_file=False)
    except:
        print("!!!Cannot process {}".format(name))
        continue
    damask.util.report(scriptName, name)

    # Note:
    # --> not immediately needed, come back later
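The script above is left unfinished, but its usage text spells out the intended formula handling: ';' is swapped back to ',' (so commas survive the option parser) and '#label#' tags are replaced by the corresponding dataset before the expression is evaluated with numpy available as np. A hedged sketch of that substitution step; the evaluation scheme shown here is an assumption based on the usage text, not code from this commit, and the special '#_row_#' variable is not handled:

import re
import numpy as np

def evaluate_formula(formula, columns):
    """Replace '#label#' tags with arrays from `columns` and evaluate."""
    formula = formula.replace(';', ',')                 # undo option-parser escaping
    for label in re.findall(r'#(.+?)#', formula):
        formula = formula.replace('#{}#'.format(label),
                                  'columns[{!r}]'.format(label))
    return eval(formula, {'np': np, 'columns': columns})

cols = {'vec': np.array([3.0, 4.0])}
assert evaluate_formula("np.linalg.norm(#vec#)", cols) == 5.0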
@@ -1,61 +0,0 @@
#!/usr/bin/env python2.7
# -*- coding: UTF-8 no BOM -*-

import os
import damask
import numpy as np
from optparse import OptionParser

scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName, damask.version])


def getCauchy(f, p):
    """Return Cauchy stress for given f and p"""
    # [Cauchy] = (1/det(F)) * [P].[F_transpose]
    f = f.reshape((3, 3))
    p = p.reshape((3, 3))
    return 1.0/np.linalg.det(f)*np.dot(p, f.T).reshape(9)


# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
desp = "Add column(s) containing Cauchy stress based on given column(s) "
desp += "of deformation gradient and first Piola--Kirchhoff stress."
parser = OptionParser(option_class=damask.extendableOption,
                      usage='%prog options [file[s]]',
                      description=desp,
                      version=scriptID)
parser.add_option('-f', '--defgrad',
                  dest='defgrad',
                  type='string', metavar='string',
                  help='heading for deformation gradient [%default]')
parser.add_option('-p', '--stress',
                  dest='stress',
                  type='string', metavar='string',
                  help='heading for first Piola--Kirchhoff stress [%default]')

parser.set_defaults(defgrad='f',
                    stress='p')

(options, filenames) = parser.parse_args()

# ----- loop over input H5 files ----- #
for name in filenames:
    try:
        h5f = damask.H5Table(name, new_file=False)
    except:
        continue
    damask.util.report(scriptName, name)

    # ----- read in data ----- #
    f = h5f.get_data("f")
    p = h5f.get_data("p")

    # ----- calculate Cauchy stress ----- #
    cauchy = [getCauchy(f_i, p_i) for f_i, p_i in zip(f, p)]

    # ----- write to HDF5 file ----- #
    cmd_log = " ".join([scriptID, name])
    h5f.add_data('Cauchy', np.array(cauchy), cmd_log=cmd_log)
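A quick numeric sanity check of the conversion above: with no deformation (F = I) the Cauchy stress equals the first Piola-Kirchhoff stress, and a pure volumetric stretch scales it by 1/det(F). Standalone sketch reusing the same formula:

import numpy as np

def cauchy(f, p):
    """sigma = (1/det F) * P . F^T, flattened to 9 components."""
    f = f.reshape(3, 3)
    p = p.reshape(3, 3)
    return 1.0/np.linalg.det(f)*np.dot(p, f.T).reshape(9)

p = np.arange(9, dtype=float)                   # arbitrary P
assert np.allclose(cauchy(np.eye(3), p), p)     # F = I  ->  sigma = P

f = 2.0*np.eye(3)                               # uniform stretch, det F = 8
assert np.allclose(cauchy(f, p), p*2.0/8.0)     # (1/8) * P . (2 I)^T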
@@ -1,145 +0,0 @@
#!/usr/bin/env python2.7
# -*- coding: UTF-8 no BOM -*-

import os
import sys
import math
import damask
import numpy as np
from optparse import OptionParser

scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName, damask.version])

# TODO
# This implementation has to iterate through the array one
# element at a time; maybe there are other ways to make this
# faster.

# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
desp = "Add RGB color value corresponding to TSL-OIM scheme for IPF."
parser = OptionParser(option_class=damask.extendableOption,
                      usage='%prog options [file[s]]',
                      description=desp,
                      version=scriptID)
parser.add_option('-p', '--pole',
                  dest='pole',
                  type='float', nargs=3, metavar='float float float',
                  help='lab frame direction for IPF [%default]')
msg = ', '.join(damask.Symmetry.lattices[1:])
parser.add_option('-s', '--symmetry',
                  dest='symmetry',
                  type='choice', choices=damask.Symmetry.lattices[1:],
                  metavar='string',
                  help='crystal symmetry [%default] {{{}}} '.format(msg))
parser.add_option('-e', '--eulers',
                  dest='eulers',
                  type='string', metavar='string',
                  help='Euler angles label')
parser.add_option('-d', '--degrees',
                  dest='degrees',
                  action='store_true',
                  help='Euler angles are given in degrees [%default]')
parser.add_option('-m', '--matrix',
                  dest='matrix',
                  type='string', metavar='string',
                  help='orientation matrix label')
parser.add_option('-a',
                  dest='a',
                  type='string', metavar='string',
                  help='crystal frame a vector label')
parser.add_option('-b',
                  dest='b',
                  type='string', metavar='string',
                  help='crystal frame b vector label')
parser.add_option('-c',
                  dest='c',
                  type='string', metavar='string',
                  help='crystal frame c vector label')
parser.add_option('-q', '--quaternion',
                  dest='quaternion',
                  type='string', metavar='string',
                  help='quaternion label')

parser.set_defaults(pole=(0.0, 0.0, 1.0),
                    symmetry=damask.Symmetry.lattices[-1],
                    degrees=False)

(options, filenames) = parser.parse_args()

# safeguard to allow only one orientation representation
# use dynamic typing to group a, b, c into frame
options.frame = [options.a, options.b, options.c]
input = [options.eulers is not None,
         all(options.frame),
         options.matrix is not None,
         options.quaternion is not None]

if np.sum(input) != 1:
    parser.error('needs exactly one input format.')

# select the input label that was requested (active)
label_active = np.where(input)[0][0]
(label, dim, inputtype) = [(options.eulers, 3, 'eulers'),
                           (options.frame, [3, 3, 3], 'frame'),
                           (options.matrix, 9, 'matrix'),
                           (options.quaternion, 4, 'quaternion')][label_active]

# rescale degrees to radians
toRadians = math.pi/180.0 if options.degrees else 1.0

# only use normalized pole
pole = np.array(options.pole)
pole /= np.linalg.norm(pole)

# ----- Loop over input files ----- #
for name in filenames:
    try:
        h5f = damask.H5Table(name, new_file=False)
    except:
        continue
    damask.util.report(scriptName, name)

    # extract data from HDF5 file
    if inputtype == 'eulers':
        orieData = h5f.get_data(label)
    elif inputtype == 'matrix':
        orieData = h5f.get_data(label)
        orieData = orieData.reshape(orieData.shape[0], 3, 3)
    elif inputtype == 'frame':
        vctr_a = h5f.get_data(label[0])
        vctr_b = h5f.get_data(label[1])
        vctr_c = h5f.get_data(label[2])
        frame = np.column_stack((vctr_a, vctr_b, vctr_c))
        orieData = frame.reshape(frame.shape[0], 3, 3)
    elif inputtype == 'quaternion':
        orieData = h5f.get_data(label)

    # calculate the IPF color
    rgbArrays = np.zeros((orieData.shape[0], 3))
    for ci in range(rgbArrays.shape[0]):
        if inputtype == 'eulers':
            o = damask.Orientation(Eulers=np.array(orieData[ci, :])*toRadians,
                                   symmetry=options.symmetry).reduced()
        elif inputtype == 'matrix':
            o = damask.Orientation(matrix=orieData[ci, :, :].transpose(),
                                   symmetry=options.symmetry).reduced()
        elif inputtype == 'frame':
            o = damask.Orientation(matrix=orieData[ci, :, :],
                                   symmetry=options.symmetry).reduced()
        elif inputtype == 'quaternion':
            o = damask.Orientation(quaternion=orieData[ci, :],
                                   symmetry=options.symmetry).reduced()
        rgbArrays[ci, :] = o.IPFcolor(pole)

    # compose labels/headers for IPF color (RGB)
    labelIPF = 'IPF_{:g}{:g}{:g}_{sym}'.format(*options.pole,
                                               sym=options.symmetry.lower())

    # compose cmd history (goes with the dataset)
    cmd_log = scriptID + '\t' + ' '.join(sys.argv[1:])

    # write data to HDF5 file
    h5f.add_data(labelIPF, rgbArrays, cmd_log=cmd_log)
@@ -1,85 +0,0 @@
#!/usr/bin/env python2.7
# -*- coding: UTF-8 no BOM -*-

import os
import sys
import math
import damask
import numpy as np
from optparse import OptionParser

scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName, damask.version])


# ----- Helper functions ----- #
def calcMises(what, tensor):
    """Calculate von Mises equivalent"""
    dev = tensor - np.trace(tensor)/3.0*np.eye(3)
    symdev = 0.5*(dev+dev.T)
    return math.sqrt(np.sum(symdev*symdev.T) *
                     {
                      'stress': 3.0/2.0,
                      'strain': 2.0/3.0,
                     }[what.lower()])


# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
desp = "Add von Mises equivalent values for symmetric part of requested "
desp += "stress and/or strain tensors."
parser = OptionParser(option_class=damask.extendableOption,
                      usage='%prog options [file[s]]',
                      description=desp,
                      version=scriptID)
parser.add_option('-e', '--strain',
                  dest='strain',
                  metavar='string',
                  help='name of dataset containing strain tensors')
parser.add_option('-s', '--stress',
                  dest='stress',
                  metavar='string',
                  help='name of dataset containing stress tensors')

parser.set_defaults(strain=None, stress=None)

(options, filenames) = parser.parse_args()

# ----- Loop over input files ----- #
for name in filenames:
    try:
        h5f = damask.H5Table(name, new_file=False)
    except:
        continue
    damask.util.report(scriptName, name)

    # TODO:
    # Could use some refactoring here
    if options.stress is not None:
        # extract stress tensor from HDF5
        tnsr = h5f.get_data(options.stress)

        # calculate von Mises equivalent row by row
        vmStress = np.zeros(tnsr.shape[0])
        for ri in range(tnsr.shape[0]):
            stressTnsr = tnsr[ri, :].reshape(3, 3)
            vmStress[ri] = calcMises('stress', stressTnsr)

        # compose label
        label = "Mises{}".format(options.stress)

        # prepare log info
        cmd_log = scriptID + '\t' + ' '.join(sys.argv[1:])

        # write data to HDF5 file
        h5f.add_data(label, vmStress, cmd_log=cmd_log)

    if options.strain is not None:
        tnsr = h5f.get_data(options.strain)
        vmStrain = np.zeros(tnsr.shape[0])
        for ri in range(tnsr.shape[0]):
            strainTnsr = tnsr[ri, :].reshape(3, 3)
            vmStrain[ri] = calcMises('strain', strainTnsr)
        label = "Mises{}".format(options.strain)
        cmd_log = scriptID + '\t' + ' '.join(sys.argv[1:])
        h5f.add_data(label, vmStrain, cmd_log=cmd_log)
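As a sanity check on calcMises above: for a uniaxial stress state diag(s, 0, 0) the von Mises equivalent stress is exactly s. A standalone sketch using the same deviator-based formula:

import math
import numpy as np

def mises(what, tensor):
    """von Mises equivalent, same formula as calcMises above."""
    dev = tensor - np.trace(tensor)/3.0*np.eye(3)
    symdev = 0.5*(dev + dev.T)
    factor = {'stress': 3.0/2.0, 'strain': 2.0/3.0}[what]
    return math.sqrt(np.sum(symdev*symdev.T)*factor)

s = 123.0
sigma = np.diag([s, 0.0, 0.0])        # uniaxial stress
assert abs(mises('stress', sigma) - s) < 1e-9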
@@ -1,156 +0,0 @@
#!/usr/bin/env python2.7
# -*- coding: UTF-8 no BOM -*-

import os
import sys
import damask
import numpy as np
from optparse import OptionParser

scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName, damask.version])


# ----- Helper functions ----- #
def operator(stretch, strain, eigenvalues):
    # Albrecht Bertram: Elasticity and Plasticity of Large Deformations
    # An Introduction (3rd Edition, 2012), p. 102
    return {'V#ln':    np.log(eigenvalues),
            'U#ln':    np.log(eigenvalues),
            'V#Biot':  (np.ones(3, 'd') - 1.0/eigenvalues),
            'U#Biot':  (eigenvalues - np.ones(3, 'd')),
            'V#Green': (np.ones(3, 'd') - 1.0/eigenvalues/eigenvalues)*0.5,
            'U#Green': (eigenvalues*eigenvalues - np.ones(3, 'd'))*0.5,
            }[stretch+'#'+strain]


def calcEPS(defgrads, stretchType, strainType):
    """Calculate specific type of strain tensor"""
    eps = np.zeros(defgrads.shape)  # initialize container

    # TODO:
    # this loop could use a performance boost
    # (multi-threading?)
    for ri in range(defgrads.shape[0]):
        f = defgrads[ri, :].reshape(3, 3)
        U, S, Vh = np.linalg.svd(f)
        R = np.dot(U, Vh)  # rotation of polar decomposition
        if stretchType == 'U':
            stretch = np.dot(np.linalg.inv(R), f)  # F = RU
        elif stretchType == 'V':
            stretch = np.dot(f, np.linalg.inv(R))  # F = VR

        # kill nasty noisy data
        stretch = np.where(abs(stretch) < 1e-12, 0, stretch)

        (D, V) = np.linalg.eig(stretch)
        # flip principal components with negative eigenvalues
        neg = np.where(D < 0.0)
        D[neg] *= -1.
        V[:, neg] *= -1.

        # check each vector for orthogonality
        # --> brute-force enforcing of an orthogonal base
        #     and re-normalize
        for i, eigval in enumerate(D):
            if np.dot(V[:, i], V[:, (i+1) % 3]) != 0.0:
                V[:, (i+1) % 3] = np.cross(V[:, (i+2) % 3], V[:, i])
                V[:, (i+1) % 3] /= np.sqrt(np.dot(V[:, (i+1) % 3],
                                                  V[:, (i+1) % 3].conj()))

        # calculate requested version of strain tensor
        d = operator(stretchType, strainType, D)
        eps[ri] = (np.dot(V, np.dot(np.diag(d), V.T)).real).reshape(9)

    return eps


# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
desp = "Add column(s) containing given strains based on given stretches"
parser = OptionParser(option_class=damask.extendableOption,
                      usage='%prog options [file[s]]',
                      description=desp,
                      version=scriptID)
msg = 'material strains based on right Cauchy-Green deformation, i.e., C and U'
parser.add_option('-u', '--right',
                  dest='right',
                  action='store_true',
                  help=msg)
msg = 'spatial strains based on left Cauchy-Green deformation, i.e., B and V'
parser.add_option('-v', '--left',
                  dest='left',
                  action='store_true',
                  help=msg)
parser.add_option('-0', '--logarithmic',
                  dest='logarithmic',
                  action='store_true',
                  help='calculate logarithmic strain tensor')
parser.add_option('-1', '--biot',
                  dest='biot',
                  action='store_true',
                  help='calculate Biot strain tensor')
parser.add_option('-2', '--green',
                  dest='green',
                  action='store_true',
                  help='calculate Green strain tensor')
# NOTE:
# It might be easier to just calculate one type of deformation gradient
# at a time.
msg = 'heading(s) of columns containing deformation tensor values'
parser.add_option('-f', '--defgrad',
                  dest='defgrad',
                  action='extend',
                  metavar='<string LIST>',
                  help=msg)

parser.set_defaults(right=False, left=False,
                    logarithmic=False, biot=False, green=False,
                    defgrad='f')

(options, filenames) = parser.parse_args()

stretches = []
strains = []

if options.right:
    stretches.append('U')
if options.left:
    stretches.append('V')

if options.logarithmic:
    strains.append('ln')
if options.biot:
    strains.append('Biot')
if options.green:
    strains.append('Green')

if options.defgrad is None:
    parser.error('no data column specified.')

# ----- Loop over input files ----- #
for name in filenames:
    try:
        h5f = damask.H5Table(name, new_file=False)
    except:
        continue
    damask.util.report(scriptName, name)

    # extract defgrads from HDF5 storage
    F = h5f.get_data(options.defgrad)

    # allow calculating multiple types of strain within the
    # same cmd call
    for stretchType in stretches:
        for strainType in strains:
            # calculate strain tensor for this type
            eps = calcEPS(F, stretchType, strainType)

            # compose labels/headers for this strain tensor
            labelsStrain = strainType + stretchType

            # prepare log info
            cmd_log = scriptID + '\t' + ' '.join(sys.argv[1:])

            # write data to HDF5 file
            h5f.add_data(labelsStrain, eps, cmd_log=cmd_log)
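The SVD-based polar decomposition used in calcEPS can be checked on a deformation gradient built from a known rotation and known principal stretches: R should come back orthogonal, F should equal R.U, and the recovered principal stretches should match. A small standalone sketch:

import numpy as np

# known rotation (30 degrees about z) and known principal stretches
c, s = np.cos(np.pi/6), np.sin(np.pi/6)
R_true = np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])
U_true = np.diag([1.1, 0.9, 1.0])
F = R_true.dot(U_true)                      # F = R U

# same steps as calcEPS
U_svd, S, Vh = np.linalg.svd(F)
R = U_svd.dot(Vh)                           # rotation part of the polar decomposition
U = np.dot(np.linalg.inv(R), F)             # right stretch, F = R U

assert np.allclose(R.T.dot(R), np.eye(3))   # R is orthogonal
assert np.allclose(R, R_true)
assert np.allclose(U, U_true)
assert np.allclose(np.linalg.eigvalsh(U), [0.9, 1.0, 1.1])   # principal stretches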
@@ -1,130 +0,0 @@
#!/usr/bin/env python2.7
# -*- coding: UTF-8 no BOM -*-

# ------------------------------------------------------------------- #
# NOTE:
# 1. Current Xdmf rendering in ParaView has a memory issue where a
#    large number of polyvertices causes a segmentation fault. By
#    default, ParaView outputs a cell-based Xdmf description, which
#    works for small and medium meshes (<10,000 points). Hence a
#    rectangular mesh is used as the de facto Geometry description
#    here.
# 2. Due to the unstable state of Xdmf, it is safer to port data
#    to VTR rather than use Xdmf as an interpretive layer for data
#    visualization.
# ------------------------------------------------------------------- #

import os
import damask
import h5py
import xml.etree.cElementTree as ET
from optparse import OptionParser
from xml.dom import minidom
from damask.h5table import lables_to_path

scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName, damask.version])


# ----- HELPER FUNCTIONS ----- #
def addTopLvlCmt(xmlstr, topLevelCmt):
    """Add top-level comment to string from ET"""
    # a quick hack to add the top-level comment to the XML file
    # --> somehow ElementTree does not provide this functionality
    #     by default
    strList = xmlstr.split("\n")
    strList[0] += "\n" + topLevelCmt
    return "\n".join(strList)


# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
msg = 'Generate Xdmf wrapper for HDF5 file.'
parser = OptionParser(option_class=damask.extendableOption,
                      usage='%prog options [file[s]]',
                      description=msg,
                      version=scriptID)

(options, filenames) = parser.parse_args()

h5f = filenames[0]
h5f_base = h5f.split("/")[-1]

# ----- parse HDF5 file ----- #
h5f_dataDim = {}
h5f_dataPath = {}
h5f_dataType = {}
with h5py.File(h5f, 'a') as f:
    labels = f.keys()
    labels += f['/Constitutive'].keys()
    labels += f['/Crystallite'].keys()
    labels += ['Vx', 'Vy', 'Vz']
    # remove group names as they do not contain real data
    # TODO: use the h5py/H5Table API to detect dataset names to
    #       avoid unnecessary name space pruning.
    labels.remove('Constitutive')
    labels.remove('Crystallite')
    labels.remove('Geometry')
    # loop through remaining labels
    for label in labels:
        dataType, h5Path = lables_to_path(label)
        h5f_dataType[label] = dataType
        h5f_dataDim[label] = " ".join(map(str, f[h5Path].shape))
        h5f_dataPath[label] = h5Path

# ----- construct xdmf elements ----- #
root = ET.Element("Xdmf", version='3.3')
root.set('xmlns:xi', "http://www.w3.org/2001/XInclude")
root.append(ET.Comment('Generated Xdmf wrapper for DAMASK HDF5 output'))

# usually there should only be ONE domain
domain = ET.SubElement(root, 'Domain',
                       Name=h5f_base.split(".")[0])

# use global topology through reference
grid = ET.SubElement(domain, 'Grid', GridType="Uniform")
# geometry section
geometry = ET.SubElement(grid, 'Geometry', GeometryType="VXVYVZ")
for vector in ["Vz", "Vy", "Vx"]:
    dataitem = ET.SubElement(geometry, "DataItem",
                             DataType="Float",
                             Dimensions=h5f_dataDim[vector],
                             Name=vector,
                             Format="HDF")
    dataitem.text = h5f_base.split("/")[-1] + ":{}".format(h5f_dataPath[vector])
# topology section
# TODO: support other formats based on a given option
meshDim = [h5f_dataDim["Vz"], h5f_dataDim["Vy"], h5f_dataDim["Vx"]]
topology = ET.SubElement(grid, 'Topology',
                         TopologyType="3DRectMesh",
                         Dimensions=" ".join(map(str, meshDim)))

# attributes section
# Question: how to properly handle data mapping for multiphase situations
labelsProcessed = ['Vx', 'Vy', 'Vz']
# walk through each attribute
for label in labels:
    if label in labelsProcessed: continue
    print("adding {}...".format(label))
    attr = ET.SubElement(grid, 'Attribute',
                         Name=label,
                         Type="None",
                         Center="Cell")
    dataitem = ET.SubElement(attr, 'DataItem',
                             Name=label,
                             Format='HDF',
                             Dimensions=h5f_dataDim[label])
    dataitem.text = h5f_base + ":" + h5f_dataPath[label]
    # update progress list
    labelsProcessed.append(label)


# pretty print the xdmf(xml) file content
xmlstr = minidom.parseString(ET.tostring(root)).toprettyxml(indent="\t")
xmlstr = addTopLvlCmt(xmlstr, '<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd" []>')
# write str to file through native python API
with open(h5f.replace(".h5", ".xmf"), 'w') as f:
    f.write(xmlstr)
@@ -1,191 +0,0 @@
#!/usr/bin/env python2.7
# -*- coding: UTF-8 no BOM -*-

import os
import vtk
import damask
from vtk.util import numpy_support
from optparse import OptionParser

scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName, damask.version])


# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
msg = "Add scalars, vectors, and/or an RGB tuple from "
msg += "an HDF5 file to an existing VTK rectilinear grid (.vtr/.vtk)."
parser = OptionParser(option_class=damask.extendableOption,
                      usage='%prog options [file[s]]',
                      description=msg,
                      version=scriptID)
parser.add_option('--vtk',
                  dest='vtk',
                  type='string', metavar='string',
                  help='VTK file name')
parser.add_option('--inplace',
                  dest='inplace',
                  action='store_true',
                  help='modify VTK file in-place')
parser.add_option('-r', '--render',
                  dest='render',
                  action='store_true',
                  help='open output in VTK render window')
parser.add_option('-d', '--data',
                  dest='data',
                  action='extend', metavar='<string LIST>',
                  help='scalar/vector value(s) label(s)')
parser.add_option('-t', '--tensor',
                  dest='tensor',
                  action='extend', metavar='<string LIST>',
                  help='tensor (3x3) value label(s)')
parser.add_option('-c', '--color',
                  dest='color',
                  action='extend', metavar='<string LIST>',
                  help='RGB color tuple label')
parser.add_option('-m', '--mode',
                  dest='mode',
                  metavar='string',
                  type='choice', choices=['cell', 'point'],
                  help='cell-centered or point-centered coordinates')

parser.set_defaults(data=[],
                    tensor=[],
                    color=[],
                    mode='cell',
                    inplace=False,
                    render=False)

(options, filenames) = parser.parse_args()

# ----- Legacy VTK format support ----- #
if os.path.splitext(options.vtk)[1] == '.vtr':
    reader = vtk.vtkXMLRectilinearGridReader()
    reader.SetFileName(options.vtk)
    reader.Update()
    rGrid = reader.GetOutput()
elif os.path.splitext(options.vtk)[1] == '.vtk':
    reader = vtk.vtkGenericDataObjectReader()
    reader.SetFileName(options.vtk)
    reader.Update()
    rGrid = reader.GetRectilinearGridOutput()
else:
    parser.error('Unsupported VTK file type extension.')

Npoints = rGrid.GetNumberOfPoints()
Ncells = rGrid.GetNumberOfCells()

# ----- Summary output (sanity check) ----- #
msg = '{}: {} points and {} cells...'.format(options.vtk,
                                             Npoints,
                                             Ncells)
damask.util.croak(msg)

# ----- Read HDF5 file ----- #
# NOTE:
# --> It is possible that in the future we will add data
#     from different increments into the same VTK file, but
#     this feature is not supported at the moment.
# --> Let it fail if the HDF5 is invalid; the Python interpreter
#     should be able to catch this error.
h5f = damask.H5Table(filenames[0], new_file=False)

# ----- Process data ----- #
featureToAdd = {'data': options.data,
                'tensor': options.tensor,
                'color': options.color}
VTKarray = {}  # store all vtkData in a dict, then ship them to file
for dataType in featureToAdd.keys():
    featureNames = featureToAdd[dataType]
    for featureName in featureNames:
        VTKtype = vtk.VTK_DOUBLE
        VTKdata = h5f.get_data(featureName)
        if dataType == 'color':
            VTKtype = vtk.VTK_UNSIGNED_CHAR
            VTKdata = (VTKdata*255).astype(int)
        elif dataType == 'tensor':
            # force symmetry on tensor-type data
            VTKdata[:, 1] = VTKdata[:, 3] = 0.5*(VTKdata[:, 1]+VTKdata[:, 3])
            VTKdata[:, 2] = VTKdata[:, 6] = 0.5*(VTKdata[:, 2]+VTKdata[:, 6])
            VTKdata[:, 5] = VTKdata[:, 7] = 0.5*(VTKdata[:, 5]+VTKdata[:, 7])
        # use vtk's built-in numpy support to add data (much faster)
        # NOTE:
        # --> a deep copy is necessary here, otherwise a memory leak could occur
        VTKarray[featureName] = numpy_support.numpy_to_vtk(num_array=VTKdata,
                                                           deep=True,
                                                           array_type=VTKtype)
        VTKarray[featureName].SetName(featureName)

# ----- ship data to vtkGrid ----- #
mode = options.mode
damask.util.croak('{} mode...'.format(mode))

# NOTE:
# --> For an unknown reason, ParaView only recognizes one
#     tensor attribute per cell, thus it is safer
#     to only add one attribute as a tensor.
for dataType in featureToAdd.keys():
    featureNames = featureToAdd[dataType]
    for featureName in featureNames:
        if dataType == 'color':
            if mode == 'cell':
                rGrid.GetCellData().SetScalars(VTKarray[featureName])
            elif mode == 'point':
                rGrid.GetPointData().SetScalars(VTKarray[featureName])
        elif dataType == 'tensor':
            if mode == 'cell':
                rGrid.GetCellData().SetTensors(VTKarray[featureName])
            elif mode == 'point':
                rGrid.GetPointData().SetTensors(VTKarray[featureName])
        else:
            if mode == 'cell':
                rGrid.GetCellData().AddArray(VTKarray[featureName])
            elif mode == 'point':
                rGrid.GetPointData().AddArray(VTKarray[featureName])

rGrid.Modified()
if vtk.VTK_MAJOR_VERSION <= 5:
    rGrid.Update()

# ----- write Grid to VTK file ----- #
writer = vtk.vtkXMLRectilinearGridWriter()
writer.SetDataModeToBinary()
writer.SetCompressorTypeToZLib()
vtkFileN = os.path.splitext(options.vtk)[0]
vtkExtsn = '.vtr' if options.inplace else '_added.vtr'
writer.SetFileName(vtkFileN+vtkExtsn)
if vtk.VTK_MAJOR_VERSION <= 5:
    writer.SetInput(rGrid)
else:
    writer.SetInputData(rGrid)
writer.Write()

# ----- render results from script ----- #
if options.render:
    mapper = vtk.vtkDataSetMapper()
    mapper.SetInputData(rGrid)
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)

    # Create the graphics structure. The renderer renders into the
    # render window. The render window interactor captures mouse events
    # and will perform appropriate camera or actor manipulation
    # depending on the nature of the events.
    ren = vtk.vtkRenderer()

    renWin = vtk.vtkRenderWindow()
    renWin.AddRenderer(ren)

    ren.AddActor(actor)
    ren.SetBackground(1, 1, 1)
    renWin.SetSize(200, 200)

    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(renWin)

    iren.Initialize()
    renWin.Render()
    iren.Start()
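The three index-pair averages applied to tensor data above are a flattened-row version of symmetrizing each 3x3 tensor, i.e. 0.5*(T + T^T) applied to row-major 9-component rows. A small standalone check of that equivalence:

import numpy as np

rows = np.arange(18, dtype=float).reshape(2, 9)   # two flattened 3x3 tensors

# pairwise averaging on the flattened layout, as in the script above
flat = rows.copy()
flat[:, 1] = flat[:, 3] = 0.5*(flat[:, 1] + flat[:, 3])
flat[:, 2] = flat[:, 6] = 0.5*(flat[:, 2] + flat[:, 6])
flat[:, 5] = flat[:, 7] = 0.5*(flat[:, 5] + flat[:, 7])

# the same operation written as 0.5*(T + T^T) per tensor
full = np.array([0.5*(t.reshape(3, 3) + t.reshape(3, 3).T).reshape(9)
                 for t in rows])

assert np.allclose(flat, full)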
@@ -1,135 +0,0 @@
#!/usr/bin/env python2.7
# -*- coding: UTF-8 no BOM -*-

# ------------------------------------------------------------------ #
# NOTE:
# 1. It might be a good idea to separate IO and calculation.
# 2. Some of the calculation could be useful in other situations;
#    why not build a math_util or math_sup module that contains
#    all the useful functions.
# ------------------------------------------------------------------ #

import os
import vtk
import numpy as np
import damask
from optparse import OptionParser

scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName, damask.version])


# ----- HELPER FUNCTION ----- #
def getMeshFromXYZ(xyzArray, mode):
    """Calc Vx, Vy, Vz vectors for a vtk rectangular mesh"""
    # NOTE:
    # --> np.unique will automatically sort the list
    # --> although not exactly O(1), the mesh dimension should be
    #     small anyway, so this is still lightweight.
    dim = xyzArray.shape[1]  # 2D:2, 3D:3
    coords = [np.unique(xyzArray[:, i]) for i in range(dim)]

    if mode == 'cell':
        # since x, y, z might not have the same number of elements,
        # we have to deal with them individually
        for ri in range(dim):
            vctr_pt = coords[ri]
            vctr_cell = np.empty(len(vctr_pt)+1)
            # calculate first and last end point
            vctr_cell[0] = vctr_pt[0] - 0.5*abs(vctr_pt[1] - vctr_pt[0])
            vctr_cell[-1] = vctr_pt[-1] + 0.5*abs(vctr_pt[-2] - vctr_pt[-1])
            for cj in range(1, len(vctr_cell)-1):
                vctr_cell[cj] = 0.5*(vctr_pt[cj-1] + vctr_pt[cj])
            # update the coords
            coords[ri] = vctr_cell

    if dim < 3:
        coords.append([0])  # expand to 3D with 0 for z

    # auxiliary description
    grid = np.array(map(len, coords), 'i')
    N = grid.prod() if mode == 'point' else (grid-1).prod()
    return coords, grid, N


# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
msg = "Create regular voxel grid from points in an HDF5 file."
parser = OptionParser(option_class=damask.extendableOption,
                      usage='%prog options [file[s]]',
                      description=msg,
                      version=scriptID)

parser.add_option('-m', '--mode',
                  dest='mode',
                  metavar='string',
                  type='choice', choices=['cell', 'point'],
                  help='cell-centered or point-centered coordinates')
parser.add_option('-p', '--pos', '--position',
                  dest='pos',
                  type='string', metavar='string',
                  help='label of coordinates [%default]')

parser.set_defaults(mode='cell',
                    pos='pos')

(options, filenames) = parser.parse_args()

# ----- loop over input files ----- #
for name in filenames:
    try:
        h5f = damask.H5Table(name, new_file=False)
    except:
        continue
    damask.util.report(scriptName, name)

    # ----- read xyzArray from HDF5 file ----- #
    xyzArray = h5f.get_data(options.pos)

    # ----- figure out size and grid ----- #
    coords, grid, N = getMeshFromXYZ(xyzArray, options.mode)

    # ----- process data ----- #
    rGrid = vtk.vtkRectilinearGrid()
    # WARNING: list expansion does not work here as these are
    #          just pointers to a vtk instance. Simply put,
    #          DON'T USE
    #          [<VTK_CONSTRUCTOR>] * <NUM_OF_ELEMENTS>
    coordArray = [vtk.vtkDoubleArray(),
                  vtk.vtkDoubleArray(),
                  vtk.vtkDoubleArray()]

    rGrid.SetDimensions(*grid)
    for i, points in enumerate(coords):
        for point in points:
            coordArray[i].InsertNextValue(point)

    rGrid.SetXCoordinates(coordArray[0])
    rGrid.SetYCoordinates(coordArray[1])
    rGrid.SetZCoordinates(coordArray[2])

    # ----- output result ----- #
    dirPath = os.path.split(name)[0]
    if name:
        writer = vtk.vtkXMLRectilinearGridWriter()
        writer.SetCompressorTypeToZLib()
        writer.SetDataModeToBinary()
        # getting the name is a little bit tricky
        vtkFileName = os.path.splitext(os.path.split(name)[1])[0]
        vtkFileName += '_{}({})'.format(options.pos, options.mode)
        vtkFileName += '.' + writer.GetDefaultFileExtension()
        writer.SetFileName(os.path.join(dirPath, vtkFileName))
    else:
        writer = vtk.vtkDataSetWriter()
        writer.SetHeader('# powered by '+scriptID)
        writer.WriteToOutputStringOn()

    if vtk.VTK_MAJOR_VERSION <= 5:
        writer.SetInput(rGrid)
    else:
        writer.SetInputData(rGrid)

    writer.Write()
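The cell branch of getMeshFromXYZ converts N point-centre coordinates along one axis into N+1 cell-boundary coordinates: interior boundaries are midpoints between neighbouring centres, and the two ends are extrapolated by half a spacing. A standalone sketch of that conversion:

import numpy as np

def centres_to_boundaries(centres):
    """N cell-centre coordinates -> N+1 cell-boundary coordinates."""
    c = np.asarray(centres, dtype=float)
    b = np.empty(len(c) + 1)
    b[0] = c[0] - 0.5*abs(c[1] - c[0])       # extrapolate first boundary
    b[-1] = c[-1] + 0.5*abs(c[-2] - c[-1])   # extrapolate last boundary
    b[1:-1] = 0.5*(c[:-1] + c[1:])           # interior boundaries: midpoints
    return b

# evenly spaced centres 0, 1, 2 -> boundaries -0.5, 0.5, 1.5, 2.5
assert np.allclose(centres_to_boundaries([0.0, 1.0, 2.0]),
                   [-0.5, 0.5, 1.5, 2.5])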