import multiprocessing as mp
from multiprocessing.synchronize import Lock
import re
import fnmatch
import os
import copy
import datetime
import xml.etree.ElementTree as ET  # noqa
import xml.dom.minidom
from pathlib import Path
from functools import partial
from collections import defaultdict
from collections.abc import Iterable
from typing import Optional, Union, Callable, Any, Sequence, Literal, Dict, List, Tuple

import h5py
import numpy as np
import numpy.ma as ma

import damask
from . import VTK
from . import Orientation
from . import Rotation
from . import grid_filters
from . import mechanics
from . import tensor
from . import util
from ._typehints import FloatSequence, IntSequence

h5py3 = h5py.__version__[0] == '3'

chunk_size = 1024**2//8  # for compression in HDF5
prefix_inc = 'increment_'


def _read(dataset: h5py._hl.dataset.Dataset) -> np.ndarray:
    """Read a dataset and its metadata into a numpy.ndarray."""
    metadata = {k: (v.decode() if not h5py3 and type(v) is bytes else v) for k, v in dataset.attrs.items()}
    dtype = np.dtype(dataset.dtype, metadata=metadata)  # type: ignore
    return np.array(dataset, dtype=dtype)


def _match(requested,
           existing: h5py._hl.base.KeysViewHDF5) -> List[Any]:
    """Find matches among two sets of labels."""
    def flatten_list(list_of_lists):
        return [e for e_ in list_of_lists for e in e_]

    if requested is True:
        requested = '*'
    elif requested is False or requested is None:
        requested = []

    requested_ = requested if hasattr(requested, '__iter__') and not isinstance(requested, str) else \
                 [requested]

    return sorted(set(flatten_list([fnmatch.filter(existing, r) for r in requested_])),
                  key=util.natural_sort)


def _empty_like(dataset: np.ma.core.MaskedArray,
                N_materialpoints: int,
                fill_float: float,
                fill_int: int) -> np.ma.core.MaskedArray:
    """Create empty numpy.ma.MaskedArray."""
    return ma.array(np.empty((N_materialpoints,) + dataset.shape[1:], dataset.dtype),
                    fill_value=fill_float if dataset.dtype in np.sctypes['float'] else fill_int,
                    mask=True)


class AttributeManagerNullterm(h5py.AttributeManager):
    """
    Attribute management for DREAM.3D hdf5 files.

    String attribute values are stored as fixed-length string with NULLTERM.

    References
    ----------
    https://stackoverflow.com/questions/38267076
    https://stackoverflow.com/questions/52750232

    """

    def create(self, name, data, shape=None, dtype=None):
        if isinstance(data, str):
            tid = h5py.h5t.C_S1.copy()
            tid.set_size(len(data + ' '))
            super().create(name=name, data=data + ' ', dtype=h5py.Datatype(tid))
        else:
            super().create(name=name, data=data, shape=shape, dtype=dtype)

h5py._hl.attrs.AttributeManager = AttributeManagerNullterm  # 'monkey patch'


class Result:
    """
    Add data to and export data from a DADF5 file.

    A DADF5 (DAMASK HDF5) file contains DAMASK results.
    Its group/folder structure reflects the layout in material.yaml.

    This class provides a customizable view on the DADF5 file.
    Upon initialization, all attributes are visible.
    Derived quantities are added to the file and existing data is
    exported based on the current view.

    Examples
    --------
    Open 'my_file.hdf5', which is assumed to contain deformation gradient 'F'
    and first Piola-Kirchhoff stress 'P', add the Mises equivalent of the
    Cauchy stress, and export it to VTK (file) and numpy.ndarray (memory).

    >>> import damask
    >>> r = damask.Result('my_file.hdf5')
    >>> r.add_stress_Cauchy()
    >>> r.add_equivalent_Mises('sigma')
    >>> r.export_VTK()
    >>> r_last = r.view(increments=-1)
    >>> sigma_vM_last = r_last.get('sigma_vM')

    """

    def __init__(self, fname: Union[str, Path]):
        """
        New result view bound to a HDF5 file.

        Parameters
        ----------
        fname : str or pathlib.Path
            Name of the DADF5 file to be opened.

        """
        with h5py.File(fname, 'r') as f:

            self.version_major = f.attrs['DADF5_version_major']
            self.version_minor = f.attrs['DADF5_version_minor']
            if (self.version_major != 0 or not 12 <= self.version_minor <= 14) and self.version_major != 1:
                raise TypeError(f'unsupported DADF5 version "{self.version_major}.{self.version_minor}"')
            if self.version_major == 0 and self.version_minor < 14:
                self.export_simulation_setup = None  # type: ignore

            self.structured = 'cells' in f['geometry'].attrs.keys()

            if self.structured:
                self.cells = f['geometry'].attrs['cells']
                self.size = f['geometry'].attrs['size']
                self.origin = f['geometry'].attrs['origin']
            else:
                self.add_curl = self.add_divergence = self.add_gradient = None  # type: ignore

            r = re.compile(rf'{prefix_inc}([0-9]+)')
            self.increments = sorted([i for i in f.keys() if r.match(i)], key=util.natural_sort)
            self.times = np.around([f[i].attrs['t/s'] for i in self.increments], 12)
            if len(self.increments) == 0:
                raise ValueError('incomplete DADF5 file')

            self.N_materialpoints, self.N_constituents = np.shape(f['cell_to/phase'])

            self.homogenization = f['cell_to/homogenization']['label'].astype('str')
            self.homogenizations = sorted(np.unique(self.homogenization), key=util.natural_sort)
            self.phase = f['cell_to/phase']['label'].astype('str')
            self.phases = sorted(np.unique(self.phase), key=util.natural_sort)

            self.fields: List[str] = []
            for c in self.phases:
                self.fields += f['/'.join([self.increments[0], 'phase', c])].keys()
            for m in self.homogenizations:
                self.fields += f['/'.join([self.increments[0], 'homogenization', m])].keys()
            self.fields = sorted(set(self.fields), key=util.natural_sort)  # make unique

        self.visible = {'increments':      self.increments,
                        'phases':          self.phases,
                        'homogenizations': self.homogenizations,
                        'fields':          self.fields,
                        }

        self.fname = Path(fname).expanduser().absolute()

        self._protected = True

    def __copy__(self) -> "Result":
        """
        Return deepcopy(self).

        Create deep copy.

        """
        return copy.deepcopy(self)

    copy = __copy__

    def __repr__(self) -> str:
        """
        Return repr(self).

        Give short human-readable summary.

        """
        with h5py.File(self.fname, 'r') as f:
            header = [f'Created by {f.attrs["creator"]}',
                      f'        on {f.attrs["created"]}',
                      f' executing "{f.attrs["call"]}"']
        visible_increments = self.visible['increments']

        first = self.view(increments=visible_increments[0:1]).list_data()

        last = [] if len(visible_increments) < 2 else \
               self.view(increments=visible_increments[-1:]).list_data()

        in_between = [] if len(visible_increments) < 3 else \
                     [f'\n{inc}\n  ...' for inc in visible_increments[1:-1]]

        return util.srepr([util.deemph(header)] + first + in_between + last)

    def _manage_view(self,
                     action: Literal['set', 'add', 'del'],
                     increments: Union[None, int, Sequence[int], str, Sequence[str], bool] = None,
                     times: Union[None, float, Sequence[float], str, Sequence[str], bool] = None,
                     phases: Union[None, str, Sequence[str], bool] = None,
                     homogenizations: Union[None, str, Sequence[str], bool] = None,
                     fields: Union[None, str, Sequence[str], bool] = None) -> "Result":
        """
        Manage the visibility of the groups.

        Parameters
        ----------
        action : str
            Select from 'set', 'add', and 'del'.

        Returns
        -------
        view : damask.Result
            Modified or new view on the DADF5 file.

        """
        if increments is not None and times is not None:
            raise ValueError('"increments" and "times" are mutually exclusive')

        dup = self.copy()
        for what, datasets in zip(['increments', 'times', 'phases', 'homogenizations', 'fields'],
                                  [increments, times, phases, homogenizations, fields]):
            if datasets is None:
                continue
            # allow True/False and string arguments
            elif datasets is True:
                datasets = '*'
            elif datasets is False:
                datasets = []
            choice = [datasets] if not hasattr(datasets, '__iter__') or isinstance(datasets, str) else list(datasets)  # type: ignore

            if what == 'increments':
                choice = [c if isinstance(c, str) and c.startswith(prefix_inc) else
                          self.increments[c] if isinstance(c, int) and c < 0 else
                          f'{prefix_inc}{c}' for c in choice]
            elif what == 'times':
                atol = 1e-2 * np.min(np.diff(self.times))
                what = 'increments'
                if choice == ['*']:
                    choice = self.increments
                else:
                    iterator = np.array(choice).astype(float)
                    choice = []
                    for c in iterator:
                        idx = np.searchsorted(self.times, c, side='left')
                        if idx < len(self.times) and np.isclose(c, self.times[idx], rtol=0, atol=atol):
                            choice.append(self.increments[idx])
                        elif idx > 0 and np.isclose(c, self.times[idx-1], rtol=0, atol=atol):
                            choice.append(self.increments[idx-1])

            valid = _match(choice, getattr(self, what))
            existing = set(self.visible[what])

            if action == 'set':
                dup.visible[what] = sorted(set(valid), key=util.natural_sort)
            elif action == 'add':
                dup.visible[what] = sorted(existing.union(valid), key=util.natural_sort)
            elif action == 'del':
                dup.visible[what] = sorted(existing.difference(valid), key=util.natural_sort)

        return dup

    def increments_in_range(self,
                            start: Union[None, str, int] = None,
                            end: Union[None, str, int] = None) -> Sequence[int]:
        """
        Get all increments within a given range.

        Parameters
        ----------
        start : int or str, optional
            Start increment. Defaults to first.
        end : int or str, optional
            End increment. Defaults to last.

        Returns
        -------
        increments : list of ints
            Increment number of all increments within the given bounds.
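
        Examples
        --------
        Select all increments up to (and including) increment 10,
        assuming these increments exist in the file:

        >>> import damask
        >>> r = damask.Result('my_file.hdf5')
        >>> r_early = r.view(increments=r.increments_in_range(end=10))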
"""
2022-11-06 23:40:23 +05:30
s , e = map ( lambda x : int ( x . split ( prefix_inc ) [ - 1 ] if isinstance ( x , str ) and x . startswith ( prefix_inc ) else x ) ,
2022-04-02 03:34:50 +05:30
( self . incs [ 0 ] if start is None else start ,
self . incs [ - 1 ] if end is None else end ) )
return [ i for i in self . incs if s < = i < = e ]
2020-03-03 17:13:14 +05:30
2022-04-27 19:56:33 +05:30

    def times_in_range(self,
                       start: Optional[float] = None,
                       end: Optional[float] = None) -> Sequence[float]:
        """
        Get times of all increments within a given time range.

        Parameters
        ----------
        start : float, optional
            Time of start increment. Defaults to time of first.
        end : float, optional
            Time of end increment. Defaults to time of last.

        Returns
        -------
        times : list of float
            Time of each increment within the given bounds.
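
        Examples
        --------
        Get the times of all increments within the first ten seconds,
        assuming the simulation covers this range:

        >>> import damask
        >>> r = damask.Result('my_file.hdf5')
        >>> t_first10 = r.times_in_range(end=10.0)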
"""
2022-04-22 01:26:17 +05:30
s , e = ( self . times [ 0 ] if start is None else start ,
self . times [ - 1 ] if end is None else end )
return [ t for t in self . times if s < = t < = e ]
2020-03-03 17:13:14 +05:30
2022-03-05 00:30:33 +05:30

    def view(self, *,
             increments: Union[None, int, Sequence[int], str, Sequence[str], bool] = None,
             times: Union[None, float, Sequence[float], str, Sequence[str], bool] = None,
             phases: Union[None, str, Sequence[str], bool] = None,
             homogenizations: Union[None, str, Sequence[str], bool] = None,
             fields: Union[None, str, Sequence[str], bool] = None,
             protected: Optional[bool] = None) -> "Result":
        """
        Set view.

        Wildcard matching with '?' and '*' is supported.
        True is equivalent to '*', False is equivalent to [].

        Parameters
        ----------
        increments : (list of) int, (list of) str, or bool, optional.
            Number(s) of increments to select.
        times : (list of) float, (list of) str, or bool, optional.
            Simulation time(s) of increments to select.
        phases : (list of) str, or bool, optional.
            Name(s) of phases to select.
        homogenizations : (list of) str, or bool, optional.
            Name(s) of homogenizations to select.
        fields : (list of) str, or bool, optional.
            Name(s) of fields to select.
        protected : bool, optional.
            Protection status of existing data.

        Returns
        -------
        view : damask.Result
            View with only the selected attributes being visible.

        Examples
        --------
        Get a view that shows only results from the initial configuration:

        >>> import damask
        >>> r = damask.Result('my_file.hdf5')
        >>> r_first = r.view(increments=0)

        Get a view that shows all results between simulation times of 10 to 40:

        >>> import damask
        >>> r = damask.Result('my_file.hdf5')
        >>> r_t10to40 = r.view(times=r.times_in_range(10.0, 40.0))

        """
        dup = self._manage_view('set', increments, times, phases, homogenizations, fields)
        if protected is not None:
            if not protected:
                print(util.warn('Warning: Modification of existing datasets allowed!'))
            dup._protected = protected

        return dup

    def view_more(self, *,
                  increments: Union[None, int, Sequence[int], str, Sequence[str], bool] = None,
                  times: Union[None, float, Sequence[float], str, Sequence[str], bool] = None,
                  phases: Union[None, str, Sequence[str], bool] = None,
                  homogenizations: Union[None, str, Sequence[str], bool] = None,
                  fields: Union[None, str, Sequence[str], bool] = None) -> "Result":
        """
        Add to view.

        Wildcard matching with '?' and '*' is supported.
        True is equivalent to '*', False is equivalent to [].

        Parameters
        ----------
        increments : (list of) int, (list of) str, or bool, optional.
            Number(s) of increments to select.
        times : (list of) float, (list of) str, or bool, optional.
            Simulation time(s) of increments to select.
        phases : (list of) str, or bool, optional.
            Name(s) of phases to select.
        homogenizations : (list of) str, or bool, optional.
            Name(s) of homogenizations to select.
        fields : (list of) str, or bool, optional.
            Name(s) of fields to select.

        Returns
        -------
        modified_view : damask.Result
            View with additional visible attributes.

        Examples
        --------
        Get a view that shows only results from first and last increment:

        >>> import damask
        >>> r_empty = damask.Result('my_file.hdf5').view(increments=False)
        >>> r_first = r_empty.view_more(increments=0)
        >>> r_first_and_last = r_first.view_more(increments=-1)

        """
        return self._manage_view('add', increments, times, phases, homogenizations, fields)

    def view_less(self, *,
                  increments: Union[None, int, Sequence[int], str, Sequence[str], bool] = None,
                  times: Union[None, float, Sequence[float], str, Sequence[str], bool] = None,
                  phases: Union[None, str, Sequence[str], bool] = None,
                  homogenizations: Union[None, str, Sequence[str], bool] = None,
                  fields: Union[None, str, Sequence[str], bool] = None) -> "Result":
        """
        Remove from view.

        Wildcard matching with '?' and '*' is supported.
        True is equivalent to '*', False is equivalent to [].

        Parameters
        ----------
        increments : (list of) int, (list of) str, or bool, optional.
            Number(s) of increments to select.
        times : (list of) float, (list of) str, or bool, optional.
            Simulation time(s) of increments to select.
        phases : (list of) str, or bool, optional.
            Name(s) of phases to select.
        homogenizations : (list of) str, or bool, optional.
            Name(s) of homogenizations to select.
        fields : (list of) str, or bool, optional.
            Name(s) of fields to select.

        Returns
        -------
        modified_view : damask.Result
            View with fewer visible attributes.

        Examples
        --------
        Get a view that omits the undeformed configuration:

        >>> import damask
        >>> r_all = damask.Result('my_file.hdf5')
        >>> r_deformed = r_all.view_less(increments=0)

        """
        return self._manage_view('del', increments, times, phases, homogenizations, fields)

    def rename(self,
               name_src: str,
               name_dst: str):
        """
        Rename/move datasets (within the same group/folder).

        This operation is discouraged because the history of the
        data becomes untraceable and data integrity is not ensured.

        Parameters
        ----------
        name_src : str
            Name of the datasets to be renamed.
        name_dst : str
            New name of the datasets.

        Examples
        --------
        Rename datasets containing the deformation gradient from 'F' to 'def_grad':

        >>> import damask
        >>> r = damask.Result('my_file.hdf5')
        >>> r_unprotected = r.view(protected=False)
        >>> r_unprotected.rename('F', 'def_grad')

        """
        if self._protected:
            raise PermissionError('rename datasets')

        with h5py.File(self.fname, 'a') as f:
            for inc in self.visible['increments']:
                for ty in ['phase', 'homogenization']:
                    for label in self.visible[ty + 's']:
                        for field in _match(self.visible['fields'], f['/'.join([inc, ty, label])].keys()):
                            path_src = '/'.join([inc, ty, label, field, name_src])
                            path_dst = '/'.join([inc, ty, label, field, name_dst])
                            if path_src in f.keys():
                                f[path_dst] = f[path_src]
                                f[path_dst].attrs['renamed'] = f'original name: {name_src}' if h5py3 else \
                                                               f'original name: {name_src}'.encode()
                                del f[path_src]

    def remove(self, name: str):
        """
        Remove/delete datasets.

        This operation is discouraged because the history of the
        data becomes untraceable and data integrity is not ensured.

        Parameters
        ----------
        name : str
            Name of the datasets to be deleted.

        Examples
        --------
        Delete the deformation gradient 'F':

        >>> import damask
        >>> r = damask.Result('my_file.hdf5')
        >>> r_unprotected = r.view(protected=False)
        >>> r_unprotected.remove('F')

        """
        if self._protected:
            raise PermissionError('delete datasets')

        with h5py.File(self.fname, 'a') as f:
            for inc in self.visible['increments']:
                for ty in ['phase', 'homogenization']:
                    for label in self.visible[ty + 's']:
                        for field in _match(self.visible['fields'], f['/'.join([inc, ty, label])].keys()):
                            path = '/'.join([inc, ty, label, field, name])
                            if path in f.keys(): del f[path]

    def list_data(self) -> List[str]:
        """
        Collect information on all active datasets in the file.

        Returns
        -------
        data : list of str
            Line-formatted information about active datasets.
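
        Examples
        --------
        Print the information on all datasets of the current view:

        >>> import damask
        >>> r = damask.Result('my_file.hdf5')
        >>> for line in r.list_data():
        ...     print(line)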
"""
msg = [ ]
2020-02-21 12:15:05 +05:30
with h5py . File ( self . fname , ' r ' ) as f :
2021-04-04 15:57:39 +05:30
for inc in self . visible [ ' increments ' ] :
2022-02-17 22:35:13 +05:30
msg + = [ f ' \n { inc } ( { self . times [ self . increments . index ( inc ) ] } s) ' ]
2021-04-05 11:23:19 +05:30
for ty in [ ' phase ' , ' homogenization ' ] :
2022-02-17 22:35:13 +05:30
msg + = [ f ' { ty } ' ]
2021-04-05 11:23:19 +05:30
for label in self . visible [ ty + ' s ' ] :
2022-02-17 22:35:13 +05:30
msg + = [ f ' { label } ' ]
2021-04-14 22:54:45 +05:30
for field in _match ( self . visible [ ' fields ' ] , f [ ' / ' . join ( [ inc , ty , label ] ) ] . keys ( ) ) :
2022-02-17 22:35:13 +05:30
msg + = [ f ' { field } ' ]
2021-04-05 11:23:19 +05:30
for d in f [ ' / ' . join ( [ inc , ty , label , field ] ) ] . keys ( ) :
dataset = f [ ' / ' . join ( [ inc , ty , label , field , d ] ) ]
2022-02-17 22:35:13 +05:30
unit = dataset . attrs [ " unit " ] if h5py3 else \
dataset . attrs [ " unit " ] . decode ( )
2021-04-14 10:36:24 +05:30
description = dataset . attrs [ ' description ' ] if h5py3 else \
dataset . attrs [ ' description ' ] . decode ( )
2022-02-17 22:35:13 +05:30
msg + = [ f ' { d } / { unit } : { description } ' ]
2020-02-21 12:15:05 +05:30
2021-04-05 11:23:19 +05:30
return msg
2020-02-21 12:15:05 +05:30
2022-04-27 19:56:33 +05:30

    def enable_user_function(self,
                             func: Callable):
        """Make a user-defined function available for use in add_calculation."""
        globals()[func.__name__] = func
        print(f'Function {func.__name__} enabled in add_calculation.')

    @property
    def simulation_setup_files(self):
        """Simulation setup files used to generate the Result object."""
        files = []
        with h5py.File(self.fname, 'r') as f_in:
            f_in['setup'].visititems(lambda name, obj: files.append(name) if isinstance(obj, h5py.Dataset) else None)
        return files

    @property
    def incs(self):
        return [int(i.split(prefix_inc)[-1]) for i in self.increments]

    @property
    def coordinates0_point(self) -> np.ndarray:
        """Initial/undeformed cell center coordinates."""
        if self.structured:
            return grid_filters.coordinates0_point(self.cells, self.size, self.origin).reshape(-1, 3, order='F')
        else:
            with h5py.File(self.fname, 'r') as f:
                return f['geometry/x_p'][()]

    @property
    def coordinates0_node(self) -> np.ndarray:
        """Initial/undeformed nodal coordinates."""
        if self.structured:
            return grid_filters.coordinates0_node(self.cells, self.size, self.origin).reshape(-1, 3, order='F')
        else:
            with h5py.File(self.fname, 'r') as f:
                return f['geometry/x_n'][()]

    @property
    def geometry0(self) -> VTK:
        """Initial/undeformed geometry."""
        if self.structured:
            return VTK.from_image_data(self.cells, self.size, self.origin)
        else:
            with h5py.File(self.fname, 'r') as f:
                return VTK.from_unstructured_grid(f['/geometry/x_n'][()],
                                                  f['/geometry/T_c'][()] - 1,
                                                  f['/geometry/T_c'].attrs['VTK_TYPE'] if h5py3 else
                                                  f['/geometry/T_c'].attrs['VTK_TYPE'].decode())

    @staticmethod
    def _add_absolute(x: Dict[str, Any]) -> Dict[str, Any]:
        return {
                'data':  np.abs(x['data']),
                'label': f'|{x["label"]}|',
                'meta':  {
                          'unit':        x['meta']['unit'],
                          'description': f"absolute value of {x['label']} ({x['meta']['description']})",
                          'creator':     'add_absolute'
                          }
                }

    def add_absolute(self, x: str):
        """
        Add absolute value.

        Parameters
        ----------
        x : str
            Name of scalar, vector, or tensor dataset to take absolute value of.
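
        Examples
        --------
        Add the absolute value of the determinant of the plastic deformation
        gradient, assuming 'det(F_p)' was created earlier by add_determinant:

        >>> import damask
        >>> r = damask.Result('my_file.hdf5')
        >>> r.add_absolute('det(F_p)')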

        """
        self._add_generic_pointwise(self._add_absolute, {'x': x})

    @staticmethod
    def _add_calculation(**kwargs) -> Dict[str, Any]:
        formula = kwargs['formula']
        for d in re.findall(r'#(.*?)#', formula):
            formula = formula.replace(f'#{d}#', f"kwargs['{d}']['data']")
        data = eval(formula)

        if not hasattr(data, 'shape') or data.shape[0] != kwargs[d]['data'].shape[0]:
            raise ValueError('"{}" results in invalid shape'.format(kwargs['formula']))

        return {
                'data':  data,
                'label': kwargs['label'],
                'meta':  {
                          'unit':        kwargs['unit'],
                          'description': f"{kwargs['description']} (formula: {kwargs['formula']})",
                          'creator':     'add_calculation'
                          }
                }

    def add_calculation(self,
                        formula: str,
                        name: str,
                        unit: str = 'n/a',
                        description: Optional[str] = None):
        """
        Add result of a general formula.

        Parameters
        ----------
        formula : str
            Formula to calculate resulting dataset.
            Existing datasets are referenced by '#TheirName#'.
        name : str
            Name of resulting dataset.
        unit : str, optional
            Physical unit of the result.
        description : str, optional
            Human-readable description of the result.

        Examples
        --------
        Add total dislocation density, i.e. the sum of mobile dislocation
        density 'rho_mob' and dislocation dipole density 'rho_dip' over
        all slip systems:

        >>> import damask
        >>> r = damask.Result('my_file.hdf5')
        >>> r.add_calculation('np.sum(#rho_mob#,axis=1)', 'rho_mob_total',
        ...                   '1/m²', 'total mobile dislocation density')
        >>> r.add_calculation('np.sum(#rho_dip#,axis=1)', 'rho_dip_total',
        ...                   '1/m²', 'total dislocation dipole density')
        >>> r.add_calculation('#rho_dip_total#+#rho_mob_total#', 'rho_total',
        ...                   '1/m²', 'total dislocation density')

        Add Mises equivalent of the Cauchy stress without storage of
        intermediate results. Define a user function for better readability:

        >>> import damask
        >>> def equivalent_stress(F, P):
        ...     sigma = damask.mechanics.stress_Cauchy(F=F, P=P)
        ...     return damask.mechanics.equivalent_stress_Mises(sigma)
        >>> r = damask.Result('my_file.hdf5')
        >>> r.enable_user_function(equivalent_stress)
        >>> r.add_calculation('equivalent_stress(#F#,#P#)', 'sigma_vM', 'Pa',
        ...                   'Mises equivalent of the Cauchy stress')

        """
        dataset_mapping = {d: d for d in set(re.findall(r'#(.*?)#', formula))}  # datasets used in the formula
        args = {'formula': formula, 'label': name, 'unit': unit, 'description': description}
        self._add_generic_pointwise(self._add_calculation, dataset_mapping, args)

    @staticmethod
    def _add_stress_Cauchy(P: Dict[str, Any], F: Dict[str, Any]) -> Dict[str, Any]:
        return {
                'data':  mechanics.stress_Cauchy(P['data'], F['data']),
                'label': 'sigma',
                'meta':  {
                          'unit':        P['meta']['unit'],
                          'description': "Cauchy stress calculated "
                                         f"from {P['label']} ({P['meta']['description']})"
                                         f" and {F['label']} ({F['meta']['description']})",
                          'creator':     'add_stress_Cauchy'
                          }
                }

    def add_stress_Cauchy(self,
                          P: str = 'P',
                          F: str = 'F'):
        """
        Add Cauchy stress calculated from first Piola-Kirchhoff stress and deformation gradient.

        Parameters
        ----------
        P : str, optional
            Name of the dataset containing the first Piola-Kirchhoff stress. Defaults to 'P'.
        F : str, optional
            Name of the dataset containing the deformation gradient. Defaults to 'F'.
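
        Examples
        --------
        Add the Cauchy stress from the default datasets 'P' and 'F',
        assuming both are present in the file:

        >>> import damask
        >>> r = damask.Result('my_file.hdf5')
        >>> r.add_stress_Cauchy()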
"""
2020-11-18 03:26:22 +05:30
self . _add_generic_pointwise ( self . _add_stress_Cauchy , { ' P ' : P , ' F ' : F } )
2020-02-21 12:15:05 +05:30
2020-02-21 23:54:26 +05:30

    @staticmethod
    def _add_determinant(T: Dict[str, Any]) -> Dict[str, Any]:
        return {
                'data':  np.linalg.det(T['data']),
                'label': f"det({T['label']})",
                'meta':  {
                          'unit':        T['meta']['unit'],
                          'description': f"determinant of tensor {T['label']} ({T['meta']['description']})",
                          'creator':     'add_determinant'
                          }
                }

    def add_determinant(self, T: str):
        """
        Add the determinant of a tensor.

        Parameters
        ----------
        T : str
            Name of tensor dataset.

        Examples
        --------
        Add the determinant of plastic deformation gradient 'F_p':

        >>> import damask
        >>> r = damask.Result('my_file.hdf5')
        >>> r.add_determinant('F_p')

        """
        self._add_generic_pointwise(self._add_determinant, {'T': T})

    @staticmethod
    def _add_deviator(T: Dict[str, Any]) -> Dict[str, Any]:
        return {
                'data':  tensor.deviatoric(T['data']),
                'label': f"s_{T['label']}",
                'meta':  {
                          'unit':        T['meta']['unit'],
                          'description': f"deviator of tensor {T['label']} ({T['meta']['description']})",
                          'creator':     'add_deviator'
                          }
                }

    def add_deviator(self, T: str):
        """
        Add the deviatoric part of a tensor.

        Parameters
        ----------
        T : str
            Name of tensor dataset.

        Examples
        --------
        Add the deviatoric part of Cauchy stress 'sigma':

        >>> import damask
        >>> r = damask.Result('my_file.hdf5')
        >>> r.add_deviator('sigma')

        """
        self._add_generic_pointwise(self._add_deviator, {'T': T})

    @staticmethod
    def _add_eigenvalue(T_sym: Dict[str, Any], eigenvalue: Literal['max', 'mid', 'min']) -> Dict[str, Any]:
        if eigenvalue == 'max':
            label, p = 'maximum', 2
        elif eigenvalue == 'mid':
            label, p = 'intermediate', 1
        elif eigenvalue == 'min':
            label, p = 'minimum', 0
        else:
            raise ValueError(f'invalid eigenvalue: {eigenvalue}')

        return {
                'data':  tensor.eigenvalues(T_sym['data'])[:, p],
                'label': f"lambda_{eigenvalue}({T_sym['label']})",
                'meta':  {
                          'unit':        T_sym['meta']['unit'],
                          'description': f"{label} eigenvalue of {T_sym['label']} ({T_sym['meta']['description']})",
                          'creator':     'add_eigenvalue'
                          }
                }

    def add_eigenvalue(self,
                       T_sym: str,
                       eigenvalue: Literal['max', 'mid', 'min'] = 'max'):
        """
        Add eigenvalues of symmetric tensor.

        Parameters
        ----------
        T_sym : str
            Name of symmetric tensor dataset.
        eigenvalue : {'max', 'mid', 'min'}, optional
            Eigenvalue. Defaults to 'max'.

        Examples
        --------
        Add the minimum eigenvalue of Cauchy stress 'sigma':

        >>> import damask
        >>> r = damask.Result('my_file.hdf5')
        >>> r.add_eigenvalue('sigma', 'min')

        """
        self._add_generic_pointwise(self._add_eigenvalue, {'T_sym': T_sym}, {'eigenvalue': eigenvalue})

    @staticmethod
    def _add_eigenvector(T_sym: Dict[str, Any], eigenvalue: Literal['max', 'mid', 'min']) -> Dict[str, Any]:
        if eigenvalue == 'max':
            label, p = 'maximum', 2
        elif eigenvalue == 'mid':
            label, p = 'intermediate', 1
        elif eigenvalue == 'min':
            label, p = 'minimum', 0
        else:
            raise ValueError(f'invalid eigenvalue: {eigenvalue}')

        return {
                'data':  tensor.eigenvectors(T_sym['data'])[:, p],
                'label': f"v_{eigenvalue}({T_sym['label']})",
                'meta':  {
                          'unit':        '1',
                          'description': f"eigenvector corresponding to {label} eigenvalue"
                                         f" of {T_sym['label']} ({T_sym['meta']['description']})",
                          'creator':     'add_eigenvector'
                          }
                }

    def add_eigenvector(self,
                        T_sym: str,
                        eigenvalue: Literal['max', 'mid', 'min'] = 'max'):
        """
        Add eigenvector of symmetric tensor.

        Parameters
        ----------
        T_sym : str
            Name of symmetric tensor dataset.
        eigenvalue : {'max', 'mid', 'min'}, optional
            Eigenvalue to which the eigenvector corresponds.
            Defaults to 'max'.
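
        Examples
        --------
        Add the eigenvector belonging to the maximum eigenvalue of the
        Cauchy stress 'sigma', assuming it exists in the file:

        >>> import damask
        >>> r = damask.Result('my_file.hdf5')
        >>> r.add_eigenvector('sigma')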
"""
2020-05-27 21:06:30 +05:30
self . _add_generic_pointwise ( self . _add_eigenvector , { ' T_sym ' : T_sym } , { ' eigenvalue ' : eigenvalue } )
2020-02-21 12:15:05 +05:30
2020-02-21 23:54:26 +05:30

    @staticmethod
    def _add_IPF_color(l: FloatSequence, q: Dict[str, Any]) -> Dict[str, Any]:
        m = util.scale_to_coprime(np.array(l))
        lattice = q['meta']['lattice']
        o = Orientation(rotation=q['data'], lattice=lattice)

        return {
                'data':  np.uint8(o.IPF_color(l) * 255),
                'label': 'IPFcolor_({} {} {})'.format(*m),
                'meta':  {
                          'unit':        '8-bit RGB',
                          'lattice':     q['meta']['lattice'],
                          'description': 'Inverse Pole Figure (IPF) colors along sample direction ({} {} {})'.format(*m),
                          'creator':     'add_IPF_color'
                          }
                }

    def add_IPF_color(self,
                      l: FloatSequence,
                      q: str = 'O'):
        """
        Add RGB color tuple of inverse pole figure (IPF) color.

        Parameters
        ----------
        l : numpy.array of shape (3)
            Lab frame direction for inverse pole figure.
        q : str, optional
            Name of the dataset containing the crystallographic orientation as quaternions.
            Defaults to 'O'.

        Examples
        --------
        Add the IPF color along [0,1,1] for orientation 'O':

        >>> import damask
        >>> import numpy as np
        >>> r = damask.Result('my_file.hdf5')
        >>> r.add_IPF_color(np.array([0, 1, 1]))

        """
        self._add_generic_pointwise(self._add_IPF_color, {'q': q}, {'l': l})

    @staticmethod
    def _add_maximum_shear(T_sym: Dict[str, Any]) -> Dict[str, Any]:
        return {
                'data':  mechanics.maximum_shear(T_sym['data']),
                'label': f"max_shear({T_sym['label']})",
                'meta':  {
                          'unit':        T_sym['meta']['unit'],
                          'description': f"maximum shear component of {T_sym['label']} ({T_sym['meta']['description']})",
                          'creator':     'add_maximum_shear'
                          }
                }

    def add_maximum_shear(self, T_sym: str):
        """
        Add maximum shear components of symmetric tensor.

        Parameters
        ----------
        T_sym : str
            Name of symmetric tensor dataset.
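
        Examples
        --------
        Add the maximum shear component of the Cauchy stress 'sigma',
        assuming it exists in the file:

        >>> import damask
        >>> r = damask.Result('my_file.hdf5')
        >>> r.add_maximum_shear('sigma')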
"""
2020-03-03 03:44:59 +05:30
self . _add_generic_pointwise ( self . _add_maximum_shear , { ' T_sym ' : T_sym } )
2020-02-21 12:15:05 +05:30
2020-02-21 23:54:26 +05:30

    @staticmethod
    def _add_equivalent_Mises(T_sym: Dict[str, Any], kind: str) -> Dict[str, Any]:
        k = kind
        if k is None:
            if T_sym['meta']['unit'] == '1':
                k = 'strain'
            elif T_sym['meta']['unit'] == 'Pa':
                k = 'stress'
        if k not in ['stress', 'strain']:
            raise ValueError(f'invalid von Mises kind "{kind}"')

        return {
                'data':  (mechanics.equivalent_strain_Mises if k == 'strain' else
                          mechanics.equivalent_stress_Mises)(T_sym['data']),
                'label': f"{T_sym['label']}_vM",
                'meta':  {
                          'unit':        T_sym['meta']['unit'],
                          'description': f"Mises equivalent {k} of {T_sym['label']} ({T_sym['meta']['description']})",
                          'creator':     'add_Mises'
                          }
                }

    def add_equivalent_Mises(self,
                             T_sym: str,
                             kind: Optional[str] = None):
        """
        Add the equivalent Mises stress or strain of a symmetric tensor.

        Parameters
        ----------
        T_sym : str
            Name of symmetric tensorial stress or strain dataset.
        kind : {'stress', 'strain', None}, optional
            Kind of the von Mises equivalent. Defaults to None, in which case
            it is selected based on the unit of the dataset ('1' -> strain, 'Pa' -> stress).

        Examples
        --------
        Add the Mises equivalent of the Cauchy stress 'sigma':

        >>> import damask
        >>> r = damask.Result('my_file.hdf5')
        >>> r.add_equivalent_Mises('sigma')

        Add the Mises equivalent of the spatial logarithmic strain 'epsilon_V^0.0(F)':

        >>> import damask
        >>> r = damask.Result('my_file.hdf5')
        >>> r.add_equivalent_Mises('epsilon_V^0.0(F)')

        """
        self._add_generic_pointwise(self._add_equivalent_Mises, {'T_sym': T_sym}, {'kind': kind})

    @staticmethod
    def _add_norm(x: Dict[str, Any], ord: Union[int, float, Literal['fro', 'nuc']]) -> Dict[str, Any]:
        o = ord
        if len(x['data'].shape) == 2:
            axis: Union[int, Tuple[int, int]] = 1
            t = 'vector'
            if o is None: o = 2
        elif len(x['data'].shape) == 3:
            axis = (1, 2)
            t = 'tensor'
            if o is None: o = 'fro'
        else:
            raise ValueError(f'invalid shape of {x["label"]}')

        return {
                'data':  np.linalg.norm(x['data'], ord=o, axis=axis, keepdims=True),
                'label': f"|{x['label']}|_{o}",
                'meta':  {
                          'unit':        x['meta']['unit'],
                          'description': f"{o}-norm of {t} {x['label']} ({x['meta']['description']})",
                          'creator':     'add_norm'
                          }
                }

    def add_norm(self,
                 x: str,
                 ord: Union[None, int, float, Literal['fro', 'nuc']] = None):
        """
        Add the norm of vector or tensor.

        Parameters
        ----------
        x : str
            Name of vector or tensor dataset.
        ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
            Order of the norm. inf means NumPy's inf object. For details refer to numpy.linalg.norm.
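
        Examples
        --------
        Add the Frobenius norm (the default for tensors) of the deformation
        gradient 'F', assuming it is present in the file:

        >>> import damask
        >>> r = damask.Result('my_file.hdf5')
        >>> r.add_norm('F')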
"""
2020-02-22 02:07:02 +05:30
self . _add_generic_pointwise ( self . _add_norm , { ' x ' : x } , { ' ord ' : ord } )
2020-02-21 12:15:05 +05:30
2020-02-21 23:54:26 +05:30

    @staticmethod
    def _add_stress_second_Piola_Kirchhoff(P: Dict[str, Any], F: Dict[str, Any]) -> Dict[str, Any]:
        return {
                'data':  mechanics.stress_second_Piola_Kirchhoff(P['data'], F['data']),
                'label': 'S',
                'meta':  {
                          'unit':        P['meta']['unit'],
                          'description': "second Piola-Kirchhoff stress calculated "
                                         f"from {P['label']} ({P['meta']['description']})"
                                         f" and {F['label']} ({F['meta']['description']})",
                          'creator':     'add_stress_second_Piola_Kirchhoff'
                          }
                }

    def add_stress_second_Piola_Kirchhoff(self,
                                          P: str = 'P',
                                          F: str = 'F'):
        """
        Add second Piola-Kirchhoff stress calculated from first Piola-Kirchhoff stress and deformation gradient.

        Parameters
        ----------
        P : str, optional
            Name of first Piola-Kirchhoff stress dataset. Defaults to 'P'.
        F : str, optional
            Name of deformation gradient dataset. Defaults to 'F'.

        Notes
        -----
        The definition of the second Piola-Kirchhoff stress (S = [F^-1 P]_sym)
        follows the standard definition in nonlinear continuum mechanics.
        As such, no intermediate configuration, for instance that reached by F_p,
        is taken into account.
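
        Examples
        --------
        Add the second Piola-Kirchhoff stress from the default datasets
        'P' and 'F', assuming both are present in the file:

        >>> import damask
        >>> r = damask.Result('my_file.hdf5')
        >>> r.add_stress_second_Piola_Kirchhoff()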
"""
2020-11-18 03:26:22 +05:30
self . _add_generic_pointwise ( self . _add_stress_second_Piola_Kirchhoff , { ' P ' : P , ' F ' : F } )
2020-02-21 12:15:05 +05:30
2021-07-18 21:33:36 +05:30

    @staticmethod
    def _add_pole(q: Dict[str, Any],
                  uvw: FloatSequence,
                  hkl: FloatSequence,
                  with_symmetry: bool,
                  normalize: bool) -> Dict[str, Any]:
        c = q['meta']['c/a'] if 'c/a' in q['meta'] else 1
        brackets = ['[]', '()', '⟨⟩', '{}'][(uvw is None)*1 + with_symmetry*2]
        label = 'p^' + '{}{} {} {}{}'.format(brackets[0],
                                             *(uvw if uvw else hkl),
                                             brackets[-1],)
        ori = Orientation(q['data'], lattice=q['meta']['lattice'], a=1, c=c)

        return {
                'data':  ori.to_pole(uvw=uvw, hkl=hkl, with_symmetry=with_symmetry, normalize=normalize),
                'label': label,
                'meta':  {
                          'unit':        '1',
                          'description': f'{"normalized " if normalize else ""}lab frame vector along lattice '
                                         + ('direction' if uvw is not None else 'plane')
                                         + ('s' if with_symmetry else ''),
                          'creator':     'add_pole'
                          }
                }

    def add_pole(self,
                 q: str = 'O',
                 *,
                 uvw: Optional[FloatSequence] = None,
                 hkl: Optional[FloatSequence] = None,
                 with_symmetry: bool = False,
                 normalize: bool = True):
        """
        Add lab frame vector along lattice direction [uvw] or plane normal (hkl).

        Parameters
        ----------
        q : str, optional
            Name of the dataset containing the crystallographic orientation as quaternions.
            Defaults to 'O'.
        uvw|hkl : numpy.ndarray of shape (3)
            Miller indices of crystallographic direction or plane normal.
        with_symmetry : bool, optional
            Calculate all N symmetrically equivalent vectors.
            Defaults to False.
        normalize : bool, optional
            Normalize output vector.
            Defaults to True.
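
        Examples
        --------
        Add the lab frame vectors along all symmetrically equivalent
        lattice directions ⟨1 0 0⟩ (direction chosen for illustration):

        >>> import damask
        >>> r = damask.Result('my_file.hdf5')
        >>> r.add_pole(uvw=[1, 0, 0], with_symmetry=True)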
"""
2022-05-13 13:23:44 +05:30
self . _add_generic_pointwise ( self . _add_pole ,
{ ' q ' : q } ,
{ ' uvw ' : uvw , ' hkl ' : hkl , ' with_symmetry ' : with_symmetry , ' normalize ' : normalize } )
2020-02-21 12:15:05 +05:30
2020-02-21 23:54:26 +05:30

    @staticmethod
    def _add_rotation(F: Dict[str, Any]) -> Dict[str, Any]:
        return {
                'data':  mechanics.rotation(F['data']).as_matrix(),
                'label': f"R({F['label']})",
                'meta':  {
                          'unit':        F['meta']['unit'],
                          'description': f"rotational part of {F['label']} ({F['meta']['description']})",
                          'creator':     'add_rotation'
                          }
                }

    def add_rotation(self, F: str):
        """
        Add rotational part of a deformation gradient.

        Parameters
        ----------
        F : str
            Name of deformation gradient dataset.

        Examples
        --------
        Add the rotational part of deformation gradient 'F':

        >>> import damask
        >>> r = damask.Result('my_file.hdf5')
        >>> r.add_rotation('F')

        """
        self._add_generic_pointwise(self._add_rotation, {'F': F})
@staticmethod
2022-05-09 03:51:53 +05:30
def _add_spherical ( T : Dict [ str , Any ] ) - > Dict [ str , Any ] :
2020-02-21 23:54:26 +05:30
return {
2020-11-19 19:08:54 +05:30
' data ' : tensor . spherical ( T [ ' data ' ] , False ) ,
2020-06-25 01:04:51 +05:30
' label ' : f " p_ { T [ ' label ' ] } " ,
2020-02-21 23:54:26 +05:30
' meta ' : {
2021-03-25 23:52:59 +05:30
' unit ' : T [ ' meta ' ] [ ' unit ' ] ,
' description ' : f " spherical component of tensor { T [ ' label ' ] } ( { T [ ' meta ' ] [ ' description ' ] } ) " ,
' creator ' : ' add_spherical '
2020-02-21 23:54:26 +05:30
}
}

    def add_spherical(self, T: str):
        """
        Add the spherical (hydrostatic) part of a tensor.

        Parameters
        ----------
        T : str
            Name of tensor dataset.

        Examples
        --------
        Add the hydrostatic part of the Cauchy stress 'sigma':

        >>> import damask
        >>> r = damask.Result('my_file.hdf5')
        >>> r.add_spherical('sigma')

        """
        self._add_generic_pointwise(self._add_spherical, {'T': T})

    @staticmethod
    def _add_strain(F: Dict[str, Any], t: Literal['V', 'U'], m: float) -> Dict[str, Any]:
        side = 'left' if t == 'V' else 'right'
        return {
                'data':  mechanics.strain(F['data'], t, m),
                'label': f"epsilon_{t}^{m}({F['label']})",
                'meta':  {
                          'unit':        F['meta']['unit'],
                          'description': f'strain tensor of order {m} based on {side} stretch tensor '
                                         + f"of {F['label']} ({F['meta']['description']})",
                          'creator':     'add_strain'
                          }
                }

    def add_strain(self,
                   F: str = 'F',
                   t: Literal['V', 'U'] = 'V',
                   m: float = 0.0):
        """
        Add strain tensor of a deformation gradient.

        For details, see damask.mechanics.strain.

        Parameters
        ----------
        F : str, optional
            Name of deformation gradient dataset. Defaults to 'F'.
        t : {'V', 'U'}, optional
            Type of the polar decomposition, 'V' for left stretch tensor
            and 'U' for right stretch tensor. Defaults to 'V'.
        m : float, optional
            Order of the strain calculation. Defaults to 0.0.

        Examples
        --------
        Add the Euler-Almansi strain:

        >>> import damask
        >>> r = damask.Result('my_file.hdf5')
        >>> r.add_strain(t='V', m=-1.0)

        Add the plastic Biot strain:

        >>> import damask
        >>> r = damask.Result('my_file.hdf5')
        >>> r.add_strain('F_p', 'U', 0.5)

        Notes
        -----
        The incorporation of rotational parts into the elastic and plastic
        deformation gradient requires the use of material/Lagrangian strain measures
        (based on 'U') for plastic strains and spatial/Eulerian strain measures
        (based on 'V') for elastic strains when calculating averages.

        """
        self._add_generic_pointwise(self._add_strain, {'F': F}, {'t': t, 'm': m})

    @staticmethod
    def _add_stretch_tensor(F: Dict[str, Any], t: str) -> Dict[str, Any]:
        return {
                'data':  (mechanics.stretch_left if t.upper() == 'V' else mechanics.stretch_right)(F['data']),
                'label': f"{t}({F['label']})",
                'meta':  {
                          'unit':        F['meta']['unit'],
                          'description': f"{'left' if t.upper() == 'V' else 'right'} stretch tensor "
                                         + f"of {F['label']} ({F['meta']['description']})",
                          'creator':     'add_stretch_tensor'
                          }
                }

    def add_stretch_tensor(self,
                           F: str = 'F',
                           t: Literal['V', 'U'] = 'V'):
        """
        Add stretch tensor of a deformation gradient.

        Parameters
        ----------
        F : str, optional
            Name of deformation gradient dataset. Defaults to 'F'.
        t : {'V', 'U'}, optional
            Type of the polar decomposition, 'V' for left stretch tensor
            and 'U' for right stretch tensor. Defaults to 'V'.
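
        Examples
        --------
        Add the left stretch tensor of the deformation gradient 'F':

        >>> import damask
        >>> r = damask.Result('my_file.hdf5')
        >>> r.add_stretch_tensor('F', 'V')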

        """
        self._add_generic_pointwise(self._add_stretch_tensor, {'F': F}, {'t': t})

    @staticmethod
    def _add_curl(f: Dict[str, Any], size: np.ndarray) -> Dict[str, Any]:
        return {
                'data':  grid_filters.curl(size, f['data']),
                'label': f"curl({f['label']})",
                'meta':  {
                          'unit':        f['meta']['unit'] + '/m',
                          'description': f"curl of {f['label']} ({f['meta']['description']})",
                          'creator':     'add_curl'
                          }
                }

    def add_curl(self, f: str):
        """
        Add curl of a field.

        Parameters
        ----------
        f : str
            Name of vector or tensor field dataset.

        Notes
        -----
        This function is only available for structured grids,
        i.e. results from the grid solver.
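
        Examples
        --------
        Add the curl of the deformation gradient 'F':

        >>> import damask
        >>> r = damask.Result('my_file.hdf5')
        >>> r.add_curl('F')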

        """
        self._add_generic_grid(self._add_curl, {'f': f}, {'size': self.size})

    @staticmethod
    def _add_divergence(f: Dict[str, Any], size: np.ndarray) -> Dict[str, Any]:
        return {
                'data':  grid_filters.divergence(size, f['data']),
                'label': f"divergence({f['label']})",
                'meta':  {
                          'unit':        f['meta']['unit'] + '/m',
                          'description': f"divergence of {f['label']} ({f['meta']['description']})",
                          'creator':     'add_divergence'
                          }
                }

    def add_divergence(self, f: str):
        """
        Add divergence of a field.

        Parameters
        ----------
        f : str
            Name of vector or tensor field dataset.

        Notes
        -----
        This function is only available for structured grids,
        i.e. results from the grid solver.
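
        Examples
        --------
        Add the divergence of the first Piola-Kirchhoff stress 'P'
        (assuming such a dataset is present):

        >>> import damask
        >>> r = damask.Result('my_file.hdf5')
        >>> r.add_divergence('P')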

        """
        self._add_generic_grid(self._add_divergence, {'f': f}, {'size': self.size})

    @staticmethod
    def _add_gradient(f: Dict[str, Any], size: np.ndarray) -> Dict[str, Any]:
        return {
                'data':  grid_filters.gradient(size, f['data'] if len(f['data'].shape) == 4 else
                                                     f['data'].reshape(f['data'].shape + (1,))),
                'label': f"gradient({f['label']})",
                'meta':  {
                          'unit':        f['meta']['unit'] + '/m',
                          'description': f"gradient of {f['label']} ({f['meta']['description']})",
                          'creator':     'add_gradient'
                          }
                }

    def add_gradient(self, f: str):
        """
        Add gradient of a field.

        Parameters
        ----------
        f : str
            Name of scalar or vector field dataset.

        Notes
        -----
        This function is only available for structured grids,
        i.e. results from the grid solver.
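
        Examples
        --------
        Add the gradient of a scalar field, here a hypothetical
        temperature dataset 'T':

        >>> import damask
        >>> r = damask.Result('my_file.hdf5')
        >>> r.add_gradient('T')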

        """
        self._add_generic_grid(self._add_gradient, {'f': f}, {'size': self.size})

    def _add_generic_grid(self,
                          func: Callable,
                          datasets: Dict[str, str],
                          args: Dict[str, str] = {},
                          constituents=None):
        """
        General function to add data on a regular grid.

        Parameters
        ----------
        func : function
            Callback function that calculates a new dataset from one or
            more datasets per HDF5 group.
        datasets : dictionary
            Details of the datasets to be used:
            {arg (name to which the data is passed in func): label (in HDF5 file)}.
        args : dictionary, optional
            Arguments parsed to func.

        """
        if len(datasets) != 1 or self.N_constituents != 1:
            raise NotImplementedError

        at_cell_ph, in_data_ph, at_cell_ho, in_data_ho = self._mappings()

        increments = self.place(list(datasets.values()), False)
        if not increments: raise RuntimeError('received invalid dataset')

        with h5py.File(self.fname, 'a') as f:
            for increment in increments.items():
                for ty in increment[1].items():
                    for field in ty[1].items():
                        d: np.ma.MaskedArray = list(field[1].values())[0]
                        if np.any(d.mask): continue
                        # reshape the flat data to the grid and evaluate the callback
                        dataset = {'f': {'data':  np.reshape(d.data, tuple(self.cells) + d.data.shape[1:]),
                                         'label': list(datasets.values())[0],
                                         'meta':  d.data.dtype.metadata}}
                        r = func(**dataset, **args)
                        result = r['data'].reshape((-1,) + r['data'].shape[3:])
                        # scatter the result back into the per-phase/per-homogenization groups
                        for x in self.visible[ty[0]+'s']:
                            if ty[0] == 'phase':
                                result1 = result[at_cell_ph[0][x]]
                            if ty[0] == 'homogenization':
                                result1 = result[at_cell_ho[x]]

                            path = '/'.join(['/', increment[0], ty[0], x, field[0]])
                            h5_dataset = f[path].create_dataset(r['label'], data=result1)

                            now = datetime.datetime.now().astimezone()
                            h5_dataset.attrs['created'] = now.strftime('%Y-%m-%d %H:%M:%S%z') if h5py3 else \
                                                          now.strftime('%Y-%m-%d %H:%M:%S%z').encode()

                            for l, v in r['meta'].items():
                                h5_dataset.attrs[l.lower()] = v if h5py3 else v.encode()
                            creator = h5_dataset.attrs['creator'] if h5py3 else \
                                      h5_dataset.attrs['creator'].decode()
                            h5_dataset.attrs['creator'] = f'damask.Result.{creator} v{damask.version}' if h5py3 else \
                                                          f'damask.Result.{creator} v{damask.version}'.encode()

    def _job_pointwise(self,
                       group: str,
                       callback: Callable,
                       datasets: Dict[str, str],
                       args: Dict[str, str],
                       lock: Lock) -> List[Union[None, Any]]:
        """Execute job for _add_generic_pointwise."""
        try:
            datasets_in = {}
            lock.acquire()
            with h5py.File(self.fname, 'r') as f:
                for arg, label in datasets.items():
                    loc = f[group+'/'+label]
                    datasets_in[arg] = {'data':  loc[()],
                                        'label': label,
                                        'meta':  {k: (v.decode() if not h5py3 and type(v) is bytes else v)
                                                  for k, v in loc.attrs.items()}}
            lock.release()
            r = callback(**datasets_in, **args)
            return [group, r]
        except Exception as err:
            print(f'Error during calculation: {err}.')
            return [None, None]

    def _add_generic_pointwise(self,
                               func: Callable,
                               datasets: Dict[str, Any],
                               args: Dict[str, Any] = {}):
        """
        General function to add pointwise data.

        Parameters
        ----------
        func : function
            Callback function that calculates a new dataset from one or
            more datasets per HDF5 group.
        datasets : dictionary
            Details of the datasets to be used:
            {arg (name to which the data is passed in func): label (in HDF5 file)}.
        args : dictionary, optional
            Arguments parsed to func.

        """
        pool = mp.Pool(int(os.environ.get('OMP_NUM_THREADS', 4)))
        lock = mp.Manager().Lock()

        groups = []
        with h5py.File(self.fname, 'r') as f:
            for inc in self.visible['increments']:
                for ty in ['phase', 'homogenization']:
                    for label in self.visible[ty+'s']:
                        for field in _match(self.visible['fields'], f['/'.join([inc, ty, label])].keys()):
                            group = '/'.join([inc, ty, label, field])
                            if set(datasets.values()).issubset(f[group].keys()): groups.append(group)

        if len(groups) == 0:
            print('No matching dataset found, no data was added.')
            return

        default_arg = partial(self._job_pointwise, callback=func, datasets=datasets, args=args, lock=lock)

        for group, result in util.show_progress(pool.imap_unordered(default_arg, groups), len(groups)):  # type: ignore
            if not result:
                continue
            lock.acquire()
            with h5py.File(self.fname, 'a') as f:
                try:
                    if not self._protected and '/'.join([group, result['label']]) in f:
                        dataset = f['/'.join([group, result['label']])]
                        dataset[...] = result['data']
                        dataset.attrs['overwritten'] = True
                    else:
                        shape = result['data'].shape
                        if compress := (result['data'].size >= chunk_size*2):
                            chunks = (chunk_size//np.prod(shape[1:]),) + shape[1:]
                        else:
                            chunks = shape
                        dataset = f[group].create_dataset(result['label'], data=result['data'],
                                                          maxshape=shape, chunks=chunks,
                                                          compression='gzip' if compress else None,
                                                          compression_opts=6 if compress else None,
                                                          shuffle=True, fletcher32=True)

                    now = datetime.datetime.now().astimezone()
                    dataset.attrs['created'] = now.strftime('%Y-%m-%d %H:%M:%S%z') if h5py3 else \
                                               now.strftime('%Y-%m-%d %H:%M:%S%z').encode()

                    for l, v in result['meta'].items():
                        dataset.attrs[l.lower()] = v.encode() if not h5py3 and type(v) is str else v
                    creator = dataset.attrs['creator'] if h5py3 else \
                              dataset.attrs['creator'].decode()
                    dataset.attrs['creator'] = f'damask.Result.{creator} v{damask.version}' if h5py3 else \
                                               f'damask.Result.{creator} v{damask.version}'.encode()

                except (OSError, RuntimeError) as err:
                    print(f'Could not add dataset: {err}.')
            lock.release()

        pool.close()
        pool.join()

    def _mappings(self):
        """Mappings to place data spatially."""
        with h5py.File(self.fname, 'r') as f:
            at_cell_ph = []
            in_data_ph = []
            for c in range(self.N_constituents):
                at_cell_ph.append({label: np.where(self.phase[:, c] == label)[0]
                                   for label in self.visible['phases']})
                in_data_ph.append({label: f['/'.join(['cell_to', 'phase'])]['entry'][at_cell_ph[c][label]][:, c]
                                   for label in self.visible['phases']})
            at_cell_ho = {label: np.where(self.homogenization[:] == label)[0]
                          for label in self.visible['homogenizations']}
            in_data_ho = {label: f['/'.join(['cell_to', 'homogenization'])]['entry'][at_cell_ho[label]]
                          for label in self.visible['homogenizations']}
        return at_cell_ph, in_data_ph, at_cell_ho, in_data_ho

    def get(self,
            output: Union[str, List[str]] = '*',
            flatten: bool = True,
            prune: bool = True) -> Optional[Dict[str, Any]]:
        """
        Collect data per phase/homogenization reflecting the group/folder structure in the DADF5 file.

        Parameters
        ----------
        output : (list of) str, optional
            Names of the datasets to read.
            Defaults to '*', in which case all datasets are read.
        flatten : bool, optional
            Remove singular levels of the folder hierarchy.
            This might be beneficial in case of single increment,
            phase/homogenization, or field. Defaults to True.
        prune : bool, optional
            Remove branches with no data. Defaults to True.

        Returns
        -------
        data : dict of numpy.ndarray
            Datasets structured by phase/homogenization and according to selected view.
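
        Examples
        --------
        Read all datasets of the current view into a nested dictionary:

        >>> import damask
        >>> r = damask.Result('my_file.hdf5')
        >>> data = r.get()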
"""
2022-11-11 11:33:14 +05:30
r : Dict [ str , Any ] = { }
2022-11-09 20:09:47 +05:30
with h5py . File ( self . fname , ' r ' ) as f :
for inc in util . show_progress ( self . visible [ ' increments ' ] ) :
r [ inc ] = { ' phase ' : { } , ' homogenization ' : { } , ' geometry ' : { } }
for out in _match ( output , f [ ' / ' . join ( [ inc , ' geometry ' ] ) ] . keys ( ) ) :
r [ inc ] [ ' geometry ' ] [ out ] = _read ( f [ ' / ' . join ( [ inc , ' geometry ' , out ] ) ] )
for ty in [ ' phase ' , ' homogenization ' ] :
for label in self . visible [ ty + ' s ' ] :
r [ inc ] [ ty ] [ label ] = { }
for field in _match ( self . visible [ ' fields ' ] , f [ ' / ' . join ( [ inc , ty , label ] ) ] . keys ( ) ) :
r [ inc ] [ ty ] [ label ] [ field ] = { }
for out in _match ( output , f [ ' / ' . join ( [ inc , ty , label , field ] ) ] . keys ( ) ) :
r [ inc ] [ ty ] [ label ] [ field ] [ out ] = _read ( f [ ' / ' . join ( [ inc , ty , label , field , out ] ) ] )
if prune : r = util . dict_prune ( r )
if flatten : r = util . dict_flatten ( r )
return None if ( type ( r ) == dict and r == { } ) else r

    def place(self,
              output: Union[str, List[str]] = '*',
              flatten: bool = True,
              prune: bool = True,
              constituents: Optional[IntSequence] = None,
              fill_float: float = np.nan,
              fill_int: int = 0) -> Optional[Dict[str, Any]]:
        """
        Merge data into spatial order that is compatible with the damask.VTK geometry representation.

        The returned data structure reflects the group/folder structure in the DADF5 file.
        Multi-phase data is fused into a single output.
        `place` is equivalent to `get` if only one phase/homogenization
        and one constituent is present.

        Parameters
        ----------
        output : (list of) str, optional
            Names of the datasets to read.
            Defaults to '*', in which case all visible datasets are placed.
        flatten : bool, optional
            Remove singular levels of the folder hierarchy.
            This might be beneficial in case of single increment or field.
            Defaults to True.
        prune : bool, optional
            Remove branches with no data. Defaults to True.
        constituents : (list of) int, optional
            Constituents to consider.
            Defaults to None, in which case all constituents are considered.
        fill_float : float, optional
            Fill value for non-existent entries of floating point type.
            Defaults to NaN.
        fill_int : int, optional
            Fill value for non-existent entries of integer type.
            Defaults to 0.

        Returns
        -------
        data : dict of numpy.ma.MaskedArray
            Datasets structured by spatial position and according to selected view.
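
        Examples
        --------
        Place all datasets of the current view in spatial order:

        >>> import damask
        >>> r = damask.Result('my_file.hdf5')
        >>> data = r.place()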
"""
2022-11-11 11:33:14 +05:30
r : Dict [ str , Any ] = { }
2022-11-09 20:09:47 +05:30
2022-11-11 11:33:14 +05:30
constituents_ = map ( int , constituents ) if isinstance ( constituents , Iterable ) else \
2022-11-09 20:09:47 +05:30
( range ( self . N_constituents ) if constituents is None else [ constituents ] ) # type: ignore
suffixes = [ ' ' ] if self . N_constituents == 1 or isinstance ( constituents , int ) else \
[ f ' # { c } ' for c in constituents_ ]
at_cell_ph , in_data_ph , at_cell_ho , in_data_ho = self . _mappings ( )
with h5py . File ( self . fname , ' r ' ) as f :
for inc in util . show_progress ( self . visible [ ' increments ' ] ) :
r [ inc ] = { ' phase ' : { } , ' homogenization ' : { } , ' geometry ' : { } }
for out in _match ( output , f [ ' / ' . join ( [ inc , ' geometry ' ] ) ] . keys ( ) ) :
r [ inc ] [ ' geometry ' ] [ out ] = ma . array ( _read ( f [ ' / ' . join ( [ inc , ' geometry ' , out ] ) ] ) , fill_value = fill_float )
for ty in [ ' phase ' , ' homogenization ' ] :
for label in self . visible [ ty + ' s ' ] :
for field in _match ( self . visible [ ' fields ' ] , f [ ' / ' . join ( [ inc , ty , label ] ) ] . keys ( ) ) :
if field not in r [ inc ] [ ty ] . keys ( ) :
r [ inc ] [ ty ] [ field ] = { }
for out in _match ( output , f [ ' / ' . join ( [ inc , ty , label , field ] ) ] . keys ( ) ) :
data = ma . array ( _read ( f [ ' / ' . join ( [ inc , ty , label , field , out ] ) ] ) )
if ty == ' phase ' :
if out + suffixes [ 0 ] not in r [ inc ] [ ty ] [ field ] . keys ( ) :
for c , suffix in zip ( constituents_ , suffixes ) :
r [ inc ] [ ty ] [ field ] [ out + suffix ] = \
_empty_like ( data , self . N_materialpoints , fill_float , fill_int )
for c , suffix in zip ( constituents_ , suffixes ) :
r [ inc ] [ ty ] [ field ] [ out + suffix ] [ at_cell_ph [ c ] [ label ] ] = data [ in_data_ph [ c ] [ label ] ]
if ty == ' homogenization ' :
if out not in r [ inc ] [ ty ] [ field ] . keys ( ) :
r [ inc ] [ ty ] [ field ] [ out ] = \
_empty_like ( data , self . N_materialpoints , fill_float , fill_int )
r [ inc ] [ ty ] [ field ] [ out ] [ at_cell_ho [ label ] ] = data [ in_data_ho [ label ] ]
if prune : r = util . dict_prune ( r )
if flatten : r = util . dict_flatten ( r )
return None if ( type ( r ) == dict and r == { } ) else r
2022-04-27 19:56:33 +05:30

    def export_XDMF(self,
                    output: Union[str, List[str]] = '*',
                    target_dir: Union[None, str, Path] = None,
                    absolute_path: bool = False):
        """
        Write XDMF file to directly visualize data from DADF5 file.

        The XDMF format is only supported for structured grids
        with single phase and single constituent.
        For other cases use `export_VTK`.

        Parameters
        ----------
        output : (list of) str, optional
            Names of the datasets included in the XDMF file.
            Defaults to '*', in which case all datasets are considered.
        target_dir : str or pathlib.Path, optional
            Directory to save XDMF file. Will be created if non-existent.
        absolute_path : bool, optional
            Store absolute (instead of relative) path to DADF5 file.
            Defaults to False, i.e. the XDMF file expects the
            DADF5 file at a stable relative path.
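
        Examples
        --------
        Write an XDMF file, e.g. for visualization in ParaView:

        >>> import damask
        >>> r = damask.Result('my_file.hdf5')
        >>> r.export_XDMF()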

        """
        if self.N_constituents != 1 or len(self.phases) != 1 or not self.structured:
            raise TypeError('XDMF output requires structured grid with single phase and single constituent.')

        attribute_type_map = defaultdict(lambda: 'Matrix', (((), 'Scalar'), ((3,), 'Vector'), ((3, 3), 'Tensor')))

        def number_type_map(dtype):
            if dtype in np.sctypes['int']:   return 'Int'
            if dtype in np.sctypes['uint']:  return 'UInt'
            if dtype in np.sctypes['float']: return 'Float'

        xdmf = ET.Element('Xdmf')
        xdmf.attrib = {'Version':  '2.0',
                       'xmlns:xi': 'http://www.w3.org/2001/XInclude'}

        domain = ET.SubElement(xdmf, 'Domain')

        collection = ET.SubElement(domain, 'Grid')
        collection.attrib = {'GridType':       'Collection',
                             'CollectionType': 'Temporal',
                             'Name':           'Increments'}

        time = ET.SubElement(collection, 'Time')
        time.attrib = {'TimeType': 'List'}

        time_data = ET.SubElement(time, 'DataItem')
        times = [self.times[self.increments.index(i)] for i in self.visible['increments']]
        time_data.attrib = {'Format':     'XML',
                            'NumberType': 'Float',
                            'Dimensions': f'{len(times)}'}
        time_data.text = ' '.join(map(str, times))

        attributes = []
        data_items = []

        hdf5_name = self.fname.name
        hdf5_dir  = self.fname.parent
        xdmf_dir  = Path.cwd() if target_dir is None else Path(target_dir)
        hdf5_link = (hdf5_dir if absolute_path else Path(os.path.relpath(hdf5_dir, xdmf_dir.resolve())))/hdf5_name

        with h5py.File(self.fname, 'r') as f:
            for inc in self.visible['increments']:

                grid = ET.SubElement(collection, 'Grid')
                grid.attrib = {'GridType': 'Uniform',
                               'Name':     inc}

                topology = ET.SubElement(grid, 'Topology')
                topology.attrib = {'TopologyType': '3DCoRectMesh',
                                   'Dimensions':   '{} {} {}'.format(*(self.cells[::-1]+1))}

                geometry = ET.SubElement(grid, 'Geometry')
                geometry.attrib = {'GeometryType': 'Origin_DxDyDz'}

                origin = ET.SubElement(geometry, 'DataItem')
                origin.attrib = {'Format':     'XML',
                                 'NumberType': 'Float',
                                 'Dimensions': '3'}
                origin.text = "{} {} {}".format(*self.origin[::-1])

                delta = ET.SubElement(geometry, 'DataItem')
                delta.attrib = {'Format':     'XML',
                                'NumberType': 'Float',
                                'Dimensions': '3'}
                delta.text = "{} {} {}".format(*(self.size/self.cells)[::-1])

                attributes.append(ET.SubElement(grid, 'Attribute'))
                attributes[-1].attrib = {'Name':          'u / m',
                                         'Center':        'Node',
                                         'AttributeType': 'Vector'}
                data_items.append(ET.SubElement(attributes[-1], 'DataItem'))
                data_items[-1].attrib = {'Format':     'HDF',
                                         'Precision':  '8',
                                         'Dimensions': '{} {} {} 3'.format(*(self.cells[::-1]+1))}
                data_items[-1].text = f'{hdf5_link}:/{inc}/geometry/u_n'

                for ty in ['phase', 'homogenization']:
                    for label in self.visible[ty+'s']:
                        for field in _match(self.visible['fields'], f['/'.join([inc, ty, label])].keys()):
                            for out in _match(output, f['/'.join([inc, ty, label, field])].keys()):
                                name = '/'.join([inc, ty, label, field, out])
                                shape = f[name].shape[1:]
                                dtype = f[name].dtype
                                unit = f[name].attrs['unit'] if h5py3 else \
                                       f[name].attrs['unit'].decode()

                                attributes.append(ET.SubElement(grid, 'Attribute'))
                                attributes[-1].attrib = {'Name':          '/'.join([ty, field, out])+f' / {unit}',
                                                         'Center':        'Cell',
                                                         'AttributeType': attribute_type_map[shape]}
                                data_items.append(ET.SubElement(attributes[-1], 'DataItem'))
                                data_items[-1].attrib = {'Format':     'HDF',
                                                         'NumberType': number_type_map(dtype),
                                                         'Precision':  f'{dtype.itemsize}',
                                                         'Dimensions': '{} {} {} {}'.format(*self.cells[::-1],
                                                                                            1 if shape == () else np.prod(shape))}
                                data_items[-1].text = f'{hdf5_link}:{name}'

        xdmf_dir.mkdir(parents=True, exist_ok=True)
        with util.open_text((xdmf_dir/hdf5_name).with_suffix('.xdmf'), 'w') as f:
            f.write(xml.dom.minidom.parseString(ET.tostring(xdmf).decode()).toprettyxml())

    def export_VTK(self,
                   output: Union[str, List[str]] = '*',
                   mode: str = 'cell',
                   constituents: Optional[IntSequence] = None,
                   target_dir: Union[None, str, Path] = None,
                   fill_float: float = np.nan,
                   fill_int: int = 0,
                   parallel: bool = True):
        """
        Export to VTK cell/point data.

        One VTK file per visible increment is created.
        For point data, the VTK format is poly data (.vtp).
        For cell data, either an image (.vti) or unstructured (.vtu) dataset
        is written for grid-based or mesh-based simulations, respectively.

        Parameters
        ----------
        output : (list of) str, optional
            Names of the datasets to export to the VTK file.
            Defaults to '*', in which case all visible datasets are exported.
        mode : {'cell', 'point'}, optional
            Export in cell format or point format.
            Defaults to 'cell'.
        constituents : (list of) int, optional
            Constituents to consider.
            Defaults to None, in which case all constituents are considered.
        target_dir : str or pathlib.Path, optional
            Directory to save VTK files. Will be created if non-existent.
        fill_float : float, optional
            Fill value for non-existent entries of floating point type.
            Defaults to NaN.
        fill_int : int, optional
            Fill value for non-existent entries of integer type.
            Defaults to 0.
        parallel : bool, optional
            Write VTK files in parallel in a separate background process.
            Defaults to True.
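
        Examples
        --------
        Export all visible datasets as cell data, one file per
        visible increment:

        >>> import damask
        >>> r = damask.Result('my_file.hdf5')
        >>> r.export_VTK()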

        """
        if mode.lower() == 'cell':
            v = self.geometry0
        elif mode.lower() == 'point':
            v = VTK.from_poly_data(self.coordinates0_point)
        else:
            raise ValueError(f'invalid mode "{mode}"')

        v.comments = [util.execution_stamp('Result', 'export_VTK')]

        N_digits = int(np.floor(np.log10(max(1, self.incs[-1]))))+1

        constituents_ = constituents if isinstance(constituents, Iterable) else \
                        (range(self.N_constituents) if constituents is None else [constituents])  # type: ignore

        suffixes = [''] if self.N_constituents == 1 or isinstance(constituents, int) else \
                   [f'#{c}' for c in constituents_]

        at_cell_ph, in_data_ph, at_cell_ho, in_data_ho = self._mappings()

        vtk_dir = Path.cwd() if target_dir is None else Path(target_dir)
        vtk_dir.mkdir(parents=True, exist_ok=True)

        with h5py.File(self.fname, 'r') as f:
            if self.version_minor >= 13:
                creator = f.attrs['creator'] if h5py3 else f.attrs['creator'].decode()
                created = f.attrs['created'] if h5py3 else f.attrs['created'].decode()
                v.comments += [f'{creator} ({created})']

            for inc in util.show_progress(self.visible['increments']):

                u = _read(f['/'.join([inc, 'geometry', 'u_n' if mode.lower() == 'cell' else 'u_p'])])
                v = v.set('u', u)

                for ty in ['phase', 'homogenization']:
                    for field in self.visible['fields']:
                        outs: Dict[str, np.ma.core.MaskedArray] = {}
                        for label in self.visible[ty+'s']:
                            if field not in f['/'.join([inc, ty, label])].keys(): continue

                            for out in _match(output, f['/'.join([inc, ty, label, field])].keys()):
                                data = ma.array(_read(f['/'.join([inc, ty, label, field, out])]))

                                if ty == 'phase':
                                    if out+suffixes[0] not in outs.keys():
                                        for c, suffix in zip(constituents_, suffixes):
                                            outs[out+suffix] = \
                                                _empty_like(data, self.N_materialpoints, fill_float, fill_int)

                                    for c, suffix in zip(constituents_, suffixes):
                                        outs[out+suffix][at_cell_ph[c][label]] = data[in_data_ph[c][label]]

                                if ty == 'homogenization':
                                    if out not in outs.keys():
                                        outs[out] = _empty_like(data, self.N_materialpoints, fill_float, fill_int)

                                    outs[out][at_cell_ho[label]] = data[in_data_ho[label]]

                        for label, dataset in outs.items():
                            v = v.set('/'.join(['/'.join([ty, field, label]), dataset.dtype.metadata['unit']]), dataset)

                v.save(vtk_dir/f'{self.fname.stem}_inc{inc.split(prefix_inc)[-1].zfill(N_digits)}',
                       parallel=parallel)

    def export_DREAM3D(self,
                       target_dir: Union[None, str, Path] = None):
        """
        Export the visible components to DREAM3D compatible files.

        One DREAM3D file per visible increment is created.
        The DREAM3D file is based on the HDF5 file format.
        No regridding is performed; the original DAMASK grid is kept.
        Orientation data 'O' needs to be present in the file.

        Parameters
        ----------
        target_dir : str or pathlib.Path, optional
            Directory to save DREAM3D files. Will be created if non-existent.
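
        Examples
        --------
        Export all visible increments, assuming 'my_file.hdf5' contains
        the orientation dataset 'O':

        >>> import damask
        >>> r = damask.Result('my_file.hdf5')
        >>> r.export_DREAM3D()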

        """
        Crystal_structures = {'fcc': 1,
                              'bcc': 1,
                              'hcp': 0,
                              'bct': 7,
                              'ort': 6}    # TODO: is bct Tetragonal low/Tetragonal high?
        Phase_types = {'Primary': 0}       # further additions can be derived from the 'Create Ensemble Info' filter,
                                           # other options could be 'Precipitate' and so on

        dx = self.size/self.cells

        at_cell_ph, in_data_ph, at_cell_ho, in_data_ho = self._mappings()

        dream_dir = Path.cwd() if target_dir is None else Path(target_dir)
        dream_dir.mkdir(parents=True, exist_ok=True)

        with h5py.File(self.fname, 'r') as f:
            for inc in util.show_progress(self.visible['increments']):
                cell_orientation_array = np.zeros((np.prod(self.cells), 3))
                phase_ID_array = np.zeros((np.prod(self.cells)), dtype=np.int32)    # reshaped later

                for c in range(self.N_constituents):
                    for count, label in enumerate(self.visible['phases']):
                        try:
                            data = ma.array(_read(f['/'.join([inc, 'phase', label, 'mechanical/O'])]))
                            # DREAM3D handles Euler angles better than quaternions
                            cell_orientation_array[at_cell_ph[c][label], :] = \
                                Rotation(data[in_data_ph[c][label], :]).as_Euler_angles()
                        except KeyError:
                            raise RuntimeError('orientation data "O" not present') from None

                        phase_ID_array[at_cell_ph[c][label]] = count + 1

                o = h5py.File(dream_dir/f'{self.fname.stem}_{inc}.dream3D', 'w')
                o.attrs['DADF5toDREAM3D'] = '1.0'
                o.attrs['FileVersion']    = '7.0'

                for g in ['DataContainerBundles', 'Pipeline']:    # empty groups (needed)
                    o.create_group(g)

                data_container_label = 'DataContainers/SyntheticVolumeDataContainer'
                cell_data_label = data_container_label + '/CellData'

                # phase data
                o[cell_data_label + '/Phases'] = np.reshape(phase_ID_array,
                                                            tuple(np.flip(self.cells)) + (1,))

                # Euler angle data
                orientation_data = cell_orientation_array.astype(np.float32)
                o[cell_data_label + '/Eulers'] = orientation_data.reshape(tuple(np.flip(self.cells)) + (3,))

                # attributes of the CellData group
                o[cell_data_label].attrs['AttributeMatrixType'] = np.array([3], np.uint32)
                o[cell_data_label].attrs['TupleDimensions']     = np.array(self.cells, np.uint64)

                # common attributes of the datasets in CellData
                for group in ['/Phases', '/Eulers']:
                    o[cell_data_label + group].attrs['DataArrayVersion']      = np.array([2], np.int32)
                    o[cell_data_label + group].attrs['Tuple Axis Dimensions'] = 'x={},y={},z={}'.format(*np.array(self.cells))

                # phase attributes
                o[cell_data_label + '/Phases'].attrs['ComponentDimensions'] = np.array([1], np.uint64)
                o[cell_data_label + '/Phases'].attrs['ObjectType']          = 'DataArray<int32_t>'
                o[cell_data_label + '/Phases'].attrs['TupleDimensions']     = np.array(self.cells, np.uint64)

                # Euler angle attributes
                o[cell_data_label + '/Eulers'].attrs['ComponentDimensions'] = np.array([3], np.uint64)
                o[cell_data_label + '/Eulers'].attrs['ObjectType']          = 'DataArray<float>'
                o[cell_data_label + '/Eulers'].attrs['TupleDimensions']     = np.array(self.cells, np.uint64)

                # create the EnsembleAttributeMatrix
                ensemble_label = data_container_label + '/CellEnsembleData'

                # crystal structure data: assuming only cubic crystal structures;
                # DAMASK provides the crystal structure, but the mapping to the DREAM3D numbering needs to be checked
                o[ensemble_label + '/CrystalStructures'] = np.uint32(np.array([999] + [1]*len(self.phases)))
                # also assuming 'Primary' phases; there can be precipitates etc. as well
                o[ensemble_label + '/PhaseTypes'] = np.uint32(np.array([999] + [Phase_types['Primary']]*len(self.phases))).reshape((len(self.phases)+1, 1))

                # attributes of the ensemble matrix
                o[ensemble_label].attrs['AttributeMatrixType'] = np.array([11], np.uint32)
                o[ensemble_label].attrs['TupleDimensions']     = np.array([len(self.phases)+1], np.uint64)

                # attributes of the data in the ensemble matrix
                for group in ['CrystalStructures', 'PhaseTypes']:    # 'PhaseName' is not required, but taking over the phase name mapping would be nice
                    o[ensemble_label + '/' + group].attrs['ComponentDimensions']   = np.array([1], np.uint64)
                    o[ensemble_label + '/' + group].attrs['Tuple Axis Dimensions'] = f'x={len(self.phases)+1}'
                    o[ensemble_label + '/' + group].attrs['DataArrayVersion']      = np.array([2], np.int32)
                    o[ensemble_label + '/' + group].attrs['ObjectType']            = 'DataArray<uint32_t>'
                    o[ensemble_label + '/' + group].attrs['TupleDimensions']       = np.array([len(self.phases)+1], np.uint64)

                # geometry info
                geom_label = data_container_label + '/_SIMPL_GEOMETRY'

                o[geom_label + '/DIMENSIONS'] = np.int64(np.array(self.cells))
                o[geom_label + '/ORIGIN']     = np.float32(np.zeros(3))
                o[geom_label + '/SPACING']    = np.float32(dx)

                o[geom_label].attrs['GeometryName']          = 'ImageGeometry'
                o[geom_label].attrs['GeometryTypeName']      = 'ImageGeometry'
                o[geom_label].attrs['GeometryType']          = np.array([0], np.uint32)
                o[geom_label].attrs['SpatialDimensionality'] = np.array([3], np.uint32)
                o[geom_label].attrs['UnitDimensionality']    = np.array([3], np.uint32)

    def export_DADF5(self,
                     fname,
                     output: Union[str, List[str]] = '*'):
        """
        Export visible components into a new DADF5 file.

        A DADF5 (DAMASK HDF5) file contains DAMASK results.
        Its group/folder structure reflects the layout in material.yaml.

        Parameters
        ----------
        fname : str or pathlib.Path
            Name of the DADF5 file to be created.
        output : (list of) str, optional
            Names of the datasets to export.
            Defaults to '*', in which case all visible datasets are exported.
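
        Examples
        --------
        Export the current view into a new file (the new file name is
        an arbitrary example):

        >>> import damask
        >>> r = damask.Result('my_file.hdf5')
        >>> r.export_DADF5('my_file_subset.hdf5')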

        """
        if Path(fname).expanduser().absolute() == self.fname:
            raise PermissionError(f'cannot overwrite {self.fname}')

        with h5py.File(self.fname, 'r') as f_in, h5py.File(fname, 'w') as f_out:
            for k, v in f_in.attrs.items():
                f_out.attrs.create(k, v)
            for g in ['setup', 'geometry', 'cell_to']:
                f_in.copy(g, f_out)

            for inc in util.show_progress(self.visible['increments']):
                f_in.copy(inc, f_out, shallow=True)
                for out in _match(output, f_in['/'.join([inc, 'geometry'])].keys()):
                    f_in[inc]['geometry'].copy(out, f_out[inc]['geometry'])

                for label in self.homogenizations:
                    f_in[inc]['homogenization'].copy(label, f_out[inc]['homogenization'], shallow=True)
                for label in self.phases:
                    f_in[inc]['phase'].copy(label, f_out[inc]['phase'], shallow=True)

                for ty in ['phase', 'homogenization']:
                    for label in self.visible[ty+'s']:
                        for field in _match(self.visible['fields'], f_in['/'.join([inc, ty, label])].keys()):
                            p = '/'.join([inc, ty, label, field])
                            for out in _match(output, f_in[p].keys()):
                                f_in[p].copy(out, f_out[p])

    def export_simulation_setup(self,
                                output: Union[str, List[str]] = '*',
                                target_dir: Union[None, str, Path] = None,
                                overwrite: bool = False,
                                ):
        """
        Export original simulation setup of the Result object.

        Parameters
        ----------
        output : (list of) str, optional
            Names of the datasets to export to the file.
            Defaults to '*', in which case all setup files are exported.
        target_dir : str or pathlib.Path, optional
            Directory to save setup files. Will be created if non-existent.
        overwrite : bool, optional
            Overwrite any existing setup files.
            Defaults to False.
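
        Examples
        --------
        Export all setup files into a separate directory
        ('setup' is an arbitrary example name):

        >>> import damask
        >>> r = damask.Result('my_file.hdf5')
        >>> r.export_simulation_setup(target_dir='setup')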
"""
2022-11-06 23:40:23 +05:30
def export ( name : str ,
obj : Union [ h5py . Dataset , h5py . Group ] ,
output : Union [ str , List [ str ] ] ,
cfg_dir : Path ,
overwrite : bool ) :
cfg = cfg_dir / name
2022-11-10 04:00:31 +05:30
if type ( obj ) == h5py . Dataset and _match ( output , [ name ] ) :
2022-11-10 01:32:38 +05:30
if cfg . exists ( ) and not overwrite :
raise PermissionError ( f ' " { cfg } " exists ' )
2021-07-27 13:01:57 +05:30
else :
2022-11-10 04:00:31 +05:30
cfg . parent . mkdir ( parents = True , exist_ok = True )
2022-11-10 01:32:38 +05:30
with util . open_text ( cfg , ' w ' ) as f_out : f_out . write ( obj [ 0 ] . decode ( ) )
2022-11-09 17:17:47 +05:30
2022-11-09 20:09:47 +05:30
cfg_dir = ( Path . cwd ( ) if target_dir is None else Path ( target_dir ) )
2021-08-02 14:08:59 +05:30
with h5py . File ( self . fname , ' r ' ) as f_in :
2022-11-06 23:40:23 +05:30
f_in [ ' setup ' ] . visititems ( partial ( export ,
output = output ,
cfg_dir = cfg_dir ,
overwrite = overwrite ) )